diff --git a/.actrc b/.actrc index 9b8fcf1c72a..a54c7b0efae 100644 --- a/.actrc +++ b/.actrc @@ -2,3 +2,4 @@ # The `full` variants are needed for Maven for TeamCity configuration validation -P ubuntu-latest=ghcr.io/catthehacker/ubuntu:act-22.04 -P linux=ghcr.io/catthehacker/ubuntu:act-22.04 +-P custom-linux-medium=ghcr.io/catthehacker/ubuntu:act-22.04 diff --git a/.changelog/25743.txt b/.changelog/25743.txt new file mode 100644 index 00000000000..9eb18e184e4 --- /dev/null +++ b/.changelog/25743.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_ec2_transit_gateway_peering_attachments +``` diff --git a/.changelog/26399.txt b/.changelog/26399.txt new file mode 100644 index 00000000000..19345acf8fd --- /dev/null +++ b/.changelog/26399.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_drs_replication_configuration_template +``` \ No newline at end of file diff --git a/.changelog/34474.txt b/.changelog/34474.txt new file mode 100644 index 00000000000..7d90c29688b --- /dev/null +++ b/.changelog/34474.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_lakeformation_data_lake_settings: Add `allow_full_table_external_data_access` attribute +``` + +```release-note:enhancement +data-source/aws_lakeformation_data_lake_settings: Add `allow_full_table_external_data_access` attribute +``` \ No newline at end of file diff --git a/.changelog/35003.txt b/.changelog/35003.txt new file mode 100644 index 00000000000..139b4213c3e --- /dev/null +++ b/.changelog/35003.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_rds_certificate +``` \ No newline at end of file diff --git a/.changelog/35168.txt b/.changelog/35168.txt new file mode 100644 index 00000000000..f633690a19c --- /dev/null +++ b/.changelog/35168.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_networkfirewall_tls_inspection_configuration +``` + +```release-note:enhancement +resource/aws_networkfirewall_logging_configuration: Add plan-time validation of `firewall_arn` +``` \ No newline at end 
of file diff --git a/.changelog/35722.txt b/.changelog/35722.txt new file mode 100644 index 00000000000..68bffb9b829 --- /dev/null +++ b/.changelog/35722.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_networkmonitor_monitor +``` + +```release-note:new-resource +aws_networkmonitor_probe +``` \ No newline at end of file diff --git a/.changelog/36286.txt b/.changelog/36286.txt new file mode 100644 index 00000000000..108fa87be0a --- /dev/null +++ b/.changelog/36286.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_iot_topic_rule: Retry IAM eventual consistency errors on Update +``` \ No newline at end of file diff --git a/.changelog/36301.txt b/.changelog/36301.txt new file mode 100644 index 00000000000..6806629537e --- /dev/null +++ b/.changelog/36301.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_cloudfront_origin_access_control +``` \ No newline at end of file diff --git a/.changelog/36368.txt b/.changelog/36368.txt new file mode 100644 index 00000000000..83ea46b996b --- /dev/null +++ b/.changelog/36368.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_timestreamwrite_database +``` \ No newline at end of file diff --git a/.changelog/36423.txt b/.changelog/36423.txt new file mode 100644 index 00000000000..164dda3c323 --- /dev/null +++ b/.changelog/36423.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lb_target_group: Use the configured `ip_address_type` value when `target_type` is `instance` +``` diff --git a/.changelog/36568.txt b/.changelog/36568.txt new file mode 100644 index 00000000000..ac36dddd268 --- /dev/null +++ b/.changelog/36568.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_appsync_api_key: Add `api_key_id` attribute +``` diff --git a/.changelog/36599.txt b/.changelog/36599.txt new file mode 100644 index 00000000000..d82e112a3bd --- /dev/null +++ b/.changelog/36599.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_timestreamwrite_table +``` \ No newline at end of file diff --git 
a/.changelog/36754.txt b/.changelog/36754.txt new file mode 100644 index 00000000000..b446cfcbb1e --- /dev/null +++ b/.changelog/36754.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_ec2_capacity_reservation: Add configurable timeouts +``` + +```release-note:enhancement +resource/aws_ec2_capacity_reservation: Retry `InsufficientInstanceCapacity` errors +``` \ No newline at end of file diff --git a/.changelog/36772.txt b/.changelog/36772.txt new file mode 100644 index 00000000000..fbb9b74ed61 --- /dev/null +++ b/.changelog/36772.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_iot_topic_rule: Add `cloudwatch_logs.batch_mode` and `error_action.cloudwatch_logs.batch_mode` arguments +``` \ No newline at end of file diff --git a/.changelog/36830.txt b/.changelog/36830.txt new file mode 100644 index 00000000000..99fe1b698e2 --- /dev/null +++ b/.changelog/36830.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_quicksight_account_subscription: Add `iam_identity_center_instance_arn` attribute +``` diff --git a/.changelog/36900.txt b/.changelog/36900.txt new file mode 100644 index 00000000000..e630c5cfa54 --- /dev/null +++ b/.changelog/36900.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_fis_experiment_template: Add `experiment_options` configuration block +``` diff --git a/.changelog/36902.txt b/.changelog/36902.txt new file mode 100644 index 00000000000..df5376dfdf4 --- /dev/null +++ b/.changelog/36902.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ec2_transit_gateway_peering_attachment: Add `options` argument +``` \ No newline at end of file diff --git a/.changelog/36966.txt b/.changelog/36966.txt new file mode 100644 index 00000000000..f17f986bc81 --- /dev/null +++ b/.changelog/36966.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ec2_network_insights_path: Mark `destination` as Optional +``` \ No newline at end of file diff --git a/.changelog/36968.txt b/.changelog/36968.txt new file mode 
100644 index 00000000000..55a11aa29bd --- /dev/null +++ b/.changelog/36968.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_msk_replicator: Add `starting_position` argument +``` \ No newline at end of file diff --git a/.changelog/37006.txt b/.changelog/37006.txt new file mode 100644 index 00000000000..49562bbb5ca --- /dev/null +++ b/.changelog/37006.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_wafv2_web_acl: Add `enable_machine_learning` to `aws_managed_rules_bot_control_rule_set` configuration block +``` \ No newline at end of file diff --git a/.changelog/37082.txt b/.changelog/37082.txt new file mode 100644 index 00000000000..53675860fe1 --- /dev/null +++ b/.changelog/37082.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lb_target_group: Add `target_group_health` configuration block +``` diff --git a/.changelog/37105.txt b/.changelog/37105.txt new file mode 100644 index 00000000000..85a19673e49 --- /dev/null +++ b/.changelog/37105.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_amplify_domain_association: Add `certificate_settings` argument +``` \ No newline at end of file diff --git a/.changelog/37108.txt b/.changelog/37108.txt new file mode 100644 index 00000000000..b5eaf6e3fd9 --- /dev/null +++ b/.changelog/37108.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_rds_cluster: Add `ca_certificate_identifier` argument and `ca_certificate_valid_till` attribute +``` \ No newline at end of file diff --git a/.changelog/37139.txt b/.changelog/37139.txt new file mode 100644 index 00000000000..e40a31af0f7 --- /dev/null +++ b/.changelog/37139.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_sagemaker_domain: Add `default_user_settings.canvas_app_settings.generative_ai_settings` configuration block +``` + +```release-note:enhancement +resource/aws_sagemaker_user_profile: Add `user_settings.canvas_app_settings.generative_ai_settings` configuration block +``` diff --git 
a/.changelog/37142.txt b/.changelog/37142.txt new file mode 100644 index 00000000000..fc8b25f4bf8 --- /dev/null +++ b/.changelog/37142.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_launch_template: Add `network_interfaces.primary_ipv6` argument +``` + +```release-note:enhancement +data-source/aws_launch_template: Add `network_interfaces.primary_ipv6` attribute +``` \ No newline at end of file diff --git a/.changelog/37152.txt b/.changelog/37152.txt new file mode 100644 index 00000000000..87ba726e3a6 --- /dev/null +++ b/.changelog/37152.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_iot_authorizer: Add `tags` argument +``` + +```release-note:bug +resource/aws_iot_provisioning_template: Fix `pre_provisioning_hook` update operation +``` \ No newline at end of file diff --git a/.changelog/37153.txt b/.changelog/37153.txt new file mode 100644 index 00000000000..5af39c665fe --- /dev/null +++ b/.changelog/37153.txt @@ -0,0 +1,8 @@ + +```release-note:enhancement +resource/aws_sagemaker_domain: Add `default_user_settings.code_editor_app_settings.custom_image` configuration block +``` + +```release-note:enhancement +resource/aws_sagemaker_user_profile: Add `user_settings.code_editor_app_settings.custom_image` configuration block +``` diff --git a/.changelog/37174.txt b/.changelog/37174.txt new file mode 100644 index 00000000000..40f846003a6 --- /dev/null +++ b/.changelog/37174.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_autoscaling_group: Fix bug updating `warm_pool.max_group_prepared_capacity` to `0` +``` + +```release-note:enhancement +resource/aws_autoscaling_group: Add plan-time validation of `warm_pool.max_group_prepared_capacity` and `warm_pool.min_size` +``` diff --git a/.changelog/37242.txt b/.changelog/37242.txt new file mode 100644 index 00000000000..5d845c364e7 --- /dev/null +++ b/.changelog/37242.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_route53_resolver_firewall_rule: Add 
`firewall_domain_redirection_action` argument +``` \ No newline at end of file diff --git a/.changelog/37291.txt b/.changelog/37291.txt new file mode 100644 index 00000000000..4749615dac6 --- /dev/null +++ b/.changelog/37291.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_appfabric_ingestion +``` \ No newline at end of file diff --git a/.changelog/37317.txt b/.changelog/37317.txt new file mode 100644 index 00000000000..1476b8fcb6b --- /dev/null +++ b/.changelog/37317.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_imagebuilder_image_pipeline: Add `execution_role` and `workflow` arguments +``` diff --git a/.changelog/37348.txt b/.changelog/37348.txt new file mode 100644 index 00000000000..48a9c90e135 --- /dev/null +++ b/.changelog/37348.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_lb_listener: Correctly set `default_action.target_group_arn` +``` \ No newline at end of file diff --git a/.changelog/37399.txt b/.changelog/37399.txt new file mode 100644 index 00000000000..a92ef98950d --- /dev/null +++ b/.changelog/37399.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_cognito_user_pool +``` diff --git a/.changelog/37627.txt b/.changelog/37627.txt new file mode 100644 index 00000000000..375cbe93938 --- /dev/null +++ b/.changelog/37627.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_appfabric_ingestion_destination +``` \ No newline at end of file diff --git a/.changelog/37705.txt b/.changelog/37705.txt new file mode 100644 index 00000000000..aa2d9daccd4 --- /dev/null +++ b/.changelog/37705.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_msk_cluster: Add `broker_node_group_info` attribute +``` \ No newline at end of file diff --git a/.changelog/37708.txt b/.changelog/37708.txt new file mode 100644 index 00000000000..bd064669db0 --- /dev/null +++ b/.changelog/37708.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_db_instance: Add `engine_lifecycle_support` argument +``` + 
+```release-note:enhancement +resource/aws_rds_cluster: Add `engine_lifecycle_support` argument +``` + +```release-note:enhancement +resource/aws_rds_global_cluster: Add `engine_lifecycle_support` argument +``` \ No newline at end of file diff --git a/.changelog/37726.txt b/.changelog/37726.txt new file mode 100644 index 00000000000..6979dc6e8a3 --- /dev/null +++ b/.changelog/37726.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sagemaker_endpoint_configuration: Add support for `InputAndOutput` in `capture_mode` +``` \ No newline at end of file diff --git a/.changelog/37748.txt b/.changelog/37748.txt new file mode 100644 index 00000000000..50a1b8230e1 --- /dev/null +++ b/.changelog/37748.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_codebuild_project: Fix unsetting `concurrent_build_limit` +``` \ No newline at end of file diff --git a/.changelog/37760.txt b/.changelog/37760.txt new file mode 100644 index 00000000000..39ac8c7ee24 --- /dev/null +++ b/.changelog/37760.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_glue_job: Add `maintenance_window` argument +``` \ No newline at end of file diff --git a/.changelog/37889.txt b/.changelog/37889.txt new file mode 100644 index 00000000000..190c669a721 --- /dev/null +++ b/.changelog/37889.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_emrserverless_application: Add `interactive_configuration` argument +``` \ No newline at end of file diff --git a/.changelog/37890.txt b/.changelog/37890.txt new file mode 100644 index 00000000000..14345b206ce --- /dev/null +++ b/.changelog/37890.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_backup_plan: Add `rule` attribute +``` \ No newline at end of file diff --git a/.changelog/37898.txt b/.changelog/37898.txt new file mode 100644 index 00000000000..5b228b6e1ed --- /dev/null +++ b/.changelog/37898.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_cloudformation_stack_set_instance: Extend 
`deployment_targets` argument. +``` + +```release-note:bug +resource/aws_cloudformation_stack_set_instance: Add `ForceNew` to deployment_targets attributes to ensure a new resource is recreated when the deployment_targets argument is changed, which was not the case previously. +``` diff --git a/.changelog/37932.txt b/.changelog/37932.txt new file mode 100644 index 00000000000..22155d47f59 --- /dev/null +++ b/.changelog/37932.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_ecs_cluster: Add `configuration.managed_storage_configuration` argument +``` \ No newline at end of file diff --git a/.changelog/37964.txt b/.changelog/37964.txt new file mode 100644 index 00000000000..2b83af1eae3 --- /dev/null +++ b/.changelog/37964.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_vpclattice_listener: Support `TLS_PASSTHROUGH` as a valid value for `protocol` +``` \ No newline at end of file diff --git a/.changelog/#####.txt b/.changelog/37977.txt similarity index 100% rename from .changelog/#####.txt rename to .changelog/37977.txt diff --git a/.changelog/37980.txt b/.changelog/37980.txt new file mode 100644 index 00000000000..c8c21b1b80f --- /dev/null +++ b/.changelog/37980.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lambda_event_source_mapping: Remove the upper limit on `scaling_config.maximum_concurrency` +``` \ No newline at end of file diff --git a/.changelog/37991.txt b/.changelog/37991.txt new file mode 100644 index 00000000000..9b338f4e5f3 --- /dev/null +++ b/.changelog/37991.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_dynamodb_table: Fixes perpetual diff when `ttl.attribute_name` is set when `ttl.enabled` is not set. +``` + +```release-note:enhancement +resource/aws_dynamodb_table: Adds validation for `ttl` values. 
+``` diff --git a/.changelog/38003.txt b/.changelog/38003.txt new file mode 100644 index 00000000000..81e8aa44fcf --- /dev/null +++ b/.changelog/38003.txt @@ -0,0 +1,19 @@ +```release-note:enhancement +resource/aws_launch_template: Add `instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` argument +``` + +```release-note:enhancement +resource/aws_ec2_fleet: Add `launch_template_config.override.instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` argument +``` + +```release-note:enhancement +resource/aws_autoscaling_group: Add `mixed_instances_policy.launch_template.override.instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` argument +``` + +```release-note:enhancement +data-source/aws_autoscaling_group: Add `mixed_instances_policy.launch_template.override.instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` attribute +``` + +```release-note:enhancement +data-source/aws_launch_template: Add `instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` attribute +``` diff --git a/.changelog/38010.txt b/.changelog/38010.txt new file mode 100644 index 00000000000..eceb03c0e5d --- /dev/null +++ b/.changelog/38010.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_api_gateway_integration: Increase maximum value of `timeout_milliseconds` from `29000` (29 seconds) to `300000` (5 minutes) +``` diff --git a/.changelog/38011.txt b/.changelog/38011.txt new file mode 100644 index 00000000000..29f38daa2fc --- /dev/null +++ b/.changelog/38011.txt @@ -0,0 +1,3 @@ +```release-note:bug +service/transitgateway: Fix resource Read pagination regression causing `NotFound` errors +``` \ No newline at end of file diff --git a/.changelog/38013.txt b/.changelog/38013.txt new file mode 100644 index 00000000000..ebbbe02970a --- /dev/null +++ b/.changelog/38013.txt @@ -0,0 +1,23 @@ +```release-note:enhancement 
+data-source/aws_networkmanager_core_network_policy_document: Add `attachment_policies.action.add_to_network_function_group` argument +``` + +```release-note:enhancement +data-source/aws_networkmanager_core_network_policy_document: Add `send-via` and `send-to` as valid values for `segment_actions.action` +``` + +```release-note:enhancement +data-source/aws_networkmanager_core_network_policy_document: Add `single-hop` and `dual-hop` as valid values for `segment_actions.mode` +``` + +```release-note:enhancement +data-source/aws_networkmanager_core_network_policy_document: Add `when_sent_to` and `via` configuration blocks to `segment_actions` +``` + +```release-note:bug +data-source/aws_networkmanager_core_network_policy_document: Add correct `except` values to the returned JSON document when `segment_actions.share_with_except` is configured +``` + +```release-note:enhancement +data-source/aws_networkmanager_core_network_policy_document: Add `network_function_groups` configuration block +``` \ No newline at end of file diff --git a/.changelog/38057.txt b/.changelog/38057.txt new file mode 100644 index 00000000000..78d69677fd4 --- /dev/null +++ b/.changelog/38057.txt @@ -0,0 +1,3 @@ +```release-note:bug +provider: Now falls back to non-FIPS endpoint if `use_fips_endpoint` is set and no FIPS endpoint is available +``` diff --git a/.changelog/38067.txt b/.changelog/38067.txt new file mode 100644 index 00000000000..31f0052a13e --- /dev/null +++ b/.changelog/38067.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_ecs_service: Correctly set `tags` +``` diff --git a/.changelog/38071.txt b/.changelog/38071.txt new file mode 100644 index 00000000000..d1b631727d9 --- /dev/null +++ b/.changelog/38071.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_controltower_control: Add `parameters` argument and `arn` attribute +``` \ No newline at end of file diff --git a/.changelog/38074.txt b/.changelog/38074.txt new file mode 100644 index 00000000000..7fea5269f5a 
--- /dev/null +++ b/.changelog/38074.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_route53_resolver_firewall_rule: Add `q_type` argument +``` \ No newline at end of file diff --git a/.changelog/38077.txt b/.changelog/38077.txt new file mode 100644 index 00000000000..68b2672a4c0 --- /dev/null +++ b/.changelog/38077.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_wafv2_web_acl: Add `sensitivity_level` argument to `sqli_match_statement` configuration block +``` diff --git a/.changelog/38078.txt b/.changelog/38078.txt new file mode 100644 index 00000000000..08e28ee09fb --- /dev/null +++ b/.changelog/38078.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sagemaker_workforce: add `oidc_config.authentication_request_extra_params` and `oidc_config.scope` arguments +``` diff --git a/.changelog/38084.txt b/.changelog/38084.txt new file mode 100644 index 00000000000..bd603d840b9 --- /dev/null +++ b/.changelog/38084.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_appfabric_app_authorization_connection +``` \ No newline at end of file diff --git a/.changelog/38085.txt b/.changelog/38085.txt new file mode 100644 index 00000000000..524ec90bf03 --- /dev/null +++ b/.changelog/38085.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sagemaker_endpoint_configuration: Add `production_variants.inference_ami_version` and `shadow_production_variants.inference_ami_version` arguments +``` \ No newline at end of file diff --git a/.changelog/38087.txt b/.changelog/38087.txt new file mode 100644 index 00000000000..ccc63b5d6da --- /dev/null +++ b/.changelog/38087.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_sagemaker_workteam: Add `worker_access_configuration` attribute +``` diff --git a/.changelog/38101.txt b/.changelog/38101.txt new file mode 100644 index 00000000000..9cf29553c92 --- /dev/null +++ b/.changelog/38101.txt @@ -0,0 +1,7 @@ +```release-note:new-resource +aws_grafana_workspace_service_account_token 
+``` + +```release-note:new-resource +aws_grafana_workspace_service_account +``` \ No newline at end of file diff --git a/.changelog/38109.txt b/.changelog/38109.txt new file mode 100644 index 00000000000..db68ff85f8e --- /dev/null +++ b/.changelog/38109.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ecs_service: Change `volume_configuration.managed_ebs_volume.throughput` from `TypeString` to `TypeInt` +``` \ No newline at end of file diff --git a/.changelog/38143.txt b/.changelog/38143.txt new file mode 100644 index 00000000000..49e60f8c3b3 --- /dev/null +++ b/.changelog/38143.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_drs_replication_configuration_template: Fix issues preventing creation and deletion +``` \ No newline at end of file diff --git a/.changelog/38161.txt b/.changelog/38161.txt new file mode 100644 index 00000000000..402a2a10dfa --- /dev/null +++ b/.changelog/38161.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_fms_policy: Add `resource_set_ids` attribute +``` \ No newline at end of file diff --git a/.changelog/38162.txt b/.changelog/38162.txt new file mode 100644 index 00000000000..4819d5a0db0 --- /dev/null +++ b/.changelog/38162.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_eks_cluster: Add `bootstrap_self_managed_addons` argument +``` \ No newline at end of file diff --git a/.changelog/38168.txt b/.changelog/38168.txt new file mode 100644 index 00000000000..c831f475431 --- /dev/null +++ b/.changelog/38168.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_cognito_user_pool_client: Fix `InvalidParameterException: 2 validation errors detected` errors on Read +``` diff --git a/.changelog/38181.txt b/.changelog/38181.txt new file mode 100644 index 00000000000..873fdead36d --- /dev/null +++ b/.changelog/38181.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_verifiedpermissions_identity_source +``` \ No newline at end of file diff --git a/.changelog/38182.txt b/.changelog/38182.txt new file 
mode 100644 index 00000000000..2ae2ffe12a8 --- /dev/null +++ b/.changelog/38182.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_cognito_user: Fix a bug that caused resource recreation for resources imported with certain [import ID](https://developer.hashicorp.com/terraform/language/import#import-id) formats +``` \ No newline at end of file diff --git a/.changelog/38184.txt b/.changelog/38184.txt new file mode 100644 index 00000000000..8e82a6d37fa --- /dev/null +++ b/.changelog/38184.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_cognito_user_pool: Fix `runtime error: index out of range [0] with length 0` panic when adding `lambda_config` +``` \ No newline at end of file diff --git a/.changelog/38196.txt b/.changelog/38196.txt new file mode 100644 index 00000000000..6e5f5658c4e --- /dev/null +++ b/.changelog/38196.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_globalaccelerator_cross_account_attachment: Add `cidr_block` argument to `resource` configuration block +``` diff --git a/.changelog/38199.txt b/.changelog/38199.txt new file mode 100644 index 00000000000..65d3e1fe67d --- /dev/null +++ b/.changelog/38199.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_codebuild_webhook: Add `scope_configuration` argument +``` \ No newline at end of file diff --git a/.changelog/38212.txt b/.changelog/38212.txt new file mode 100644 index 00000000000..a653decd871 --- /dev/null +++ b/.changelog/38212.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_iam_server_certificate: Add configurable `delete` timeout +``` \ No newline at end of file diff --git a/.changelog/38213.txt b/.changelog/38213.txt new file mode 100644 index 00000000000..6c530efca6a --- /dev/null +++ b/.changelog/38213.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_transfer_connector +``` diff --git a/.changelog/38225.txt b/.changelog/38225.txt new file mode 100644 index 00000000000..e65dfb9396e --- /dev/null +++ b/.changelog/38225.txt @@ -0,0 +1,3 
@@ +```release-note:new-data-source +aws_appstream_image +``` diff --git a/.changelog/38227.txt b/.changelog/38227.txt new file mode 100644 index 00000000000..89611312341 --- /dev/null +++ b/.changelog/38227.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_db_instance: Fix `InvalidParameterCombination: A parameter group can't be specified during Read Replica creation for the following DB engine: postgres` errors +``` diff --git a/.changelog/38252.txt b/.changelog/38252.txt new file mode 100644 index 00000000000..507b83ced79 --- /dev/null +++ b/.changelog/38252.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_db_instance: Correctly mark incomplete instances as [tainted](https://developer.hashicorp.com/terraform/cli/state/taint#the-tainted-status) during creation +``` diff --git a/.changelog/38269.txt b/.changelog/38269.txt new file mode 100644 index 00000000000..bc86f4cfa24 --- /dev/null +++ b/.changelog/38269.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_elasticache_serverless_cache: Allow `cache_usage_limits.data_storage.maximum`, `cache_usage_limits.data_storage.minimum`, `cache_usage_limits.ecpu_per_second.maximum` and `cache_usage_limits.ecpu_per_second.minimum` to be updated in-place +``` \ No newline at end of file diff --git a/.changelog/38270.txt b/.changelog/38270.txt new file mode 100644 index 00000000000..37dd0768010 --- /dev/null +++ b/.changelog/38270.txt @@ -0,0 +1,31 @@ +```release-note:enhancement +resource/aws_mskconnect_connector: Add `tags` argument and `tags_all` attribute +``` + +```release-note:enhancement +resource/aws_mskconnect_custom_plugin: Add `tags` argument and `tags_all` attribute +``` + +```release-note:enhancement +resource/aws_mskconnect_worker_configuration: Add `tags` argument and `tags_all` attribute +``` + +```release-note:enhancement +resource/aws_mskconnect_worker_configuration: Add resource deletion logic +``` + +```release-note:enhancement +data-source/aws_mskconnect_connector: Add `tags` 
attribute +``` + +```release-note:enhancement +data-source/aws_mskconnect_custom_plugin: Add `tags` attribute +``` + +```release-note:enhancement +data-source/aws_mskconnect_worker_configuration: Add `tags` attribute +``` + +```release-note:bug +resource/aws_mskconnect_connector: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panic when `log_delivery.worker_log_delivery` is empty (`{}`) +``` \ No newline at end of file diff --git a/.changelog/38271.txt b/.changelog/38271.txt new file mode 100644 index 00000000000..c916e66e17d --- /dev/null +++ b/.changelog/38271.txt @@ -0,0 +1,4 @@ +@@ -0,0 +1,27 @@ +```release-note:enhancement +resource/aws_ssm_association: Add `tags` argument and `tags_all` attribute +``` \ No newline at end of file diff --git a/.changelog/38272.txt b/.changelog/38272.txt new file mode 100644 index 00000000000..3004fe6db6d --- /dev/null +++ b/.changelog/38272.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_ecr_repository: Fix issue where the `tags` attribute is not set +``` \ No newline at end of file diff --git a/.changelog/38273.txt b/.changelog/38273.txt new file mode 100644 index 00000000000..95bbe013401 --- /dev/null +++ b/.changelog/38273.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_cloudtrail_event_data_store: Add `billing_mode` argument +``` \ No newline at end of file diff --git a/.changelog/38274.txt b/.changelog/38274.txt new file mode 100644 index 00000000000..7d5a31838b5 --- /dev/null +++ b/.changelog/38274.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_appstream_fleet: Support `0` as a valid value for `idle_disconnect_timeout_in_seconds` +``` \ No newline at end of file diff --git a/.changelog/38277.txt b/.changelog/38277.txt new file mode 100644 index 00000000000..1c7fff45e33 --- /dev/null +++ b/.changelog/38277.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_oam_link: Add `link_configuration` argument +``` + +```release-note:enhancement 
+data-source/aws_oam_link: Add `link_configuration` attribute +``` \ No newline at end of file diff --git a/.changelog/38292.txt b/.changelog/38292.txt new file mode 100644 index 00000000000..f6c2f03f528 --- /dev/null +++ b/.changelog/38292.txt @@ -0,0 +1,39 @@ +```release-note:bug +aws_dx_lag: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_dynamodb_kinesis_streaming_destination: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_ec2_capacity_block_reservation: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_route_table: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_opensearchserverless_access_policy: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_opensearchserverless_collection: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_opensearchserverless_security_config: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_opensearchserverless_security_policy: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_opensearchserverless_vpc_endpoint: Checks for errors other than NotFound when reading. +``` + +```release-note:bug +aws_ram_principal_association: Checks for errors other than NotFound when reading. +``` diff --git a/.changelog/38295.txt b/.changelog/38295.txt new file mode 100644 index 00000000000..56508de28fa --- /dev/null +++ b/.changelog/38295.txt @@ -0,0 +1,7 @@ +```release-note:bug +resource/aws_eks_cluster: Set `access_config.bootstrap_cluster_creator_admin_permissions` to `true` on Read for clusters with no `access_config` configured. 
This allows in-place updates of existing clusters when `access_config` is configured +``` + +```release-note:bug +data-source/aws_eks_cluster: Add `access_config.bootstrap_cluster_creator_admin_permissions` attribute +``` \ No newline at end of file diff --git a/.changelog/38308.txt b/.changelog/38308.txt new file mode 100644 index 00000000000..f50e830bdd2 --- /dev/null +++ b/.changelog/38308.txt @@ -0,0 +1,11 @@ +```release-note:enhancement +resource/aws_fsx_ontap_file_system: Add `MULTI_AZ_2` as a valid value for `deployment_type` +``` + +```release-note:enhancement +resource/aws_fsx_ontap_file_system: Add `384`, `768`, `1536`, `3072`, and `6144` as valid values for `throughput_capacity` +``` + +```release-note:enhancement +resource/aws_fsx_ontap_file_system: Add `384`, `768`, and `1536` as valid values for `throughput_capacity_per_ha_pair` +``` \ No newline at end of file diff --git a/.changelog/38323.txt b/.changelog/38323.txt new file mode 100644 index 00000000000..5871ffbf3f4 --- /dev/null +++ b/.changelog/38323.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_pinpoint_app: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panic when `campaign_hook` is empty (`{}`) +``` \ No newline at end of file diff --git a/.changelog/38328.txt b/.changelog/38328.txt new file mode 100644 index 00000000000..664e14ecce9 --- /dev/null +++ b/.changelog/38328.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_cloudwatch_log_account_policy +``` \ No newline at end of file diff --git a/.changelog/38332.txt b/.changelog/38332.txt new file mode 100644 index 00000000000..182f58142f7 --- /dev/null +++ b/.changelog/38332.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lb_trust_store: Wait until trust store is `ACTIVE` on resource Create +``` \ No newline at end of file diff --git a/.changelog/38345.txt b/.changelog/38345.txt new file mode 100644 index 00000000000..ff9672bd132 --- /dev/null +++ b/.changelog/38345.txt @@ -0,0 +1,3 @@ 
+```release-note:new-resource +aws_datazone_project +``` \ No newline at end of file diff --git a/.changelog/38350.txt b/.changelog/38350.txt new file mode 100644 index 00000000000..8c5d3433844 --- /dev/null +++ b/.changelog/38350.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_kinesisanalyticsv2_application: Support `FLINK-1_19` as a valid value for `runtime_environment` +``` diff --git a/.changelog/38372.txt b/.changelog/38372.txt new file mode 100644 index 00000000000..f64ed972f96 --- /dev/null +++ b/.changelog/38372.txt @@ -0,0 +1,3 @@ +```release-note:bug +data-source/aws_efs_access_point: Set `id` the the access point ID, not the file system ID. This fixes a regression introduced in [v5.58.0](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md#5580-july-11-2024) +``` \ No newline at end of file diff --git a/.ci/.semgrep-service-name0.yml b/.ci/.semgrep-service-name0.yml index bab47cef204..5fbda10b2a2 100644 --- a/.ci/.semgrep-service-name0.yml +++ b/.ci/.semgrep-service-name0.yml @@ -992,6 +992,67 @@ rules: patterns: - pattern-regex: "(?i)ApplicationInsights" severity: WARNING + - id: applicationsignals-in-func-name + languages: + - go + message: Do not use "ApplicationSignals" in func name inside applicationsignals package + paths: + include: + - internal/service/applicationsignals + exclude: + - internal/service/applicationsignals/list_pages_gen.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ApplicationSignals" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: applicationsignals-in-test-name + languages: + - go + message: Include "ApplicationSignals" in test name + paths: + include: + - internal/service/applicationsignals/*_test.go + patterns: + - pattern: func $NAME( ... 
) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccApplicationSignals" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: applicationsignals-in-const-name + languages: + - go + message: Do not use "ApplicationSignals" in const name inside applicationsignals package + paths: + include: + - internal/service/applicationsignals + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ApplicationSignals" + severity: WARNING + - id: applicationsignals-in-var-name + languages: + - go + message: Do not use "ApplicationSignals" in var name inside applicationsignals package + paths: + include: + - internal/service/applicationsignals + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)ApplicationSignals" + severity: WARNING - id: appmesh-in-func-name languages: - go diff --git a/.ci/.semgrep-service-name1.yml b/.ci/.semgrep-service-name1.yml index ac35c8123f6..70475fde70b 100644 --- a/.ci/.semgrep-service-name1.yml +++ b/.ci/.semgrep-service-name1.yml @@ -567,6 +567,67 @@ rules: patterns: - pattern-regex: "(?i)databasemigrationservice" severity: WARNING + - id: databrew-in-func-name + languages: + - go + message: Do not use "DataBrew" in func name inside databrew package + paths: + include: + - internal/service/databrew + exclude: + - internal/service/databrew/list_pages_gen.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)DataBrew" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: databrew-in-test-name + languages: + - go + message: Include "DataBrew" in test name + paths: + include: + - internal/service/databrew/*_test.go + patterns: + - pattern: func $NAME( ... 
) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccDataBrew" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: databrew-in-const-name + languages: + - go + message: Do not use "DataBrew" in const name inside databrew package + paths: + include: + - internal/service/databrew + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)DataBrew" + severity: WARNING + - id: databrew-in-var-name + languages: + - go + message: Do not use "DataBrew" in var name inside databrew package + paths: + include: + - internal/service/databrew + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)DataBrew" + severity: WARNING - id: dataexchange-in-func-name languages: - go @@ -3405,6 +3466,52 @@ rules: patterns: - pattern-regex: "(?i)Glue" severity: WARNING + - id: gluedatabrew-in-func-name + languages: + - go + message: Do not use "gluedatabrew" in func name inside databrew package + paths: + include: + - internal/service/databrew + exclude: + - internal/service/databrew/list_pages_gen.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)gluedatabrew" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: gluedatabrew-in-const-name + languages: + - go + message: Do not use "gluedatabrew" in const name inside databrew package + paths: + include: + - internal/service/databrew + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)gluedatabrew" + severity: WARNING + - id: gluedatabrew-in-var-name + languages: + - go + message: Do not use "gluedatabrew" in var name inside databrew package + paths: + include: + - internal/service/databrew + patterns: + - pattern: var $NAME = ... 
+ - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)gluedatabrew" + severity: WARNING - id: grafana-in-func-name languages: - go @@ -4108,50 +4215,3 @@ rules: patterns: - pattern-regex: "(?i)InternetMonitor" severity: WARNING - - id: internetmonitor-in-var-name - languages: - - go - message: Do not use "InternetMonitor" in var name inside internetmonitor package - paths: - include: - - internal/service/internetmonitor - patterns: - - pattern: var $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)InternetMonitor" - severity: WARNING - - id: iot-in-func-name - languages: - - go - message: Do not use "IoT" in func name inside iot package - paths: - include: - - internal/service/iot - exclude: - - internal/service/iot/list_pages_gen.go - patterns: - - pattern: func $NAME( ... ) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)IoT" - - focus-metavariable: $NAME - - pattern-not: func $NAME($T *testing.T) - severity: WARNING - - id: iot-in-test-name - languages: - - go - message: Include "IoT" in test name - paths: - include: - - internal/service/iot/*_test.go - patterns: - - pattern: func $NAME( ... ) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-not-regex: "^TestAccIoT" - - pattern-regex: ^TestAcc.* - severity: WARNING diff --git a/.ci/.semgrep-service-name2.yml b/.ci/.semgrep-service-name2.yml index 00764317d3a..e713407e1b2 100644 --- a/.ci/.semgrep-service-name2.yml +++ b/.ci/.semgrep-service-name2.yml @@ -1,5 +1,52 @@ # Generated by internal/generate/servicesemgrep/main.go; DO NOT EDIT. rules: + - id: internetmonitor-in-var-name + languages: + - go + message: Do not use "InternetMonitor" in var name inside internetmonitor package + paths: + include: + - internal/service/internetmonitor + patterns: + - pattern: var $NAME = ... 
+ - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)InternetMonitor" + severity: WARNING + - id: iot-in-func-name + languages: + - go + message: Do not use "IoT" in func name inside iot package + paths: + include: + - internal/service/iot + exclude: + - internal/service/iot/list_pages_gen.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)IoT" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: iot-in-test-name + languages: + - go + message: Include "IoT" in test name + paths: + include: + - internal/service/iot/*_test.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccIoT" + - pattern-regex: ^TestAcc.* + severity: WARNING - id: iot-in-const-name languages: - go @@ -2744,6 +2791,67 @@ rules: patterns: - pattern-regex: "(?i)NetworkManager" severity: WARNING + - id: networkmonitor-in-func-name + languages: + - go + message: Do not use "NetworkMonitor" in func name inside networkmonitor package + paths: + include: + - internal/service/networkmonitor + exclude: + - internal/service/networkmonitor/list_pages_gen.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)NetworkMonitor" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: networkmonitor-in-test-name + languages: + - go + message: Include "NetworkMonitor" in test name + paths: + include: + - internal/service/networkmonitor/*_test.go + patterns: + - pattern: func $NAME( ... 
) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccNetworkMonitor" + - pattern-regex: ^TestAcc.* + severity: WARNING + - id: networkmonitor-in-const-name + languages: + - go + message: Do not use "NetworkMonitor" in const name inside networkmonitor package + paths: + include: + - internal/service/networkmonitor + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)NetworkMonitor" + severity: WARNING + - id: networkmonitor-in-var-name + languages: + - go + message: Do not use "NetworkMonitor" in var name inside networkmonitor package + paths: + include: + - internal/service/networkmonitor + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)NetworkMonitor" + severity: WARNING - id: oam-in-func-name languages: - go @@ -4111,64 +4219,3 @@ rules: - focus-metavariable: $NAME - pattern-not: func $NAME($T *testing.T) severity: WARNING - - id: recyclebin-in-const-name - languages: - - go - message: Do not use "recyclebin" in const name inside rbin package - paths: - include: - - internal/service/rbin - patterns: - - pattern: const $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)recyclebin" - severity: WARNING - - id: recyclebin-in-var-name - languages: - - go - message: Do not use "recyclebin" in var name inside rbin package - paths: - include: - - internal/service/rbin - patterns: - - pattern: var $NAME = ... - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)recyclebin" - severity: WARNING - - id: redshift-in-func-name - languages: - - go - message: Do not use "Redshift" in func name inside redshift package - paths: - include: - - internal/service/redshift - exclude: - - internal/service/redshift/list_pages_gen.go - patterns: - - pattern: func $NAME( ... 
) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-regex: "(?i)Redshift" - - focus-metavariable: $NAME - - pattern-not: func $NAME($T *testing.T) - severity: WARNING - - id: redshift-in-test-name - languages: - - go - message: Include "Redshift" in test name - paths: - include: - - internal/service/redshift/*_test.go - patterns: - - pattern: func $NAME( ... ) - - metavariable-pattern: - metavariable: $NAME - patterns: - - pattern-not-regex: "^TestAccRedshift" - - pattern-regex: ^TestAcc.* - severity: WARNING diff --git a/.ci/.semgrep-service-name3.yml b/.ci/.semgrep-service-name3.yml index b2b42d42639..f09c45aa954 100644 --- a/.ci/.semgrep-service-name3.yml +++ b/.ci/.semgrep-service-name3.yml @@ -1,5 +1,66 @@ # Generated by internal/generate/servicesemgrep/main.go; DO NOT EDIT. rules: + - id: recyclebin-in-const-name + languages: + - go + message: Do not use "recyclebin" in const name inside rbin package + paths: + include: + - internal/service/rbin + patterns: + - pattern: const $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)recyclebin" + severity: WARNING + - id: recyclebin-in-var-name + languages: + - go + message: Do not use "recyclebin" in var name inside rbin package + paths: + include: + - internal/service/rbin + patterns: + - pattern: var $NAME = ... + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)recyclebin" + severity: WARNING + - id: redshift-in-func-name + languages: + - go + message: Do not use "Redshift" in func name inside redshift package + paths: + include: + - internal/service/redshift + exclude: + - internal/service/redshift/list_pages_gen.go + patterns: + - pattern: func $NAME( ... 
) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-regex: "(?i)Redshift" + - focus-metavariable: $NAME + - pattern-not: func $NAME($T *testing.T) + severity: WARNING + - id: redshift-in-test-name + languages: + - go + message: Include "Redshift" in test name + paths: + include: + - internal/service/redshift/*_test.go + patterns: + - pattern: func $NAME( ... ) + - metavariable-pattern: + metavariable: $NAME + patterns: + - pattern-not-regex: "^TestAccRedshift" + - pattern-regex: ^TestAcc.* + severity: WARNING - id: redshift-in-const-name languages: - go diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index 97f9d9ef96c..59febe30a6d 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -1,9 +1,9 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint -go 1.22 +go 1.22.5 require ( - github.com/aws/aws-sdk-go v1.54.1 + github.com/aws/aws-sdk-go v1.54.19 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 199b6a71785..5b3676e7422 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -9,8 +9,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.54.1 h1:+ULL7oLC+v3T00fOMIohUarPI3SR3oyDd6FBEvgdhvs= -github.com/aws/aws-sdk-go v1.54.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= +github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bflad/gopaniccheck v0.1.0 
h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 54a93239e61..84dc7dc08e7 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -829,27 +829,48 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -859,6 +880,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -1079,6 +1106,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: 
endpoint{}, @@ -1091,6 +1121,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -5351,6 +5384,18 @@ var awsPartition = partition{ Region: "ca-central-1", Variant: dualStackVariant, }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5393,6 +5438,51 @@ var awsPartition = partition{ Region: "eu-west-3", Variant: dualStackVariant, }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "cloud9-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", }: endpoint{}, @@ -5421,6 +5511,18 @@ var awsPartition = partition{ Region: "us-east-1", Variant: dualStackVariant, }: endpoint{}, + 
endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, @@ -5428,6 +5530,18 @@ var awsPartition = partition{ Region: "us-east-2", Variant: dualStackVariant, }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, @@ -5435,6 +5549,18 @@ var awsPartition = partition{ Region: "us-west-1", Variant: dualStackVariant, }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -5442,6 +5568,18 @@ var awsPartition = partition{ Region: "us-west-2", Variant: dualStackVariant, }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloud9-fips.us-west-2.api.aws", + }, }, }, "cloudcontrolapi": service{ @@ -5449,78 +5587,216 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ 
+ Hostname: "cloudcontrolapi.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-west-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.ca-west-1.api.aws", + }, endpointKey{ Region: "ca-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: 
endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -5578,51 +5854,123 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com", }, + 
endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-west-2.api.aws", + }, }, }, "clouddirectory": service{ @@ -7050,6 +7398,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -7080,6 +7431,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -7195,6 +7549,9 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -7225,6 +7582,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14900,6 +15260,18 @@ var 
awsPartition = partition{ }, }, }, + "globalaccelerator": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "glue": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20938,6 +21310,9 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -20956,6 +21331,21 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-central-1-fips", + }: endpoint{ + Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22509,6 +22899,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "oidc.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -23152,91 +23550,490 @@ var awsPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "af-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.af-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-east-1.api.aws", + Protocols: 
[]string{"https"}, + }, endpointKey{ Region: "ap-northeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-northeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-northeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-3", - }: endpoint{}, + }: endpoint{ + Protocols: 
[]string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-3.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ap-southeast-4", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ap-southeast-4.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "ca-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.ca-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-central-2", - }: endpoint{}, + }: 
endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-central-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-central-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-north-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-south-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-south-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "eu-west-3", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.eu-west-3.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "pi-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: 
"fips-ca-west-1", + }: endpoint{ + Hostname: "pi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "il-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.il-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-central-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-central-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "me-south-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.me-south-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "sa-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.sa-east-1.api.aws", + Protocols: 
[]string{"https"}, + }, endpointKey{ Region: "us-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-east-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-east-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-east-2.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-west-2", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"pi.us-west-2.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-west-2.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -30386,6 +31183,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "sms-voice-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -32826,36 +33626,96 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.ca-central-1.api.aws", + }, endpointKey{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + 
endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.eu-west-3.api.aws", + }, endpointKey{ Region: "fips-ca-central-1", }: endpoint{ @@ -32904,39 +33764,87 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-east-2.amazonaws.com", }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-east-2.api.aws", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-1.amazonaws.com", }, + endpointKey{ + 
Region: "us-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-west-2.amazonaws.com", }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-west-2.api.aws", + }, }, }, "thinclient": service{ @@ -33562,6 +34470,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1-fips", + }: endpoint{ + Hostname: "translate-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -36075,6 +36998,21 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "acm-pca": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + Protocols: []string{"https"}, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "airflow": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36351,9 +37289,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: 
"cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "cloudformation": service{ @@ -37412,10 +38362,28 @@ var awscnPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, endpointKey{ Region: "cn-northwest-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn", + Protocols: []string{"https"}, + }, }, }, "pipes": service{ @@ -39233,21 +40201,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws", + }, }, }, "clouddirectory": service{ @@ -41977,6 +42969,62 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "kinesisvideo": service{ + Endpoints: 
serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, "kms": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -42748,12 +43796,76 @@ var awsusgovPartition = partition{ }, "pi": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + 
Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-east-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "pi.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.amazonaws.com", + Protocols: []string{"https"}, + }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "pi-fips.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, }, }, "pinpoint": service{ @@ -44761,21 +45873,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "textract-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "textract.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: 
"textract-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "textract-fips.us-gov-west-1.api.aws", + }, }, }, "transcribe": service{ @@ -46048,40 +47184,20 @@ var awsisoPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-iso-east-1", + Region: "us-iso-east-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", + Hostname: "redshift.us-iso-east-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-east-1", }, - Deprecated: boxedTrue, }, endpointKey{ - Region: "fips-us-iso-west-1", + Region: "us-iso-west-1", }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", + Hostname: "redshift.us-iso-west-1.c2s.ic.gov", CredentialScope: credentialScope{ Region: "us-iso-west-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-iso-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-east-1.c2s.ic.gov", - }, - endpointKey{ - Region: "us-iso-west-1", - }: endpoint{}, - endpointKey{ - Region: "us-iso-west-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-iso-west-1.c2s.ic.gov", }, }, }, @@ -47057,22 +48173,12 @@ var awsisobPartition = partition{ "redshift": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "fips-us-isob-east-1", + Region: "us-isob-east-1", }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", + Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov", CredentialScope: credentialScope{ Region: "us-isob-east-1", }, - Deprecated: boxedTrue, - }, - endpointKey{ - Region: "us-isob-east-1", - }: endpoint{}, - endpointKey{ - Region: "us-isob-east-1", - Variant: fipsVariant, - }: endpoint{ - Hostname: "redshift-fips.us-isob-east-1.sc2s.sgov.gov", }, }, }, diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt 
index 6cad5424ce1..ffe99ec31c3 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -28,7 +28,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# github.com/aws/aws-sdk-go v1.54.1 +# github.com/aws/aws-sdk-go v1.54.19 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints diff --git a/.ci/semgrep/errors/error-checks.go b/.ci/semgrep/errors/error-checks.go new file mode 100644 index 00000000000..d2a615b2a43 --- /dev/null +++ b/.ci/semgrep/errors/error-checks.go @@ -0,0 +1,183 @@ +package main + +import ( + "context" + "errors" + "time" + + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func test1() { + _, err := call() + + // ruleid: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } + + return +} + +func test2() { + _, err := call() + + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } + + if err != nil { + return + } + + return +} + +func test3() { + _, err := call() + + if err != nil { + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } + return + } + + return +} + +func test4() { + _, err := call() + + if err == nil { + return + // ok: notfound-without-err-checks + } else if tfresource.NotFound(err) { + return + } else { + return + } +} + +func test5() { + _, err := call() + + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } else if err != nil { + return + } else { + return + } +} + +func test6() error { + _, err := call() + + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } + + return err +} + +func test7() { + for { + _, err := call() + + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + continue + } + } + + return err +} + 
+func test8() { + _, err := call() + + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } + + if tfawserr.ErrCodeEquals(err, "SomeError") { + return + } + + if err != nil { + return + } + + return +} + +func test9() { + _, err := call() + + if err != nil { + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } else { + return + } + } + + return +} + +func test10() { + _, err := call() + + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } else if err != nil { + return + } + + return +} + +func test11() { + ctx := context.Background() + + tfresource.RetryWhen(ctx, 1*time.Second, nil, func(err error) (bool error) { + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return true, err + } + + return false, err + }) +} + +func test12() { + _, err := call() + + // ok: notfound-without-err-checks + if tfresource.NotFound(err) { + return + } + + if PreCheckSkipError(err) { + return + } + + if err != nil { + return + } + + return +} + +func call() (any, error) { + return nil, errors.New("error") +} diff --git a/.ci/semgrep/errors/error-checks.yml b/.ci/semgrep/errors/error-checks.yml new file mode 100644 index 00000000000..cc7967d7251 --- /dev/null +++ b/.ci/semgrep/errors/error-checks.yml @@ -0,0 +1,59 @@ +rules: + - id: notfound-without-err-checks + languages: [go] + message: When checking for tfresource.NotFound() errors, typically other error conditions should be checked. + patterns: + - pattern: | + if tfresource.NotFound($ERR) { ... } + - pattern-not-inside: | + if tfresource.NotFound($ERR) { ... } + if $ERR != nil { ... } + - pattern-not-inside: | + if tfresource.NotFound($ERR) { ... } + if $FUNC($ERR, ...) { ... } + if $ERR != nil { ... } + - pattern-not-inside: | + if err != nil { + if tfresource.NotFound($ERR) { ... } + return ... + } + - pattern-not-inside: | + if err != nil { + if tfresource.NotFound($ERR) { + ... + } else { + ... 
+ } + } + - pattern-not-inside: | + if err == nil { + ... + } else if tfresource.NotFound($ERR) { + ... + } else { ... } + - pattern-not-inside: | + if tfresource.NotFound($ERR) { + ... + } else if err != nil { + ... + } else { + ... + } + - pattern-not-inside: | + if tfresource.NotFound($ERR) { + ... + } + return $ERR + - pattern-not-inside: | + if tfresource.NotFound($ERR) { + continue + } + - pattern-not-inside: | + if tfresource.NotFound($ERR) { + ... + } else if err != nil { + ... + } + - pattern-not-inside: | + tfresource.RetryWhen(...) + severity: ERROR diff --git a/.ci/semgrep/migrate/context.yml b/.ci/semgrep/migrate/context.yml index 571b98a0343..8bdc830bc96 100644 --- a/.ci/semgrep/migrate/context.yml +++ b/.ci/semgrep/migrate/context.yml @@ -30,6 +30,7 @@ rules: - pattern-not: conn.Options() - pattern-not: codestarconnections_sdkv2.$API() - pattern-not: connectcases_sdkv2.$API() + - pattern-not: kafkaconnect_sdkv2.$API() - pattern-not: mediaconnect_sdkv2.$API() - pattern-not: pcaconnectorad_sdkv2.$API() severity: ERROR diff --git a/.ci/semgrep/migrate/paginator.yml b/.ci/semgrep/migrate/paginator.yml new file mode 100644 index 00000000000..cc65b5dbe2e --- /dev/null +++ b/.ci/semgrep/migrate/paginator.yml @@ -0,0 +1,7 @@ +rules: + - id: paginator-more-pages + languages: [go] + message: Use `for pages.HasMorePages()`, not `if pages.HasMorePages()` + patterns: + - pattern-regex: 'if\s+pages\.HasMorePages\(\)' + severity: ERROR diff --git a/.ci/tools/go.mod b/.ci/tools/go.mod index 0b365f33266..1fd3c8d798c 100644 --- a/.ci/tools/go.mod +++ b/.ci/tools/go.mod @@ -1,17 +1,17 @@ module github.com/hashicorp/terraform-provider-aws/tools -go 1.22.2 +go 1.22.5 require ( github.com/YakDriver/tfproviderdocs v0.13.0 github.com/client9/misspell v0.3.4 github.com/golangci/golangci-lint v1.59.1 - github.com/hashicorp/copywrite v0.18.0 + github.com/hashicorp/copywrite v0.19.0 github.com/hashicorp/go-changelog v0.0.0-20240306190400-974418b4aaa3 github.com/katbyte/terrafmt 
v0.5.3 github.com/pavius/impi v0.0.3 github.com/rhysd/actionlint v1.7.1 - github.com/terraform-linters/tflint v0.51.1 + github.com/terraform-linters/tflint v0.52.0 github.com/uber-go/gopatch v0.4.0 mvdan.cc/gofumpt v0.6.0 ) @@ -51,7 +51,7 @@ require ( github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect - github.com/aws/aws-sdk-go v1.44.122 // indirect + github.com/aws/aws-sdk-go v1.54.19 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect github.com/bgentry/speakeasy v0.1.0 // indirect @@ -135,16 +135,16 @@ require ( github.com/gostaticanalysis/nilerr v0.1.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-getter v1.7.4 // indirect + github.com/hashicorp/go-getter v1.7.5 // indirect github.com/hashicorp/go-hclog v1.6.3 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-plugin v1.6.1 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/hc-install v0.6.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/hcl/v2 v2.20.1 // indirect + github.com/hashicorp/hcl/v2 v2.21.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.20.0 // indirect github.com/hashicorp/terraform-json v0.21.0 // indirect @@ -159,7 +159,7 @@ require ( github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect github.com/jedib0t/go-pretty v4.3.0+incompatible // indirect github.com/jedib0t/go-pretty/v6 v6.4.6 // indirect - github.com/jessevdk/go-flags v1.5.0 // indirect + 
github.com/jessevdk/go-flags v1.6.1 // indirect github.com/jgautheron/goconst v1.7.1 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect @@ -268,7 +268,7 @@ require ( github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tdakkota/asciicheck v0.2.0 // indirect github.com/terraform-linters/tflint-plugin-sdk v0.20.0 // indirect - github.com/terraform-linters/tflint-ruleset-terraform v0.7.0 // indirect + github.com/terraform-linters/tflint-ruleset-terraform v0.8.0 // indirect github.com/tetafro/godot v1.4.16 // indirect github.com/thanhpk/randstr v1.0.4 // indirect github.com/thlib/go-timezone-local v0.0.0-20210907160436-ef149e42d28e // indirect @@ -311,7 +311,7 @@ require ( golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect @@ -320,9 +320,9 @@ require ( golang.org/x/tools v0.22.0 // indirect google.golang.org/api v0.162.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/grpc v1.63.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/grpc v1.64.0 // indirect google.golang.org/protobuf v1.34.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect diff --git a/.ci/tools/go.sum b/.ci/tools/go.sum index 
1d30c7876a6..0020a79d899 100644 --- a/.ci/tools/go.sum +++ b/.ci/tools/go.sum @@ -278,8 +278,9 @@ github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8ger github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= -github.com/aws/aws-sdk-go v1.44.122 h1:p6mw01WBaNpbdP2xrisz5tIkcNwzj/HysobNoaAHjgo= github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= +github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= @@ -367,8 +368,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= 
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -470,8 +471,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= -github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= +github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= @@ -659,8 +660,8 @@ github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslC github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= -github.com/hashicorp/copywrite v0.18.0 h1:6f3aBDyQLBXhD6cdGSnsEM37vCDi3JJrkbR9HPBJf5c= -github.com/hashicorp/copywrite v0.18.0/go.mod h1:6wvQH+ICDoD2bpjO1RJ6fi+h3aY5NeLEM12oTkEtFoc= +github.com/hashicorp/copywrite v0.19.0 h1:f9LVxTDBfFYeQmdBpOsZ+HWknXonI8ZwubbO/RwyuCo= +github.com/hashicorp/copywrite 
v0.19.0/go.mod h1:6wvQH+ICDoD2bpjO1RJ6fi+h3aY5NeLEM12oTkEtFoc= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -668,8 +669,8 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-getter v1.7.4 h1:3yQjWuxICvSpYwqSayAdKRFcvBl1y/vogCxczWSmix0= -github.com/hashicorp/go-getter v1.7.4/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-getter v1.7.5 h1:dT58k9hQ/vbxNMwoI5+xFYAJuv6152UNvdHokfI5wE4= +github.com/hashicorp/go-getter v1.7.5/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.8.0/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= @@ -682,8 +683,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.0.1/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-plugin v1.6.1 
h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI= +github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0= github.com/hashicorp/go-retryablehttp v0.5.4/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -707,8 +708,8 @@ github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkm github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= -github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= @@ -756,8 +757,8 @@ github.com/jedib0t/go-pretty v4.3.0+incompatible/go.mod h1:XemHduiw8R651AF9Pt4Fw github.com/jedib0t/go-pretty/v6 v6.4.6 h1:v6aG9h6Uby3IusSSEjHaZNXpHFhzqMmjXcPq1Rjl9Jw= github.com/jedib0t/go-pretty/v6 v6.4.6/go.mod h1:Ndk3ase2CkQbXLLNf5QDHoYb6J9WtVfmHZu9n8rk2xs= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jessevdk/go-flags 
v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4= +github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/jgautheron/goconst v1.7.1 h1:VpdAG7Ca7yvvJk5n8dMwQhfEZJh95kl/Hl9S1OI5Jkk= github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= @@ -1134,12 +1135,12 @@ github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/terraform-linters/tflint v0.51.1 h1:+8qTeeLDUZALlXn5l25ZK+yce67+vR3mDhBgRWFjiz8= -github.com/terraform-linters/tflint v0.51.1/go.mod h1:HyTb6IKcVkJG0eiI2mD9LffDOlkmgMSPh4bZDrAOI0w= +github.com/terraform-linters/tflint v0.52.0 h1:ZHzquuGCTpDXQUoZ8XCTnBeogZ9+V7fzS7iKv8GaUws= +github.com/terraform-linters/tflint v0.52.0/go.mod h1:W2gKr4g7zdG8zL+7xIYpKCkKtcQbdWP+hDNtUuJju4s= github.com/terraform-linters/tflint-plugin-sdk v0.20.0 h1:e7vfAI2rvAtClPx+eY0fd7kvmY0plVRDycgSQL9UQ4o= github.com/terraform-linters/tflint-plugin-sdk v0.20.0/go.mod h1:ySt9h+KoEhCM/zjjigIZC2J2Tboyzf53437PAYjrKxc= -github.com/terraform-linters/tflint-ruleset-terraform v0.7.0 h1:yV/sxzk1nO4hGikkaqWGZ9A4aKdRCigxWCAb4Y4Mcsg= -github.com/terraform-linters/tflint-ruleset-terraform v0.7.0/go.mod h1:nyZpkZu1x6n+l+XE8m14ADdQTpzitZVP+DZt/AHsQKQ= +github.com/terraform-linters/tflint-ruleset-terraform v0.8.0 h1:31zqg0fKSZYaH9Dgg/aiYSI76yp8n1awATVGdic6kxo= +github.com/terraform-linters/tflint-ruleset-terraform v0.8.0/go.mod h1:YoN4gNfVGPi4guQFyHZ/lxqpT5HyRMsl+TQKaS2YqWo= github.com/tetafro/godot v1.4.16 h1:4ChfhveiNLk4NveAZ9Pu2AN8QZ2nkUGFuadM9lrr5D0= 
github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= github.com/thanhpk/randstr v1.0.4 h1:IN78qu/bR+My+gHCvMEXhR/i5oriVHcTB/BJJIRTsNo= @@ -1209,8 +1210,8 @@ github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBv github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -github.com/zclconf/go-cty-debug v0.0.0-20240417160409-8c45e122ae1a h1:/o/Emn22dZIQ7AhyA0aLOKo528WG/WRAM5tqzIoQIOs= -github.com/zclconf/go-cty-debug v0.0.0-20240417160409-8c45e122ae1a/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= github.com/zclconf/go-cty-yaml v1.0.3 h1:og/eOQ7lvA/WWhHGFETVWNduJM7Rjsv2RRpx1sdFMLc= github.com/zclconf/go-cty-yaml v1.0.3/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= gitlab.com/bosi/decorder v0.4.2 h1:qbQaV3zgwnBZ4zPMhGLW4KZe7A7NwxEhJx39R3shffo= @@ -1417,8 +1418,8 @@ golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1816,10 +1817,10 @@ google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= +google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc 
v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1857,8 +1858,8 @@ google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/.github/actionlint.yaml b/.github/actionlint.yml similarity index 100% rename from .github/actionlint.yaml rename to .github/actionlint.yml diff --git a/.github/actions/community_check/README.md b/.github/actions/community_check/README.md new file mode 100644 index 00000000000..b14cea8b708 --- /dev/null +++ b/.github/actions/community_check/README.md @@ -0,0 +1,41 @@ +# Community Check + +Check a username to see if it's in one of our community lists. We use this to help automate tasks within the repository. 
+ +## Usage + +### Inputs + +| Input | Required | Description | +| ------------------- | -------- | --------------------------------------------- | +| `user_login` | true | The GitHub username to check | +| `core_contributors` | false | The base64 encoded list of Core Contributors | +| `maintainers` | false | The base64 encoded list of maintainers | +| `partners` | false | The base64 encoded list of partner contributors | + +### Outputs + +| Output | Default | Description | +| ------------------ | ------- | ----------------------------------------- | +| `core_contributor` | `null` | Whether the user is a Core Contributor | +| `maintainer` | `null` | Whether the user is a maintainer | +| `partner` | `null` | Whether the user is a partner contributor | + +### Example + +```yaml +steps: + - name: Checkout + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + sparse-checkout: .github/actions/community_check + + - name: Community Check + id: community_check + uses: ./.github/actions/community_check + with: + user_login: ${{ github.event.issue.user.login }} + maintainers: ${{ secrets.MAINTAINERS }} + core_contributors: ${{ secrets.CORE_CONTRIBUTORS }} + partners: ${{ secrets.PARTNERS }} +``` diff --git a/.github/actions/community_check/action.yml b/.github/actions/community_check/action.yml new file mode 100644 index 00000000000..ac3d8e3758d --- /dev/null +++ b/.github/actions/community_check/action.yml @@ -0,0 +1,53 @@ +name: Community Check +description: Check a username against our lists of groups within the community + +inputs: + user_login: + description: The GitHub username to check + required: true + + core_contributors: + description: The base64 encoded list of Core Contributors + required: false + + maintainers: + description: The base64 encoded list of maintainers + required: false + + partners: + description: The base64 encoded list of partners + required: false + +outputs: + core_contributor: + description: Whether the user is a
Core Contributor + value: ${{ steps.core_contributor.outputs.check }} + + maintainer: + description: Whether the user is a maintainer + value: ${{ steps.maintainer.outputs.check }} + + partner: + description: Whether the user is a partner + value: ${{ steps.partner.outputs.check }} + +runs: + using: composite + steps: + - name: Core Contributor + id: core_contributor + if: inputs.core_contributors != '' + shell: bash + run: echo "check=$(echo $INPUT_CORE_CONTRIBUTORS | base64 --decode | jq --arg u $INPUT_USER_LOGIN '. | contains([$u])')" >> "$GITHUB_OUTPUT" + + - name: Maintainers + id: maintainer + if: inputs.maintainers != '' + shell: bash + run: echo "check=$(echo $INPUT_MAINTAINERS | base64 --decode | jq --arg u $INPUT_USER_LOGIN '. | contains([$u])')" >> "$GITHUB_OUTPUT" + + - name: Partners + id: partner + if: inputs.partners != '' + shell: bash + run: echo "check=$(echo $INPUT_PARTNERS | base64 --decode | jq --arg u $INPUT_USER_LOGIN '. | contains([$u])')" >> "$GITHUB_OUTPUT" diff --git a/.github/dependabot.yml b/.github/dependabot.yml index d6717502a00..f87fa76f623 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,12 +1,18 @@ version: 2 updates: - - directory: "/" - package-ecosystem: "github-actions" + - package-ecosystem: "github-actions" + directory: "/" schedule: interval: "daily" - - directory: "/" - package-ecosystem: "gomod" + - package-ecosystem: "gomod" + directories: + - "/" + - "/.ci/providerlint" + - "/.ci/tools" + - "/skaff" + - "/tools/awssdkpatch" + - "/tools/tfsdk2fw" groups: aws-sdk-go: patterns: @@ -34,7 +40,6 @@ updates: - dependency-name: "github.com/hashicorp/terraform-plugin-log" # go-hclog should only be updated via terraform-plugin-log - dependency-name: "github.com/hashicorp/go-hclog" - - dependency-name: "golang.org/x/tools" # grpc should only be updated via terraform-plugin-go/terraform-plugin-framework - dependency-name: "google.golang.org/grpc" # protobuf should only be updated via 
terraform-plugin-go/terraform-plugin-framework @@ -43,54 +48,7 @@ updates: interval: "daily" open-pull-requests-limit: 30 - - directory: "/.ci/providerlint" - package-ecosystem: "gomod" - ignore: - - dependency-name: "golang.org/x/tools" - - dependency-name: "google.golang.org/grpc" - - dependency-name: "google.golang.org/protobuf" - schedule: - interval: "daily" - - - directory: "/.ci/tools" - package-ecosystem: "gomod" - ignore: - - dependency-name: "golang.org/x/tools" - - dependency-name: "google.golang.org/grpc" - - dependency-name: "google.golang.org/protobuf" - schedule: - interval: "daily" - - - directory: "/skaff" - package-ecosystem: "gomod" - ignore: - - dependency-name: "golang.org/x/tools" - - dependency-name: "google.golang.org/grpc" - - dependency-name: "google.golang.org/protobuf" - schedule: - interval: "daily" - - - directory: "/tools/awssdkpatch" - package-ecosystem: "gomod" - allow: - - dependency-type: direct - schedule: - interval: "daily" - - - directory: "/tools/tfsdk2fw" - package-ecosystem: "gomod" - allow: - - dependency-type: direct - ignore: - # terraform-plugin-sdk/v2 should only be updated via terraform-provider-aws - - dependency-name: "github.com/hashicorp/terraform-plugin-sdk/v2" - - dependency-name: "golang.org/x/tools" - - dependency-name: "google.golang.org/grpc" - - dependency-name: "google.golang.org/protobuf" - schedule: - interval: "daily" - - - directory: "/infrastructure/repository" - package-ecosystem: "terraform" + - package-ecosystem: "terraform" + directory: "/infrastructure/repository" schedule: interval: "daily" diff --git a/.github/labeler-issue-triage.yml b/.github/labeler-issue-triage.yml index 7aafc32619f..dd34122729f 100644 --- a/.github/labeler-issue-triage.yml +++ b/.github/labeler-issue-triage.yml @@ -69,6 +69,8 @@ service/applicationcostprofiler: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_applicationcostprofiler_' service/applicationinsights: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_applicationinsights_' 
+service/applicationsignals: + - '((\*|-)\s*`?|(data|resource)\s+"?)aws_applicationsignals_' service/appmesh: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_appmesh_' service/apprunner: @@ -485,6 +487,8 @@ service/networkfirewall: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_networkfirewall_' service/networkmanager: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_networkmanager_' +service/networkmonitor: + - '((\*|-)\s*`?|(data|resource)\s+"?)aws_networkmonitor_' service/nimble: - '((\*|-)\s*`?|(data|resource)\s+"?)aws_nimble_' service/oam: diff --git a/.github/labeler-pr-triage.yml b/.github/labeler-pr-triage.yml index b9b21a363f2..9b7bed4fdc3 100644 --- a/.github/labeler-pr-triage.yml +++ b/.github/labeler-pr-triage.yml @@ -240,6 +240,12 @@ service/applicationinsights: - any-glob-to-any-file: - 'internal/service/applicationinsights/**/*' - 'website/**/applicationinsights_*' +service/applicationsignals: + - any: + - changed-files: + - any-glob-to-any-file: + - 'internal/service/applicationsignals/**/*' + - 'website/**/applicationsignals_*' service/appmesh: - any: - changed-files: @@ -1532,6 +1538,12 @@ service/networkmanager: - any-glob-to-any-file: - 'internal/service/networkmanager/**/*' - 'website/**/networkmanager_*' +service/networkmonitor: + - any: + - changed-files: + - any-glob-to-any-file: + - 'internal/service/networkmonitor/**/*' + - 'website/**/networkmonitor_*' service/nimble: - any: - changed-files: diff --git a/.github/workflows/acctest-terraform-lint.yml b/.github/workflows/acctest-terraform-lint.yml index 2685e29efd0..f7a29a26816 100644 --- a/.github/workflows/acctest-terraform-lint.yml +++ b/.github/workflows/acctest-terraform-lint.yml @@ -24,7 +24,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: 
actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 @@ -52,7 +52,7 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 diff --git a/.github/workflows/changelog_misspell.yml b/.github/workflows/changelog_misspell.yml index 587b83398b0..f8c83876cd6 100644 --- a/.github/workflows/changelog_misspell.yml +++ b/.github/workflows/changelog_misspell.yml @@ -21,7 +21,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 diff --git a/.github/workflows/copyright.yml b/.github/workflows/copyright.yml index b2ea84d3dc3..61c9fd83f2c 100644 --- a/.github/workflows/copyright.yml +++ b/.github/workflows/copyright.yml @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod # See also: https://github.com/actions/setup-go/issues/54 diff --git a/.github/workflows/dependencies.yml b/.github/workflows/dependencies.yml index 6f7c8cbc674..af25d229584 100644 --- a/.github/workflows/dependencies.yml +++ b/.github/workflows/dependencies.yml @@ -77,7 +77,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: 
actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: .go-version - name: go mod diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 35e1d3a8ffc..a0d554e4ba4 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -44,7 +44,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 65c2dba6212..e06b18f1139 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -31,7 +31,7 @@ jobs: with: path: ~/go/pkg/mod key: ${{ runner.os }}-go-pkg-mod-${{ hashFiles('go.sum') }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod @@ -80,7 +80,7 @@ jobs: with: path: ~/go/pkg/mod key: ${{ runner.os }}-go-pkg-mod-${{ hashFiles('go.sum') }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - name: go build diff --git a/.github/workflows/firewatch.yml b/.github/workflows/firewatch.yml index 349e8f06c5e..d4c8c647d2a 100644 --- a/.github/workflows/firewatch.yml +++ b/.github/workflows/firewatch.yml @@ -17,7 +17,7 @@ jobs: slack_token: ${{ secrets.SLACK_BOT_TOKEN }} slack_channel: ${{ secrets.SLACK_CHANNEL }} - name: UploadArtifact - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + uses: 
actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: firewatch path: firewatch.data diff --git a/.github/workflows/generate_changelog.yml b/.github/workflows/generate_changelog.yml index e83702a7bde..ca5e9997871 100644 --- a/.github/workflows/generate_changelog.yml +++ b/.github/workflows/generate_changelog.yml @@ -8,7 +8,7 @@ jobs: if: github.event.pull_request.merged || github.event_name == 'workflow_dispatch' runs-on: ubuntu-latest steps: - - uses: actions/create-github-app-token@c8f55efbd427e7465d6da1106e7979bc8aaee856 # v1.10.1 + - uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 id: app-token with: app-id: ${{ secrets.APP_ID }} @@ -17,7 +17,7 @@ jobs: with: fetch-depth: 0 token: ${{ steps.app-token.outputs.token }} - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: .ci/tools/go.mod - run: cd .ci/tools && go install github.com/hashicorp/go-changelog/cmd/changelog-build diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 27379466a1e..c4e89147571 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -28,7 +28,7 @@ jobs: runs-on: custom-linux-large steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod cache: false @@ -51,7 +51,7 @@ jobs: runs-on: custom-linux-xl steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod cache: false diff --git 
a/.github/workflows/goreleaser-ci.yml b/.github/workflows/goreleaser-ci.yml index db580de597d..7b63f625b5d 100644 --- a/.github/workflows/goreleaser-ci.yml +++ b/.github/workflows/goreleaser-ci.yml @@ -38,7 +38,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 @@ -58,7 +58,7 @@ jobs: runs-on: custom-linux-small steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 diff --git a/.github/workflows/issues.yml b/.github/workflows/issues.yml index b7fc8b68ab1..d8c356ee7eb 100644 --- a/.github/workflows/issues.yml +++ b/.github/workflows/issues.yml @@ -48,12 +48,6 @@ jobs: GH_TOKEN: ${{ steps.token.outputs.token }} run: gh issue edit ${{ env.ISSUE_URL }} --add-label prioritized - - name: 'Add prioritized to regressions' - if: github.event.label.name == 'regression' - env: - GH_TOKEN: ${{ steps.token.outputs.token }} - run: gh issue edit ${{ env.ISSUE_URL }} --add-label prioritized - - name: 'Remove unnecessary labels on closure' if: github.event.action == 'closed' env: @@ -111,21 +105,6 @@ jobs: PROJECT_ITEM_ID=$(gh project item-add ${{ env.PROJECT_NUMBER }} --owner "hashicorp" --url ${{ env.ITEM_URL }} --format json | jq '.id') gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.VIEW_FIELD_ID }} --single-select-option-id ${{ vars.team_project_view_working_board }} - - name: 'Labeled Regression' - if: github.event.label.name == 
'regression' - env: - GH_TOKEN: ${{ steps.token.outputs.token }} - run: | - PROJECT_ITEM_ID=$(gh project item-add ${{ env.PROJECT_NUMBER }} --owner "hashicorp" --url ${{ env.ITEM_URL }} --format json | jq '.id') - gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.VIEW_FIELD_ID }} --single-select-option-id ${{ vars.team_project_view_working_board }} - - gh api graphql -F itemId="$PROJECT_ITEM_ID" -F projectId=${{ env.PROJECT_ID }} -f query=' - mutation($itemId:ID!, $projectId:ID!) { - updateProjectV2ItemPosition(input:{itemId:$itemId, projectId:$projectId}) { - clientMutationId - } - }' - - name: 'Labeled Engineering Initiative' if: github.event.label.name == 'engineering-initiative' env: diff --git a/.github/workflows/provider.yml b/.github/workflows/provider.yml index e00f8ff97eb..4e2dedd886d 100644 --- a/.github/workflows/provider.yml +++ b/.github/workflows/provider.yml @@ -38,7 +38,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 @@ -65,7 +65,7 @@ jobs: path: terraform-plugin-dir key: ${{ runner.os }}-terraform-plugin-dir-${{ hashFiles('go.sum') }}-${{ hashFiles('internal/**') }} - if: steps.cache-terraform-plugin-dir.outputs.cache-hit != 'true' || steps.cache-terraform-plugin-dir.outcome == 'failure' - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod # See also: https://github.com/actions/setup-go/issues/54 @@ -93,7 +93,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: 
actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod # See also: https://github.com/actions/setup-go/issues/54 @@ -127,7 +127,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod # See also: https://github.com/actions/setup-go/issues/54 @@ -154,7 +154,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod # See also: https://github.com/actions/setup-go/issues/54 @@ -187,7 +187,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod # See also: https://github.com/actions/setup-go/issues/54 @@ -278,7 +278,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 @@ -311,7 +311,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # 
v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 diff --git a/.github/workflows/providerlint.yml b/.github/workflows/providerlint.yml index 2518231d1cc..904499882a1 100644 --- a/.github/workflows/providerlint.yml +++ b/.github/workflows/providerlint.yml @@ -24,7 +24,7 @@ jobs: runs-on: custom-linux-medium steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - name: go env diff --git a/.github/workflows/pull_request_target.yml b/.github/workflows/pull_request_target.yml index eef65318e05..b52c2d25fbe 100644 --- a/.github/workflows/pull_request_target.yml +++ b/.github/workflows/pull_request_target.yml @@ -9,190 +9,201 @@ on: - labeled - opened - ready_for_review +env: + ISSUE_URL: ${{ github.event.pull_request.html_url }} jobs: - community_check: - name: 'Community Check' - uses: ./.github/workflows/community-check.yml - secrets: inherit - with: - # This is a ternary that sets the variable to the assigned user's login on assigned events, - # and otherwise sets it to the username of the pull request's author. 
For more information: - # https://docs.github.com/en/actions/learn-github-actions/expressions#example - username: ${{ github.event.action == 'assigned' && github.event.assignee.login || github.event.pull_request.user.login }} - - labeler: - name: 'Automation Labeler' - needs: community_check + labels: + name: Labelers runs-on: ubuntu-latest - env: - PR_URL: ${{ github.event.pull_request.html_url }} steps: - - name: 'Generate Token' + - name: Generate GitHub App Token id: token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 + with: + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.APP_PEM }} + + - name: Checkout + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + sparse-checkout: .github + + - name: Apply Service Labels + if: contains(fromJSON('["opened", "edited"]'), github.event.action) + uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0 with: - app_id: ${{ secrets.APP_ID }} - installation_retrieval_mode: id - installation_retrieval_payload: ${{ secrets.INSTALLATION_ID }} - private_key: ${{ secrets.APP_PEM }} + configuration-path: .github/labeler-pr-triage.yml + repo-token: ${{ steps.token.outputs.token }} - - name: 'Add needs-triage for non-maintainers' - if: github.event.action == 'opened' && needs.community_check.outputs.maintainer == 'false' + - name: Apply Size Labels + if: contains(fromJSON('["opened", "edited"]'), github.event.action) + uses: codelytv/pr-size-labeler@56f6f0fc35c7cc0f72963b8467729e1120cb4bed # v1.10.0 + with: + GITHUB_TOKEN: ${{ steps.token.outputs.token }} + xs_label: "size/XS" + xs_max_size: "30" + s_label: "size/S" + s_max_size: "60" + m_label: "size/M" + m_max_size: "150" + l_label: "size/L" + l_max_size: "300" + xl_label: "size/XL" + message_if_xl: "" + + - name: "Community Check: Author" + id: author + if: github.event.action == 'opened' + uses: 
./.github/actions/community_check + with: + user_login: ${{ github.event.pull_request.user.login }} + maintainers: ${{ secrets.MAINTAINERS }} + core_contributors: ${{ secrets.CORE_CONTRIBUTORS }} + partners: ${{ secrets.PARTNERS }} + + - name: Indicate That Triage is Required + if: | + github.event.action == 'opened' + && steps.author.outputs.maintainer == 'false' env: GH_TOKEN: ${{ steps.token.outputs.token }} - run: gh pr edit ${{ env.PR_URL }} --add-label needs-triage + run: gh pr edit "$ISSUE_URL" --add-label needs-triage - - name: 'Add prioritized to pull requests authored by or assigned to maintainers' - # This conditional is basically an exact copy of an example provided by GitHub: - # https://docs.github.com/en/actions/learn-github-actions/expressions#example-matching-an-array-of-strings - if: contains(fromJSON('["opened", "assigned"]'), github.event.action) && needs.community_check.outputs.maintainer == 'true' + - name: Add prioritized to Maintainer Contributions + if: | + github.event.action == 'opened' + && steps.author.outputs.maintainer == 'true' env: GH_TOKEN: ${{ steps.token.outputs.token }} - run: gh pr edit ${{ env.PR_URL }} --add-label prioritized + run: gh pr edit "$ISSUE_URL" --add-label prioritized - - name: 'Add partner to partner pull requests' - if: github.event.action == 'opened' && needs.community_check.outputs.partner == 'true' + - name: Credit Core Contributor Contributions + if: | + github.event.action == 'opened' + && steps.author.outputs.core_contributor == 'true' env: GH_TOKEN: ${{ steps.token.outputs.token }} - run: gh pr edit ${{ env.PR_URL }} --add-label partner + run: | + gh pr edit "$ISSUE_URL" --add-label external-maintainer - - name: 'Add external-maintainer to external maintainer pull requests' - if: github.event.action == 'opened' && needs.community_check.outputs.core_contributor == 'true' + - name: Credit Partner Contributions + if: | + github.event.action == 'opened' + && steps.author.outputs.partner == 'true' env: 
GH_TOKEN: ${{ steps.token.outputs.token }} - run: gh pr edit ${{ env.PR_URL }} --add-label external-maintainer + run: | + gh pr edit "$ISSUE_URL" --add-label partner + + - name: "Community Check: Assignee" + id: assignee + if: github.event.action == 'assigned' + uses: ./.github/actions/community_check + with: + user_login: ${{ github.event.assignee.login }} + maintainers: ${{ secrets.MAINTAINERS }} - - name: 'Add prioritized to regressions' - if: github.event.label.name == 'regression' + - name: Add prioritized to Maintainer Assignments + if: | + github.event.action == 'assigned' + && steps.assignee.outputs.maintainer == 'true' env: GH_TOKEN: ${{ steps.token.outputs.token }} - run: gh pr edit ${{ env.PR_URL }} --add-label prioritized + run: | + gh pr edit "$ISSUE_URL" --add-label prioritized - - name: 'Remove unnecessary labels on closure' - if: github.event.action == 'closed' + - name: Remove Triage Labels on Closure + if: | + github.event.action == 'closed' + && (contains(github.event.pull_request.labels.*.name, 'needs-triage') || contains(github.event.pull_request.labels.*.name, 'waiting-response')) env: GH_TOKEN: ${{ steps.token.outputs.token }} - run: gh pr edit ${{ env.PR_URL }} --remove-label needs-triage,waiting-response + run: gh pr edit "$ISSUE_URL" --remove-label needs-triage,waiting-response - service_labeler: - name: 'Service Labeler' - if: contains(fromJSON('["opened", "edited"]'), github.event.action) + project: + name: Maintainer Work Board runs-on: ubuntu-latest - steps: - - name: 'Checkout Repo' - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - - name: 'Apply Labels' - uses: actions/labeler@8558fd74291d67161a8a78ce36a881fa63b766a9 # v5.0.0 - with: - configuration-path: .github/labeler-pr-triage.yml - repo-token: ${{ secrets.GITHUB_TOKEN }} - - size_labeler: - name: 'Size Labeler' - if: contains(fromJSON('["opened", "edited"]'), github.event.action) - runs-on: ubuntu-latest - steps: - - name: 'Apply Size Labels' - uses: 
codelytv/pr-size-labeler@56f6f0fc35c7cc0f72963b8467729e1120cb4bed # v1.10.0 - with: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - xs_label: 'size/XS' - xs_max_size: '30' - s_label: 'size/S' - s_max_size: '60' - m_label: 'size/M' - m_max_size: '150' - l_label: 'size/L' - l_max_size: '300' - xl_label: 'size/XL' - message_if_xl: '' - - add_to_project: - name: 'Add to Project' - runs-on: ubuntu-latest - needs: community_check env: # Some gh project calls take the project's ID, some take the project's number PROJECT_ID: "PVT_kwDOAAuecM4AF-7h" PROJECT_NUMBER: "196" STATUS_FIELD_ID: "PVTSSF_lADOAAuecM4AF-7hzgDcsQA" VIEW_FIELD_ID: "PVTSSF_lADOAAuecM4AF-7hzgMRB34" - ITEM_URL: ${{ github.event.pull_request.html_url }} + steps: - - name: 'Generate Token' + - name: Generate GitHub App Token id: token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 + uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 with: - app_id: ${{ secrets.APP_ID }} - installation_retrieval_mode: id - installation_retrieval_payload: ${{ secrets.INSTALLATION_ID }} - private_key: ${{ secrets.APP_PEM }} + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.APP_PEM }} - - name: 'Maintainer Pull Requests' - if: github.event.action == 'opened' && needs.community_check.outputs.maintainer == 'true' + - name: Checkout + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + sparse-checkout: .github/actions/community_check + + - name: Community Check + id: community_check + if: github.event.action == 'opened' + uses: ./.github/actions/community_check + with: + user_login: ${{ github.event.action == 'assigned' && github.event.assignee.login || github.event.pull_request.user.login }} + maintainers: ${{ secrets.MAINTAINERS }} + + - name: Maintainer Pull Requests + if: | + github.event.action == 'opened' + && steps.community_check.outputs.maintainer == 'true' env: GH_TOKEN: ${{ steps.token.outputs.token }} run: | # 
In order to update the item's Status field, we need to capture the project item id from the output - PROJECT_ITEM_ID=$(gh project item-add ${{ env.PROJECT_NUMBER }} --owner "hashicorp" --url ${{ env.ITEM_URL }} --format json | jq '.id') - gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.STATUS_FIELD_ID }} --single-select-option-id ${{ vars.team_project_status_maintainer_pr }} - gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.VIEW_FIELD_ID }} --single-select-option-id ${{ vars.team_project_view_working_board }} - - - name: 'Assigned to Maintainers' - if: github.event.action == 'assigned' && needs.community_check.outputs.maintainer == 'true' + PROJECT_ITEM_ID=$(gh project item-add "$PROJECT_NUMBER" --owner "hashicorp" --url "$ISSUE_URL" --format json | jq '.id') + gh project item-edit --id "$PROJECT_ITEM_ID" --project-id "$PROJECT_ID" --field-id "$STATUS_FIELD_ID" --single-select-option-id ${{ vars.team_project_status_maintainer_pr }} + gh project item-edit --id "$PROJECT_ITEM_ID" --project-id "$PROJECT_ID" --field-id "$VIEW_FIELD_ID" --single-select-option-id ${{ vars.team_project_view_working_board }} + + - name: Assigned to Maintainers + if: | + github.event.action == 'assigned' + && steps.community_check.outputs.maintainer == 'true' env: GH_TOKEN: ${{ steps.token.outputs.token }} run: | - PROJECT_ITEM_ID=$(gh project item-add ${{ env.PROJECT_NUMBER }} --owner "hashicorp" --url ${{ env.ITEM_URL }} --format json | jq '.id') - gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.STATUS_FIELD_ID }} --single-select-option-id ${{ vars.team_project_status_in_progress }} - gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.VIEW_FIELD_ID }} --single-select-option-id ${{ vars.team_project_view_working_board }} + PROJECT_ITEM_ID=$(gh project item-add "$PROJECT_NUMBER" --owner 
"hashicorp" --url "$ISSUE_URL" --format json | jq '.id') + gh project item-edit --id "$PROJECT_ITEM_ID" --project-id "$PROJECT_ID" --field-id "$STATUS_FIELD_ID" --single-select-option-id ${{ vars.team_project_status_in_progress }} + gh project item-edit --id "$PROJECT_ITEM_ID" --project-id "$PROJECT_ID" --field-id "$VIEW_FIELD_ID" --single-select-option-id ${{ vars.team_project_view_working_board }} - - name: 'Labeled Prioritized' + - name: Labeled Prioritized if: github.event.label.name == 'prioritized' env: GH_TOKEN: ${{ steps.token.outputs.token }} run: | - PROJECT_ITEM_ID=$(gh project item-add ${{ env.PROJECT_NUMBER }} --owner "hashicorp" --url ${{ env.ITEM_URL }} --format json | jq '.id') - gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.VIEW_FIELD_ID }} --single-select-option-id ${{ vars.team_project_view_working_board }} + PROJECT_ITEM_ID=$(gh project item-add "$PROJECT_NUMBER" --owner "hashicorp" --url "$ISSUE_URL" --format json | jq '.id') + gh project item-edit --id "$PROJECT_ITEM_ID" --project-id "$PROJECT_ID" --field-id "$VIEW_FIELD_ID" --single-select-option-id ${{ vars.team_project_view_working_board }} - - name: 'Labeled Regression' - if: github.event.label.name == 'regression' - env: - GH_TOKEN: ${{ steps.token.outputs.token }} - run: | - PROJECT_ITEM_ID=$(gh project item-add ${{ env.PROJECT_NUMBER }} --owner "hashicorp" --url ${{ env.ITEM_URL }} --format json | jq '.id') - gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.VIEW_FIELD_ID }} --single-select-option-id ${{ vars.team_project_view_working_board }} - - gh api graphql -F itemId="$PROJECT_ITEM_ID" -F projectId=${{ env.PROJECT_ID }} -f query=' - mutation($itemId:ID!, $projectId:ID!) 
{ - updateProjectV2ItemPosition(input:{itemId:$itemId, projectId:$projectId}) { - clientMutationId - } - }' - - - name: 'Labeled Engineering Initiative' + - name: Labeled Engineering Initiative if: github.event.label.name == 'engineering-initiative' env: GH_TOKEN: ${{ steps.token.outputs.token }} run: | - PROJECT_ITEM_ID=$(gh project item-add ${{ env.PROJECT_NUMBER }} --owner "hashicorp" --url ${{ env.ITEM_URL }} --format json | jq '.id') - gh project item-edit --id "$PROJECT_ITEM_ID" --project-id ${{ env.PROJECT_ID }} --field-id ${{ env.VIEW_FIELD_ID }} --single-select-option-id ${{ vars.team_project_view_engineering_initiative }} + PROJECT_ITEM_ID=$(gh project item-add "$PROJECT_NUMBER" --owner "hashicorp" --url "$ISSUE_URL" --format json | jq '.id') + gh project item-edit --id "$PROJECT_ITEM_ID" --project-id "$PROJECT_ID" --field-id "$VIEW_FIELD_ID" --single-select-option-id ${{ vars.team_project_view_engineering_initiative }} add_to_milestone: - name: 'Add Merged Pull Requests and Related Issues to Milestone' + name: Add Merged Pull Requests and Related Issues to Milestone if: github.event.action == 'closed' && github.event.pull_request.merged runs-on: ubuntu-latest steps: - - name: 'Checkout' + - name: Checkout uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - name: 'Get Current Milestone Name' + - name: Get Current Milestone Name id: get-current-milestone run: echo "current_milestone=v$(head -1 CHANGELOG.md | cut -d " " -f 2)" >> "$GITHUB_OUTPUT" - - name: 'Add Items to Milestone' + - name: Add Items to Milestone env: GH_TOKEN: ${{ github.token }} MILESTONE: ${{ steps.get-current-milestone.outputs.current_milestone }} @@ -201,11 +212,11 @@ jobs: run: ./.ci/scripts/add-to-milestone.sh community_note: - name: 'Community Note' + name: Community Note if: github.event.action == 'opened' runs-on: ubuntu-latest steps: - - name: 'Add community note to new Pull Requests' + - name: Add community note to new Pull Requests uses: 
peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0 with: issue-number: ${{ github.event.pull_request.number }} @@ -225,11 +236,11 @@ jobs: * Whether or not the branch has been rebased will **not** impact prioritization, but doing so is always a welcome surprise. first_contribution_note: - name: 'New Contributor Note' + name: New Contributor Note if: github.event.action == 'opened' runs-on: ubuntu-latest steps: - - name: 'Add comment to add helpful context for new contributors' + - name: Add comment to add helpful context for new contributors uses: actions/first-interaction@34f15e814fe48ac9312ccf29db4e74fa767cbab7 # v1.3.0 with: repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -243,12 +254,26 @@ jobs: Thanks again, and welcome to the community! :smiley: permissions_check: - name: 'Verify Maintainers Editable' - needs: community_check - if: github.event.action == 'opened' && needs.community_check.outputs.maintainer == 'false' && !github.event.pull_request.maintainer_can_modify + name: Verify Maintainers Can Edit runs-on: ubuntu-latest + if: github.event.action == 'opened' steps: - - name: 'Comment if maintainers cannot edit' + - name: Checkout + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + sparse-checkout: .github/actions/community_check + + - name: Community Check + id: community_check + uses: ./.github/actions/community_check + with: + user_login: ${{ github.event.action == 'assigned' && github.event.assignee.login || github.event.pull_request.user.login }} + maintainers: ${{ secrets.MAINTAINERS }} + + - name: Comment if Not + if: | + steps.community_check.outputs.maintainer == 'false' + && !github.event.pull_request.maintainer_can_modify uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4.0.0 with: issue-number: ${{ github.event.pull_request.number }} diff --git a/.github/workflows/regressions.yml b/.github/workflows/regressions.yml index 
766499469b4..c75c445572b 100644 --- a/.github/workflows/regressions.yml +++ b/.github/workflows/regressions.yml @@ -1,23 +1,30 @@ -name: "Regressions Slack Notifier" +name: Handle Regressions +permissions: {} + on: issues: types: - labeled + pull_request_target: types: - labeled + jobs: - slack-notification: - name: Slack Notifier + process: + name: Process Regression if: github.event.label.name == 'regression' runs-on: ubuntu-latest + + env: + ISSUE_URL: ${{ github.event.issue.html_url || github.event.pull_request.html_url }} + steps: - name: Send Slack Notification uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0 env: + ISSUE_TITLE: ${{ toJSON(github.event.issue.title || github.event.pull_request.title) }} SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }} - EVENT_URL: ${{ github.event.issue.html_url || github.event.pull_request.html_url }} - EVENT_TITLE: ${{ toJSON(github.event.issue.title || github.event.pull_request.title) }} with: channel-id: ${{ secrets.SLACK_CHANNEL }} payload: | @@ -34,8 +41,43 @@ jobs: "type": "section", "text": { "type": "mrkdwn", - "text": ${{ toJSON(format('<{0}|{1}>', env.EVENT_URL, env.EVENT_TITLE)) }} + "text": ${{ toJSON(format('<{0}|{1}>', env.ISSUE_URL, env.ISSUE_TITLE)) }} } } ] } + + - name: Generate GitHub App Token + id: token + uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 + with: + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.APP_PEM }} + + - name: Add prioritized Label + env: + GH_CLI_SUBCOMMAND: ${{ github.event_name == 'pull_request_target' && 'pr' || 'issue' }} + GH_TOKEN: ${{ steps.token.outputs.token }} + run: | + gh $GH_CLI_SUBCOMMAND edit "$ISSUE_URL" --add-label prioritized + + - name: Move to the Top of the Team Working Board + env: + GH_TOKEN: ${{ steps.token.outputs.token }} + run: | + PROJECT_ITEM_ID=$(gh project item-add 196 --owner "hashicorp" --url "$ISSUE_URL" --format json --jq '.id') + + gh project item-edit \ + --id 
"$PROJECT_ITEM_ID" \ + --project-id "PVT_kwDOAAuecM4AF-7h" \ + --field-id "PVTSSF_lADOAAuecM4AF-7hzgMRB34" \ + --single-select-option-id "${{ vars.team_project_view_working_board }}" + + gh api graphql \ + -F itemId="$PROJECT_ITEM_ID" \ + -F projectId="PVT_kwDOAAuecM4AF-7h" \ + -f query='mutation($itemId:ID!, $projectId:ID!) { + updateProjectV2ItemPosition(input:{itemId:$itemId, projectId:$projectId}) { + clientMutationId + } + }' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 5a3f31e682a..977229d8885 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -17,7 +17,7 @@ jobs: fetch-depth: 0 - name: Generate Release Notes run: sed -n -e "1{/# /d;}" -e "2{/^$/d;}" -e "/# $(git describe --abbrev=0 --exclude="$(git describe --abbrev=0 --match='v*.*.*' --tags)" --match='v*.*.*' --tags | tr -d v)/q;p" CHANGELOG.md > release-notes.txt - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: release-notes path: release-notes.txt @@ -25,7 +25,7 @@ jobs: terraform-provider-release: name: 'Terraform Provider Release' needs: [release-notes] - uses: hashicorp/ghaction-terraform-provider-release/.github/workflows/hashicorp.yml@v4 + uses: hashicorp/ghaction-terraform-provider-release/.github/workflows/hashicorp.yml@v5 secrets: hc-releases-key-prod: '${{ secrets.HC_RELEASES_KEY_PROD }}' hc-releases-key-staging: '${{ secrets.HC_RELEASES_KEY_STAGING }}' @@ -66,10 +66,16 @@ jobs: if: github.ref_name == needs.highest-version-tag.outputs.tag runs-on: macos-latest steps: + - uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 + id: app-token + with: + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.APP_PEM }} - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 ref: main + token: ${{ steps.app-token.outputs.token }} - 
name: Update Changelog Header run: | CHANGELOG_FILE_NAME="CHANGELOG.md" @@ -103,7 +109,7 @@ jobs: steps: - name: Save Release Tag run: echo ${{ github.ref_name }} > release-tag.data - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + - uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: release-tag path: release-tag.data diff --git a/.github/workflows/resource-counts.yml b/.github/workflows/resource-counts.yml index 5c0e1b8ce68..ed3e50d1fe2 100644 --- a/.github/workflows/resource-counts.yml +++ b/.github/workflows/resource-counts.yml @@ -40,7 +40,7 @@ jobs: rm .terraform.lock.hcl rm -rf .terraform - name: Create Pull Request - uses: peter-evans/create-pull-request@6d6857d36972b65feb161a90e484f2984215f83e # v6.0.5 + uses: peter-evans/create-pull-request@c5a7806660adbe173f04e3e038b0ccdcd758773c # v6.1.0 with: token: ${{ steps.generate_token.outputs.token }} branch: "resource-counts" diff --git a/.github/workflows/semgrep-ci.yml b/.github/workflows/semgrep-ci.yml index ad2934c4d01..e0e020cf3ab 100644 --- a/.github/workflows/semgrep-ci.yml +++ b/.github/workflows/semgrep-ci.yml @@ -24,8 +24,34 @@ env: SEMGREP_ARGS: --error --quiet jobs: + semgrep-validate: + name: Validate Code Quality Rules + runs-on: ubuntu-latest + container: + image: "returntocorp/semgrep:1.52.0" + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - run: | + semgrep --validate \ + --config .ci/.semgrep.yml \ + --config .ci/.semgrep-constants.yml \ + --config .ci/.semgrep-test-constants.yml \ + --config .ci/semgrep/ + + semgrep-test: + name: Semgrep Rule Tests + needs: [semgrep-validate] + runs-on: ubuntu-latest + container: + image: "returntocorp/semgrep:1.52.0" + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - run: | + semgrep --quiet --test .ci/semgrep/ + semgrep: name: Code Quality Scan + needs: [semgrep-test] runs-on: ubuntu-latest container: 
image: "returntocorp/semgrep:1.52.0" @@ -33,6 +59,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - run: | semgrep $SEMGREP_ARGS \ + --exclude .ci/semgrep/**/*.go \ --config .ci/.semgrep.yml \ --config .ci/.semgrep-constants.yml \ --config .ci/.semgrep-test-constants.yml \ diff --git a/.github/workflows/skaff.yml b/.github/workflows/skaff.yml index 6fb5734f19b..58975dc14ea 100644 --- a/.github/workflows/skaff.yml +++ b/.github/workflows/skaff.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: skaff/go.mod # See also: https://github.com/actions/setup-go/issues/54 diff --git a/.github/workflows/snapshot.yml b/.github/workflows/snapshot.yml index a59b5545c3d..33fb1e5836e 100644 --- a/.github/workflows/snapshot.yml +++ b/.github/workflows/snapshot.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 @@ -22,7 +22,8 @@ jobs: - name: goreleaser release uses: goreleaser/goreleaser-action@286f3b13b1b49da4ac219696163fb8c1c93e1200 # v6.0.0 with: - args: release --rm-dist --skip-sign --snapshot --timeout 2h + args: release --clean --skip=sign --snapshot --timeout 2h + version: "~> v2" - name: artifact naming id: naming run: | @@ -36,7 +37,7 @@ jobs: ARTIFACT="${GITHUB_REF}";; esac echo "artifact=$ARTIFACT-$(date -u +'%Y-%m-%dT%H-%M')" >> "$GITHUB_OUTPUT" - - uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 + - uses: 
actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 with: name: ${{steps.naming.outputs.artifact}} path: dist/*.zip diff --git a/.github/workflows/team_slack_bot.yml b/.github/workflows/team_slack_bot.yml index c7e8f11c93b..b0959a0a465 100644 --- a/.github/workflows/team_slack_bot.yml +++ b/.github/workflows/team_slack_bot.yml @@ -10,7 +10,7 @@ jobs: name: open-pr-stats if: github.repository_owner == 'hashicorp' steps: - - uses: actions/create-github-app-token@c8f55efbd427e7465d6da1106e7979bc8aaee856 # v1.10.1 + - uses: actions/create-github-app-token@31c86eb3b33c9b601a1f60f98dcbfd1d70f379b4 # v1.10.3 id: app-token with: app-id: ${{ secrets.APP_ID }} diff --git a/.github/workflows/website.yml b/.github/workflows/website.yml index 7e3ad3505f4..8a5e39b5e05 100644 --- a/.github/workflows/website.yml +++ b/.github/workflows/website.yml @@ -87,7 +87,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: .ci/tools/go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 @@ -103,7 +103,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: .ci/tools/go.mod - uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 @@ -121,7 +121,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: .ci/tools/go.mod - uses: 
actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 diff --git a/.github/workflows/workflow-lint.yml b/.github/workflows/workflow-lint.yml index 2689629b4e5..569667ab950 100644 --- a/.github/workflows/workflow-lint.yml +++ b/.github/workflows/workflow-lint.yml @@ -20,7 +20,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - uses: actions/setup-go@cdcb36043654635271a94b9a6d1392de5bb323a7 # v5.0.1 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version-file: .ci/tools/go.mod - name: Install actionlint diff --git a/.go-version b/.go-version index 6fee2fedb0a..da9594fd66f 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.2 +1.22.5 diff --git a/.goreleaser.yml b/.goreleaser.yml index df0ef29b7e4..19194ae225d 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -1,3 +1,4 @@ +version: 2 archives: - files: # Ensure only built binary and license file are archived diff --git a/.teamcity/components/generated/services_all.kt b/.teamcity/components/generated/services_all.kt index ec71c011d0b..379fdaf3a10 100644 --- a/.teamcity/components/generated/services_all.kt +++ b/.teamcity/components/generated/services_all.kt @@ -5,7 +5,7 @@ val services = mapOf( "account" to ServiceSpec("Account Management"), "acm" to ServiceSpec("ACM (Certificate Manager)"), "acmpca" to ServiceSpec("ACM PCA (Certificate Manager Private Certificate Authority)"), - "amp" to ServiceSpec("AMP (Managed Prometheus)"), + "amp" to ServiceSpec("AMP (Managed Prometheus)", parallelismOverride = 10), "amplify" to ServiceSpec("Amplify"), "apigateway" to ServiceSpec("API Gateway", vpcLock = true), "apigatewayv2" to ServiceSpec("API Gateway V2", vpcLock = true), @@ -15,6 +15,7 @@ val services = mapOf( "appflow" to ServiceSpec("AppFlow"), "appintegrations" to ServiceSpec("AppIntegrations"), "applicationinsights" to ServiceSpec("CloudWatch Application Insights"), + "applicationsignals" to 
ServiceSpec("Application Signals"), "appmesh" to ServiceSpec("App Mesh"), "apprunner" to ServiceSpec("App Runner"), "appstream" to ServiceSpec("AppStream 2.0", vpcLock = true, parallelismOverride = 10), @@ -26,8 +27,8 @@ val services = mapOf( "backup" to ServiceSpec("Backup"), "batch" to ServiceSpec("Batch", vpcLock = true), "bcmdataexports" to ServiceSpec("BCM Data Exports"), - "bedrock" to ServiceSpec("Amazon Bedrock"), - "bedrockagent" to ServiceSpec("Agents for Amazon Bedrock"), + "bedrock" to ServiceSpec("Bedrock"), + "bedrockagent" to ServiceSpec("Bedrock Agents"), "budgets" to ServiceSpec("Web Services Budgets"), "ce" to ServiceSpec("CE (Cost Explorer)"), "chatbot" to ServiceSpec("Chatbot"), @@ -64,6 +65,7 @@ val services = mapOf( "costoptimizationhub" to ServiceSpec("Cost Optimization Hub"), "cur" to ServiceSpec("Cost and Usage Report", regionOverride = "us-east-1"), "customerprofiles" to ServiceSpec("Connect Customer Profiles"), + "databrew" to ServiceSpec("Glue DataBrew"), "dataexchange" to ServiceSpec("Data Exchange"), "datapipeline" to ServiceSpec("Data Pipeline"), "datasync" to ServiceSpec("DataSync", vpcLock = true), @@ -157,6 +159,7 @@ val services = mapOf( "neptunegraph" to ServiceSpec("Neptune Analytics"), "networkfirewall" to ServiceSpec("Network Firewall", vpcLock = true), "networkmanager" to ServiceSpec("Network Manager", vpcLock = true), + "networkmonitor" to ServiceSpec("CloudWatch Network Monitor"), "oam" to ServiceSpec("CloudWatch Observability Access Manager"), "opensearch" to ServiceSpec("OpenSearch", vpcLock = true), "opensearchserverless" to ServiceSpec("OpenSearch Serverless"), diff --git a/CHANGELOG.md b/CHANGELOG.md index d09cb353d8f..e8ab4969ffd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,16 +1,205 @@ -## 5.55.0 (Unreleased) +## 5.59.0 (Unreleased) + +FEATURES: + +* **New Data Source:** `aws_cloudfront_origin_access_control` ([#36301](https://github.com/hashicorp/terraform-provider-aws/issues/36301)) +* **New Data Source:** 
`aws_timestreamwrite_database` ([#36368](https://github.com/hashicorp/terraform-provider-aws/issues/36368)) +* **New Data Source:** `aws_timestreamwrite_table` ([#36599](https://github.com/hashicorp/terraform-provider-aws/issues/36599)) +* **New Resource:** `aws_datazone_project` ([#38345](https://github.com/hashicorp/terraform-provider-aws/issues/38345)) +* **New Resource:** `aws_grafana_workspace_service_account` ([#38101](https://github.com/hashicorp/terraform-provider-aws/issues/38101)) +* **New Resource:** `aws_grafana_workspace_service_account_token` ([#38101](https://github.com/hashicorp/terraform-provider-aws/issues/38101)) +* **New Resource:** `aws_rds_certificate` ([#35003](https://github.com/hashicorp/terraform-provider-aws/issues/35003)) + +ENHANCEMENTS: + +* data-source/aws_lakeformation_data_lake_settings: Add `allow_full_table_external_data_access` attribute ([#34474](https://github.com/hashicorp/terraform-provider-aws/issues/34474)) +* data-source/aws_msk_cluster: Add `broker_node_group_info` attribute ([#37705](https://github.com/hashicorp/terraform-provider-aws/issues/37705)) +* resource/aws_codebuild_webhook: Add `scope_configuration` argument ([#38199](https://github.com/hashicorp/terraform-provider-aws/issues/38199)) +* resource/aws_db_instance: Add `engine_lifecycle_support` argument ([#37708](https://github.com/hashicorp/terraform-provider-aws/issues/37708)) +* resource/aws_ecs_cluster: Add `configuration.managed_storage_configuration` argument ([#37932](https://github.com/hashicorp/terraform-provider-aws/issues/37932)) +* resource/aws_emrserverless_application: Add `interactive_configuration` argument ([#37889](https://github.com/hashicorp/terraform-provider-aws/issues/37889)) +* resource/aws_fis_experiment_template: Add `experiment_options` configuration block ([#36900](https://github.com/hashicorp/terraform-provider-aws/issues/36900)) +* resource/aws_imagebuilder_image_pipeline: Add `execution_role` and `workflow` arguments 
([#37317](https://github.com/hashicorp/terraform-provider-aws/issues/37317)) +* resource/aws_kinesisanalyticsv2_application: Support `FLINK-1_19` as a valid value for `runtime_environment` ([#38350](https://github.com/hashicorp/terraform-provider-aws/issues/38350)) +* resource/aws_lakeformation_data_lake_settings: Add `allow_full_table_external_data_access` attribute ([#34474](https://github.com/hashicorp/terraform-provider-aws/issues/34474)) +* resource/aws_lb_target_group: Add `target_group_health` configuration block ([#37082](https://github.com/hashicorp/terraform-provider-aws/issues/37082)) +* resource/aws_msk_replicator: Add `starting_position` argument ([#36968](https://github.com/hashicorp/terraform-provider-aws/issues/36968)) +* resource/aws_rds_cluster: Add `engine_lifecycle_support` argument ([#37708](https://github.com/hashicorp/terraform-provider-aws/issues/37708)) +* resource/aws_rds_global_cluster: Add `engine_lifecycle_support` argument ([#37708](https://github.com/hashicorp/terraform-provider-aws/issues/37708)) +* resource/aws_vpclattice_listener: Support `TLS_PASSTHROUGH` as a valid value for `protocol` ([#37964](https://github.com/hashicorp/terraform-provider-aws/issues/37964)) +* resource/aws_wafv2_web_acl: Add `enable_machine_learning` to `aws_managed_rules_bot_control_rule_set` configuration block ([#37006](https://github.com/hashicorp/terraform-provider-aws/issues/37006)) + +BUG FIXES: + +* data-source/aws_efs_access_point: Set `id` to the access point ID, not the file system ID.
This fixes a regression introduced in [v5.58.0](https://github.com/hashicorp/terraform-provider-aws/blob/main/CHANGELOG.md#5580-july-11-2024) ([#38372](https://github.com/hashicorp/terraform-provider-aws/issues/38372)) +* data-source/aws_lb_listener: Correctly set `default_action.target_group_arn` ([#37348](https://github.com/hashicorp/terraform-provider-aws/issues/37348)) +* resource/aws_codebuild_project: Fix unsetting `concurrent_build_limit` ([#37748](https://github.com/hashicorp/terraform-provider-aws/issues/37748)) +* resource/aws_ecs_service: Change `volume_configuration.managed_ebs_volume.throughput` from `TypeString` to `TypeInt` ([#38109](https://github.com/hashicorp/terraform-provider-aws/issues/38109)) +* resource/aws_lb_target_group: Use the configured `ip_address_type` value when `target_type` is `instance` ([#36423](https://github.com/hashicorp/terraform-provider-aws/issues/36423)) +* resource/aws_lb_trust_store: Wait until trust store is `ACTIVE` on resource Create ([#38332](https://github.com/hashicorp/terraform-provider-aws/issues/38332)) +* resource/aws_pinpoint_app: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panic when `campaign_hook` is empty (`{}`) ([#38323](https://github.com/hashicorp/terraform-provider-aws/issues/38323)) + +## 5.58.0 (July 11, 2024) + +FEATURES: + +* **New Resource:** `aws_cloudwatch_log_account_policy` ([#38328](https://github.com/hashicorp/terraform-provider-aws/issues/38328)) +* **New Resource:** `aws_verifiedpermissions_identity_source` ([#38181](https://github.com/hashicorp/terraform-provider-aws/issues/38181)) + +ENHANCEMENTS: + +* data-source/aws_launch_template: Add `network_interfaces.primary_ipv6` attribute ([#37142](https://github.com/hashicorp/terraform-provider-aws/issues/37142)) +* data-source/aws_mskconnect_connector: Add `tags` attribute ([#38270](https://github.com/hashicorp/terraform-provider-aws/issues/38270)) +* data-source/aws_mskconnect_custom_plugin: Add `tags` 
attribute ([#38270](https://github.com/hashicorp/terraform-provider-aws/issues/38270)) +* data-source/aws_mskconnect_worker_configuration: Add `tags` attribute ([#38270](https://github.com/hashicorp/terraform-provider-aws/issues/38270)) +* data-source/aws_oam_link: Add `link_configuration` attribute ([#38277](https://github.com/hashicorp/terraform-provider-aws/issues/38277)) +* resource/aws_cloudformation_stack_set_instance: Extend `deployment_targets` argument. ([#37898](https://github.com/hashicorp/terraform-provider-aws/issues/37898)) +* resource/aws_cloudtrail_event_data_store: Add `billing_mode` argument ([#38273](https://github.com/hashicorp/terraform-provider-aws/issues/38273)) +* resource/aws_db_instance: Fix `InvalidParameterCombination: A parameter group can't be specified during Read Replica creation for the following DB engine: postgres` errors ([#38227](https://github.com/hashicorp/terraform-provider-aws/issues/38227)) +* resource/aws_ec2_capacity_reservation: Add configurable timeouts ([#36754](https://github.com/hashicorp/terraform-provider-aws/issues/36754)) +* resource/aws_ec2_capacity_reservation: Retry `InsufficientInstanceCapacity` errors ([#36754](https://github.com/hashicorp/terraform-provider-aws/issues/36754)) +* resource/aws_eks_cluster: Add `bootstrap_self_managed_addons` argument ([#38162](https://github.com/hashicorp/terraform-provider-aws/issues/38162)) +* resource/aws_fms_policy: Add `resource_set_ids` attribute ([#38161](https://github.com/hashicorp/terraform-provider-aws/issues/38161)) +* resource/aws_fsx_ontap_file_system: Add `384`, `768`, `1536`, `3072`, and `6144` as valid values for `throughput_capacity` ([#38308](https://github.com/hashicorp/terraform-provider-aws/issues/38308)) +* resource/aws_fsx_ontap_file_system: Add `384`, `768`, and `1536` as valid values for `throughput_capacity_per_ha_pair` ([#38308](https://github.com/hashicorp/terraform-provider-aws/issues/38308)) +* resource/aws_fsx_ontap_file_system: Add 
`MULTI_AZ_2` as a valid value for `deployment_type` ([#38308](https://github.com/hashicorp/terraform-provider-aws/issues/38308)) +* resource/aws_globalaccelerator_cross_account_attachment: Add `cidr_block` argument to `resource` configuration block ([#38196](https://github.com/hashicorp/terraform-provider-aws/issues/38196)) +* resource/aws_iam_server_certificate: Add configurable `delete` timeout ([#38212](https://github.com/hashicorp/terraform-provider-aws/issues/38212)) +* resource/aws_launch_template: Add `network_interfaces.primary_ipv6` argument ([#37142](https://github.com/hashicorp/terraform-provider-aws/issues/37142)) +* resource/aws_mskconnect_connector: Add `tags` argument and `tags_all` attribute ([#38270](https://github.com/hashicorp/terraform-provider-aws/issues/38270)) +* resource/aws_mskconnect_custom_plugin: Add `tags` argument and `tags_all` attribute ([#38270](https://github.com/hashicorp/terraform-provider-aws/issues/38270)) +* resource/aws_mskconnect_worker_configuration: Add `tags` argument and `tags_all` attribute ([#38270](https://github.com/hashicorp/terraform-provider-aws/issues/38270)) +* resource/aws_mskconnect_worker_configuration: Add resource deletion logic ([#38270](https://github.com/hashicorp/terraform-provider-aws/issues/38270)) +* resource/aws_oam_link: Add `link_configuration` argument ([#38277](https://github.com/hashicorp/terraform-provider-aws/issues/38277)) +* resource/aws_rds_cluster: Add `ca_certificate_identifier` argument and `ca_certificate_valid_till` attribute ([#37108](https://github.com/hashicorp/terraform-provider-aws/issues/37108)) +* resource/aws_ssm_association: Add `tags` argument and `tags_all` attribute ([#38271](https://github.com/hashicorp/terraform-provider-aws/issues/38271)) + +BUG FIXES: + +* aws_dx_lag: Checks for errors other than NotFound when reading. 
([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_dynamodb_kinesis_streaming_destination: Checks for errors other than NotFound when reading. ([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_ec2_capacity_block_reservation: Checks for errors other than NotFound when reading. ([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_opensearchserverless_access_policy: Checks for errors other than NotFound when reading. ([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_opensearchserverless_collection: Checks for errors other than NotFound when reading. ([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_opensearchserverless_security_config: Checks for errors other than NotFound when reading. ([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_opensearchserverless_security_policy: Checks for errors other than NotFound when reading. ([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_opensearchserverless_vpc_endpoint: Checks for errors other than NotFound when reading. ([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_ram_principal_association: Checks for errors other than NotFound when reading. ([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* aws_route_table: Checks for errors other than NotFound when reading. 
([#38292](https://github.com/hashicorp/terraform-provider-aws/issues/38292)) +* data-source/aws_ecr_repository: Fix issue where the `tags` attribute is not set ([#38272](https://github.com/hashicorp/terraform-provider-aws/issues/38272)) +* data-source/aws_eks_cluster: Add `access_config.bootstrap_cluster_creator_admin_permissions` attribute ([#38295](https://github.com/hashicorp/terraform-provider-aws/issues/38295)) +* resource/aws_appstream_fleet: Support `0` as a valid value for `idle_disconnect_timeout_in_seconds` ([#38274](https://github.com/hashicorp/terraform-provider-aws/issues/38274)) +* resource/aws_cloudformation_stack_set_instance: Add `ForceNew` to deployment_targets attributes to ensure a new resource is recreated when the deployment_targets argument is changed, which was not the case previously. ([#37898](https://github.com/hashicorp/terraform-provider-aws/issues/37898)) +* resource/aws_db_instance: Correctly mark incomplete instances as [tainted](https://developer.hashicorp.com/terraform/cli/state/taint#the-tainted-status) during creation ([#38252](https://github.com/hashicorp/terraform-provider-aws/issues/38252)) +* resource/aws_eks_cluster: Set `access_config.bootstrap_cluster_creator_admin_permissions` to `true` on Read for clusters with no `access_config` configured. 
This allows in-place updates of existing clusters when `access_config` is configured ([#38295](https://github.com/hashicorp/terraform-provider-aws/issues/38295)) +* resource/aws_elasticache_serverless_cache: Allow `cache_usage_limits.data_storage.maximum`, `cache_usage_limits.data_storage.minimum`, `cache_usage_limits.ecpu_per_second.maximum` and `cache_usage_limits.ecpu_per_second.minimum` to be updated in-place ([#38269](https://github.com/hashicorp/terraform-provider-aws/issues/38269)) +* resource/aws_mskconnect_connector: Fix `interface conversion: interface {} is nil, not map[string]interface {}` panic when `log_delivery.worker_log_delivery` is empty (`{}`) ([#38270](https://github.com/hashicorp/terraform-provider-aws/issues/38270)) + +## 5.57.0 (July 4, 2024) + +FEATURES: + +* **New Data Source:** `aws_appstream_image` ([#38225](https://github.com/hashicorp/terraform-provider-aws/issues/38225)) +* **New Data Source:** `aws_cognito_user_pool` ([#37399](https://github.com/hashicorp/terraform-provider-aws/issues/37399)) +* **New Data Source:** `aws_ec2_transit_gateway_peering_attachments` ([#25743](https://github.com/hashicorp/terraform-provider-aws/issues/25743)) +* **New Data Source:** `aws_transfer_connector` ([#38213](https://github.com/hashicorp/terraform-provider-aws/issues/38213)) + +ENHANCEMENTS: + +* data-source/aws_backup_plan: Add `rule` attribute ([#37890](https://github.com/hashicorp/terraform-provider-aws/issues/37890)) +* resource/aws_amplify_domain_association: Add `certificate_settings` argument ([#37105](https://github.com/hashicorp/terraform-provider-aws/issues/37105)) +* resource/aws_ec2_transit_gateway_peering_attachment: Add `options` argument ([#36902](https://github.com/hashicorp/terraform-provider-aws/issues/36902)) +* resource/aws_iot_authorizer: Add `tags` argument ([#37152](https://github.com/hashicorp/terraform-provider-aws/issues/37152)) +* resource/aws_iot_topic_rule: Add `cloudwatch_logs.batch_mode` and 
`error_action.cloudwatch_logs.batch_mode` arguments ([#36772](https://github.com/hashicorp/terraform-provider-aws/issues/36772)) +* resource/aws_sagemaker_endpoint_configuration: Add support for `InputAndOutput` in `capture_mode` ([#37726](https://github.com/hashicorp/terraform-provider-aws/issues/37726)) + +BUG FIXES: + +* resource/aws_iot_provisioning_template: Fix `pre_provisioning_hook` update operation ([#37152](https://github.com/hashicorp/terraform-provider-aws/issues/37152)) +* resource/aws_iot_topic_rule: Retry IAM eventual consistency errors on Update ([#36286](https://github.com/hashicorp/terraform-provider-aws/issues/36286)) + +## 5.56.1 (June 28, 2024) + +BUG FIXES: + +* data-source/aws_cognito_user_pool_client: Fix `InvalidParameterException: 2 validation errors detected` errors on Read ([#38168](https://github.com/hashicorp/terraform-provider-aws/issues/38168)) +* resource/aws_cognito_user: Fix a bug that caused resource recreation for resources imported with certain [import ID](https://developer.hashicorp.com/terraform/language/import#import-id) formats ([#38182](https://github.com/hashicorp/terraform-provider-aws/issues/38182)) +* resource/aws_cognito_user_pool: Fix `runtime error: index out of range [0] with length 0` panic when adding `lambda_config` ([#38184](https://github.com/hashicorp/terraform-provider-aws/issues/38184)) + +## 5.56.0 (June 27, 2024) + +FEATURES: + +* **New Resource:** `aws_appfabric_app_authorization_connection` ([#38084](https://github.com/hashicorp/terraform-provider-aws/issues/38084)) +* **New Resource:** `aws_appfabric_ingestion` ([#37291](https://github.com/hashicorp/terraform-provider-aws/issues/37291)) +* **New Resource:** `aws_appfabric_ingestion_destination` ([#37627](https://github.com/hashicorp/terraform-provider-aws/issues/37627)) +* **New Resource:** `aws_networkfirewall_tls_inspection_configuration` ([#35168](https://github.com/hashicorp/terraform-provider-aws/issues/35168)) +* **New Resource:** 
`aws_networkmonitor_monitor` ([#35722](https://github.com/hashicorp/terraform-provider-aws/issues/35722)) +* **New Resource:** `aws_networkmonitor_probe` ([#35722](https://github.com/hashicorp/terraform-provider-aws/issues/35722)) + +ENHANCEMENTS: + +* resource/aws_controltower_control: Add `parameters` argument and `arn` attribute ([#38071](https://github.com/hashicorp/terraform-provider-aws/issues/38071)) +* resource/aws_networkfirewall_logging_configuration: Add plan-time validation of `firewall_arn` ([#35168](https://github.com/hashicorp/terraform-provider-aws/issues/35168)) +* resource/aws_quicksight_account_subscription: Add `iam_identity_center_instance_arn` attribute ([#36830](https://github.com/hashicorp/terraform-provider-aws/issues/36830)) +* resource/aws_route53_resolver_firewall_rule: Add `firewall_domain_redirection_action` argument ([#37242](https://github.com/hashicorp/terraform-provider-aws/issues/37242)) +* resource/aws_route53_resolver_firewall_rule: Add `q_type` argument ([#38074](https://github.com/hashicorp/terraform-provider-aws/issues/38074)) +* resource/aws_sagemaker_domain: Add `default_user_settings.canvas_app_settings.generative_ai_settings` configuration block ([#37139](https://github.com/hashicorp/terraform-provider-aws/issues/37139)) +* resource/aws_sagemaker_domain: Add `default_user_settings.code_editor_app_settings.custom_image` configuration block ([#37153](https://github.com/hashicorp/terraform-provider-aws/issues/37153)) +* resource/aws_sagemaker_endpoint_configuration: Add `production_variants.inference_ami_version` and `shadow_production_variants.inference_ami_version` arguments ([#38085](https://github.com/hashicorp/terraform-provider-aws/issues/38085)) +* resource/aws_sagemaker_user_profile: Add `user_settings.canvas_app_settings.generative_ai_settings` configuration block ([#37139](https://github.com/hashicorp/terraform-provider-aws/issues/37139)) +* resource/aws_sagemaker_user_profile: Add
`user_settings.code_editor_app_settings.custom_image` configuration block ([#37153](https://github.com/hashicorp/terraform-provider-aws/issues/37153)) +* resource/aws_sagemaker_workforce: add `oidc_config.authentication_request_extra_params` and `oidc_config.scope` arguments ([#38078](https://github.com/hashicorp/terraform-provider-aws/issues/38078)) +* resource/aws_sagemaker_workteam: Add `worker_access_configuration` attribute ([#38087](https://github.com/hashicorp/terraform-provider-aws/issues/38087)) +* resource/aws_wafv2_web_acl: Add `sensitivity_level` argument to `sqli_match_statement` configuration block ([#38077](https://github.com/hashicorp/terraform-provider-aws/issues/38077)) + +BUG FIXES: + +* data-source/aws_ecs_service: Correctly set `tags` ([#38067](https://github.com/hashicorp/terraform-provider-aws/issues/38067)) +* resource/aws_drs_replication_configuration_template: Fix issues preventing creation and deletion ([#38143](https://github.com/hashicorp/terraform-provider-aws/issues/38143)) + +## 5.55.0 (June 20, 2024) + +FEATURES: + +* **New Resource:** `aws_drs_replication_configuration_template` ([#26399](https://github.com/hashicorp/terraform-provider-aws/issues/26399)) + +ENHANCEMENTS: + +* data-source/aws_autoscaling_group: Add `mixed_instances_policy.launch_template.override.instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` attribute ([#38003](https://github.com/hashicorp/terraform-provider-aws/issues/38003)) * data-source/aws_glue_catalog_table: Add `additional_locations` argument in `storage_descriptor` ([#37891](https://github.com/hashicorp/terraform-provider-aws/issues/37891)) +* data-source/aws_launch_template: Add `instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` attribute ([#38003](https://github.com/hashicorp/terraform-provider-aws/issues/38003)) +* data-source/aws_networkmanager_core_network_policy_document: Add `attachment_policies.action.add_to_network_function_group` 
argument ([#38013](https://github.com/hashicorp/terraform-provider-aws/issues/38013)) +* data-source/aws_networkmanager_core_network_policy_document: Add `network_function_groups` configuration block ([#38013](https://github.com/hashicorp/terraform-provider-aws/issues/38013)) +* data-source/aws_networkmanager_core_network_policy_document: Add `send-via` and `send-to` as valid values for `segment_actions.action` ([#38013](https://github.com/hashicorp/terraform-provider-aws/issues/38013)) +* data-source/aws_networkmanager_core_network_policy_document: Add `single-hop` and `dual-hop` as valid values for `segment_actions.mode` ([#38013](https://github.com/hashicorp/terraform-provider-aws/issues/38013)) +* data-source/aws_networkmanager_core_network_policy_document: Add `when_sent_to` and `via` configuration blocks to `segment_actions` ([#38013](https://github.com/hashicorp/terraform-provider-aws/issues/38013)) +* resource/aws_api_gateway_integration: Increase maximum value of `timeout_milliseconds` from `29000` (29 seconds) to `300000` (5 minutes) ([#38010](https://github.com/hashicorp/terraform-provider-aws/issues/38010)) +* resource/aws_appsync_api_key: Add `api_key_id` attribute ([#36568](https://github.com/hashicorp/terraform-provider-aws/issues/36568)) +* resource/aws_autoscaling_group: Add `mixed_instances_policy.launch_template.override.instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` argument ([#38003](https://github.com/hashicorp/terraform-provider-aws/issues/38003)) +* resource/aws_autoscaling_group: Add plan-time validation of `warm_pool.max_group_prepared_capacity` and `warm_pool.min_size` ([#37174](https://github.com/hashicorp/terraform-provider-aws/issues/37174)) * resource/aws_docdb_cluster: Add `restore_to_point_in_time` argument ([#37716](https://github.com/hashicorp/terraform-provider-aws/issues/37716)) +* resource/aws_dynamodb_table: Adds validation for `ttl` values. 
([#37991](https://github.com/hashicorp/terraform-provider-aws/issues/37991)) +* resource/aws_ec2_fleet: Add `launch_template_config.override.instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` argument ([#38003](https://github.com/hashicorp/terraform-provider-aws/issues/38003)) * resource/aws_glue_catalog_table: Add `additional_locations` argument in `storage_descriptor` ([#37891](https://github.com/hashicorp/terraform-provider-aws/issues/37891)) +* resource/aws_glue_job: Add `maintenance_window` argument ([#37760](https://github.com/hashicorp/terraform-provider-aws/issues/37760)) +* resource/aws_launch_template: Add `instance_requirements.max_spot_price_as_percentage_of_optimal_on_demand_price` argument ([#38003](https://github.com/hashicorp/terraform-provider-aws/issues/38003)) + +BUG FIXES: + +* data-source/aws_networkmanager_core_network_policy_document: Add correct `except` values to the returned JSON document when `segment_actions.share_with_except` is configured ([#38013](https://github.com/hashicorp/terraform-provider-aws/issues/38013)) +* provider: Now falls back to non-FIPS endpoint if `use_fips_endpoint` is set and no FIPS endpoint is available ([#38057](https://github.com/hashicorp/terraform-provider-aws/issues/38057)) +* resource/aws_autoscaling_group: Fix bug updating `warm_pool.max_group_prepared_capacity` to `0` ([#37174](https://github.com/hashicorp/terraform-provider-aws/issues/37174)) +* resource/aws_dynamodb_table: Fixes perpetual diff when `ttl.attribute_name` is set when `ttl.enabled` is not set. 
([#37991](https://github.com/hashicorp/terraform-provider-aws/issues/37991)) +* resource/aws_ec2_network_insights_path: Mark `destination` as Optional ([#36966](https://github.com/hashicorp/terraform-provider-aws/issues/36966)) +* resource/aws_lambda_event_source_mapping: Remove the upper limit on `scaling_config.maximum_concurrency` ([#37980](https://github.com/hashicorp/terraform-provider-aws/issues/37980)) +* service/transitgateway: Fix resource Read pagination regression causing `NotFound` errors ([#38011](https://github.com/hashicorp/terraform-provider-aws/issues/38011)) ## 5.54.1 (June 14, 2024) BUG FIXES: -* data-source/aws_ami: Fix `interface conversion: interface {} is types.ProductCodeValues, not string` panic ([######](https://github.com/hashicorp/terraform-provider-aws/issues/#####)) +* data-source/aws_ami: Fix `interface conversion: interface {} is types.ProductCodeValues, not string` panic ([#37977](https://github.com/hashicorp/terraform-provider-aws/issues/37977)) * resource/aws_codebuild_project: Increase maximum values of `build_batch_config.timeout_in_mins` and `build_timeout` from `480` (8 hours) to `2160` (36 hours) ([#37970](https://github.com/hashicorp/terraform-provider-aws/issues/37970)) ## 5.54.0 (June 14, 2024) diff --git a/CODEOWNERS b/CODEOWNERS index 52983e83292..492c6f41bf1 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,3 @@ -* @hashicorp/terraform-aws -/.github/ @breathingdust @justinretzolk \ No newline at end of file +* @hashicorp/terraform-aws +/.github/ @breathingdust @justinretzolk +/.github/*.yml @hashicorp/terraform-aws diff --git a/GNUmakefile b/GNUmakefile index b3b79301d84..ef922ec18b1 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -458,10 +458,11 @@ sanity: prereq-go ## Run sanity check (failures allowed) semgrep: semgrep-code-quality semgrep-naming semgrep-naming-cae semgrep-service-naming ## [CI] Run all CI Semgrep checks -semgrep-all: semgrep-validate ## Run semgrep on all files +semgrep-all: semgrep-test 
semgrep-validate ## Run semgrep on all files @echo "make: Running Semgrep checks locally (must have semgrep installed)..." @semgrep $(SEMGREP_ARGS) \ $(if $(filter-out $(origin PKG), undefined),--include $(PKG_NAME),) \ + --exclude .ci/semgrep/**/*.go \ --config .ci/.semgrep.yml \ --config .ci/.semgrep-constants.yml \ --config .ci/.semgrep-test-constants.yml \ @@ -479,11 +480,12 @@ semgrep-all: semgrep-validate ## Run semgrep on all files --config 'r/dgryski.semgrep-go.oddifsequence' \ --config 'r/dgryski.semgrep-go.oserrors' -semgrep-code-quality: semgrep-validate ## [CI] Semgrep Checks / Code Quality Scan +semgrep-code-quality: semgrep-test semgrep-validate ## [CI] Semgrep Checks / Code Quality Scan @echo "make: Semgrep Checks / Code Quality Scan..." @echo "make: Running Semgrep checks locally (must have semgrep installed)" - semgrep $(SEMGREP_ARGS) \ + @semgrep $(SEMGREP_ARGS) \ $(if $(filter-out $(origin PKG), undefined),--include $(PKG_NAME),) \ + --exclude .ci/semgrep/**/*.go \ --config .ci/.semgrep.yml \ --config .ci/.semgrep-constants.yml \ --config .ci/.semgrep-test-constants.yml \ @@ -512,6 +514,7 @@ semgrep-fix: semgrep-validate ## Fix Semgrep issues that have fixes @echo "make: WARNING: This will not fix rules that don't have autofixes" @semgrep $(SEMGREP_ARGS) --autofix \ $(if $(filter-out $(origin PKG), undefined),--include $(PKG_NAME),) \ + --exclude .ci/semgrep/**/*.go \ --config .ci/.semgrep.yml \ --config .ci/.semgrep-constants.yml \ --config .ci/.semgrep-test-constants.yml \ @@ -543,6 +546,11 @@ semgrep-naming-cae: semgrep-validate ## [CI] Semgrep Checks / Naming Scan Caps/A $(if $(filter-out $(origin PKG), undefined),--include $(PKG_NAME),) \ --config .ci/.semgrep-caps-aws-ec2.yml +semgrep-test: semgrep-validate ## Test Semgrep configuration files + @echo "make: Running Semgrep rule tests..." 
+ @semgrep --quiet \ + --test .ci/semgrep/ + semgrep-service-naming: semgrep-validate ## [CI] Semgrep Checks / Service Name Scan A-Z @echo "make: Semgrep Checks / Service Name Scan A-Z..." @echo "make: Running Semgrep checks locally (must have semgrep installed)" @@ -604,7 +612,7 @@ sweeper-unlinked: go-build ## [CI] Provider Checks / Sweeper Functions Not Linke grep --count --extended-regexp 'internal/service/[a-zA-Z0-9]+\.sweep[a-zA-Z0-9]+$$'` ; \ echo "make: sweeper-unlinked: found $$count, expected 0" ; \ [ $$count -eq 0 ] || \ - (echo "Expected `strings` to detect no sweeper function names in provider binary."; exit 1) + (echo "Expected `strings` to detect no sweeper function names in provider binary."; exit 1) t: prereq-go fmt-check ## Run acceptance tests (similar to testacc) TF_ACC=1 $(GO_VER) test ./$(PKG_NAME)/... -v -count $(TEST_COUNT) -parallel $(ACCTEST_PARALLELISM) $(RUNARGS) $(TESTARGS) -timeout $(ACCTEST_TIMEOUT) @@ -637,8 +645,8 @@ testacc: prereq-go fmt-check ## Run acceptance tests testacc-lint: ## [CI] Acceptance Test Linting / terrafmt @echo "make: Acceptance Test Linting / terrafmt..." @find $(SVC_DIR) -type f -name '*_test.go' \ - | sort -u \ - | xargs -I {} terrafmt diff --check --fmtcompat {} + | sort -u \ + | xargs -I {} terrafmt diff --check --fmtcompat {} testacc-lint-fix: ## Fix acceptance test linter findings @echo "make: Fixing Acceptance Test Linting / terrafmt..." diff --git a/docs/add-a-new-service.md b/docs/add-a-new-service.md index 934cf57072a..c91ba78e5f2 100644 --- a/docs/add-a-new-service.md +++ b/docs/add-a-new-service.md @@ -19,20 +19,20 @@ Before new resources are submitted, please raise a separate pull request contain To add an AWS SDK for Go service client: -1. Check the file `names/data/names_data.csv` for the service. +1. Check the file `names/data/names_data.hcl` for the service. -1. 
If the service is there and there is no value in the `NotImplmented` column, you are ready to implement the first [resource](./add-a-new-resource.md) or [data source](./add-a-new-datasource.md). +1. If the service is there and the `not_implemented` attribute does not exist, you are ready to implement the first [resource](./add-a-new-resource.md) or [data source](./add-a-new-datasource.md). -1. If the service is there and there is a value in the `NotImplemented` column, remove it and submit the client pull request as described below. +1. If the service is there and the `not_implemented` attribute is true, remove it and submit the client pull request as described below. 1. Otherwise, determine the service identifier using the rule described in [the Naming Guide](naming.md#service-identifier). -1. In `names/data/names_data.csv`, add a new line with all the requested information for the service following the guidance in the [`names` README](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/README.md). +1. In `names/data/names_data.hcl`, add a new hcl block with all the requested information for the service following the guidance in the [`names` README](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/README.md). !!! tip - Be very careful when adding or changing data in `names_data.csv`! + Be very careful when adding or changing data in `names_data.hcl`! The Provider and generators depend on the file being correct. - We strongly recommend using an editor with CSV support. + We strongly recommend using an editor with HCL support. Once the names data is ready, create a new service directory with the appropriate service name. @@ -70,7 +70,7 @@ Once the service client has been added, implement the first [resource](./add-a-n If an AWS service must be created in a non-standard way, for example, the service API's endpoint must be accessed via a single AWS Region, then: -1. 
Add an `x` in the **SkipClientGenerate** column for the service in [`names/data/names_data.csv`](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/README.md) +1. Make the `skip_client_generate` attribute `true` for the service in [`names/data/names_data.hcl`](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/README.md) 1. Run `make gen` @@ -93,14 +93,22 @@ If an AWS service must be created in a non-standard way, for example, the servic func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*costoptimizationhub.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return costoptimizationhub.NewFromConfig(cfg, func(o *costoptimizationhub.Options) { - if endpoint := config["endpoint"].(string); endpoint != "" { - o.BaseEndpoint = aws.String(endpoint) - } else if config["partition"].(string) == names.StandardPartitionID { - // Cost Optimization Hub endpoint is available only in us-east-1 Region. - o.Region = names.USEast1RegionID - } - }), nil + return costoptimizationhub.NewFromConfig(cfg, + costoptimizationhub.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *costoptimizationhub.Options) { + if config["partition"].(string) == names.StandardPartitionID { + // Cost Optimization Hub endpoint is available only in us-east-1 Region. + if cfg.Region != names.USEast1RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USEast1RegionID, + }) + o.Region = names.USEast1RegionID + } + } + }, + ), nil } ``` @@ -121,11 +129,27 @@ If an AWS service must be created in a non-standard way, for example, the servic // NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
func (p *servicePackage) NewConn(ctx context.Context) (*globalaccelerator_sdkv1.GlobalAccelerator, error) { sess := p.config["session"].(*session_sdkv1.Session) - config := &aws_sdkv1.Config{Endpoint: aws_sdkv1.String(p.config["endpoint"].(string))} + + cfg := aws.Config{} + + if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + cfg.Endpoint = aws.String(endpoint) + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) + } // Force "global" services to correct Regions. - if p.config["partition"].(string) == endpoints_sdkv1.AwsPartitionID { - config.Region = aws_sdkv1.String(endpoints_sdkv1.UsWest2RegionID) + if config["partition"].(string) == endpoints.AwsPartitionID { + if aws.StringValue(cfg.Region) != endpoints.UsWest2RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": aws.StringValue(cfg.Region), + "override_region": endpoints.UsWest2RegionID, + }) + cfg.Region = aws.String(endpoints.UsWest2RegionID) + } } return globalaccelerator_sdkv1.New(sess.Copy(config)), nil diff --git a/docs/aws-go-sdk-migrations.md b/docs/aws-go-sdk-migrations.md index 803afaafba3..323e7ae4831 100644 --- a/docs/aws-go-sdk-migrations.md +++ b/docs/aws-go-sdk-migrations.md @@ -13,7 +13,7 @@ For community members interested in contributing to this effort, this guide docu ### Re-generate Service Client -When fully replacing the client, [`names/data/names_data.csv`](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/data/names_data.csv) should be updated to remove the v1 indicator and add v2 (ie. delete the `1` in the `ClientSDKV1` column and add a `2` in the `ClientSDKV2` column). +When fully replacing the client, [`names/data/names_data.hcl`](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/data/names_data.hcl) should be updated to remove the v1 indicator and add v2 (ie. 
delete the `1` in the `ClientSDKV1` column and add a `2` in the `ClientSDKV2` column). Once complete, re-generate the client. ```console diff --git a/docs/retries-and-waiters.md b/docs/retries-and-waiters.md index ad2e6451ca5..37d9bdb8203 100644 --- a/docs/retries-and-waiters.md +++ b/docs/retries-and-waiters.md @@ -85,16 +85,20 @@ When custom service client configurations are applied, these will be defined in func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*s3_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return s3_sdkv2.NewFromConfig(cfg, func(o *s3_sdkv2.Options) { - // ..other configuration.. - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws_sdkv2.RetryerV2), retry_sdkv2.IsErrorRetryableFunc(func(err error) aws_sdkv2.Ternary { - if tfawserr_sdkv2.ErrMessageContains(err, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { - return aws_sdkv2.TrueTernary - } - return aws_sdkv2.UnknownTernary // Delegate to configured Retryer. - })) - }), nil + return s3_sdkv2.NewFromConfig(cfg, + s3.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *s3_sdkv2.Options) { + // ..other configuration.. + + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws_sdkv2.RetryerV2), retry_sdkv2.IsErrorRetryableFunc(func(err error) aws_sdkv2.Ternary { + if tfawserr_sdkv2.ErrMessageContains(err, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { + return aws_sdkv2.TrueTernary + } + return aws_sdkv2.UnknownTernary // Delegate to configured Retryer. 
+ })) + }, + ), nil } ``` diff --git a/examples/drs-initialize/README.md b/examples/drs-initialize/README.md new file mode 100644 index 00000000000..88be993caed --- /dev/null +++ b/examples/drs-initialize/README.md @@ -0,0 +1,9 @@ +# DRS Initialize Example + +This is an example of using the Terraform AWS Provider to manually initialize your account to use DRS. For more information see the [AWS instructions](https://docs.aws.amazon.com/drs/latest/userguide/getting-started-initializing.html). + +Running the example: + +1. Run `terraform apply` +2. Assume the `AWSElasticDisasterRecoveryInitializerRole` role +3. Use the AWS CLI to initialize: `aws drs initialize-service` diff --git a/examples/drs-initialize/main.tf b/examples/drs-initialize/main.tf new file mode 100644 index 00000000000..8b56ff3d4db --- /dev/null +++ b/examples/drs-initialize/main.tf @@ -0,0 +1,291 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +terraform { + required_version = ">= 0.12" +} + +data "aws_caller_identity" "current" {} + +resource "aws_iam_role" "AWSElasticDisasterRecoveryAgentRole" { + name = "AWSElasticDisasterRecoveryAgentRole" + path = "/service-role/" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Service = "drs.amazonaws.com" + } + Action = [ + "sts:AssumeRole", + "sts:SetSourceIdentity" + ] + Condition = { + StringLike = { + "sts:SourceIdentity" = "s-*", + "aws:SourceAccount" = data.aws_caller_identity.current.account_id + } + } + } + ] + }) +} + +resource "aws_iam_role" "AWSElasticDisasterRecoveryFailbackRole" { + name = "AWSElasticDisasterRecoveryFailbackRole" + path = "/service-role/" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Service = "drs.amazonaws.com" + } + Action = [ + "sts:AssumeRole", + "sts:SetSourceIdentity" + ] + Condition = { + StringLike = { + "sts:SourceIdentity" = "i-*", + "aws:SourceAccount" = 
data.aws_caller_identity.current.account_id + } + } + } + ] + }) +} + +resource "aws_iam_role" "AWSElasticDisasterRecoveryConversionServerRole" { + name = "AWSElasticDisasterRecoveryConversionServerRole" + path = "/service-role/" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + Action = "sts:AssumeRole" + } + ] + }) +} + +resource "aws_iam_role" "AWSElasticDisasterRecoveryRecoveryInstanceRole" { + name = "AWSElasticDisasterRecoveryRecoveryInstanceRole" + path = "/service-role/" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + Action = "sts:AssumeRole" + } + ] + }) +} + +resource "aws_iam_role" "AWSElasticDisasterRecoveryReplicationServerRole" { + name = "AWSElasticDisasterRecoveryReplicationServerRole" + path = "/service-role/" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + Action = "sts:AssumeRole" + } + ] + }) +} + +resource "aws_iam_role" "AWSElasticDisasterRecoveryRecoveryInstanceWithLaunchActionsRole" { + name = "AWSElasticDisasterRecoveryRecoveryInstanceWithLaunchActionsRole" + path = "/service-role/" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + Service = "ec2.amazonaws.com" + } + Action = "sts:AssumeRole" + } + ] + }) +} + +data "aws_iam_policy" "AWSElasticDisasterRecoveryAgentPolicy" { + name = "AWSElasticDisasterRecoveryAgentPolicy" +} + +data "aws_iam_policy" "AWSElasticDisasterRecoveryFailbackPolicy" { + name = "AWSElasticDisasterRecoveryFailbackPolicy" +} + +data "aws_iam_policy" "AWSElasticDisasterRecoveryConversionServerPolicy" { + name = "AWSElasticDisasterRecoveryConversionServerPolicy" +} + +data "aws_iam_policy" "AWSElasticDisasterRecoveryRecoveryInstancePolicy" { 
+ name = "AWSElasticDisasterRecoveryRecoveryInstancePolicy" +} + +data "aws_iam_policy" "AWSElasticDisasterRecoveryReplicationServerPolicy" { + name = "AWSElasticDisasterRecoveryReplicationServerPolicy" +} + +data "aws_iam_policy" "AmazonSSMManagedInstanceCore" { + name = "AmazonSSMManagedInstanceCore" +} + +resource "aws_iam_role_policy_attachment" "AWSElasticDisasterRecoveryAgentRole" { + role = aws_iam_role.AWSElasticDisasterRecoveryAgentRole.name + policy_arn = data.aws_iam_policy.AWSElasticDisasterRecoveryAgentPolicy.arn +} + +resource "aws_iam_role_policy_attachment" "AWSElasticDisasterRecoveryFailbackRole" { + role = aws_iam_role.AWSElasticDisasterRecoveryFailbackRole.name + policy_arn = data.aws_iam_policy.AWSElasticDisasterRecoveryFailbackPolicy.arn +} + +resource "aws_iam_role_policy_attachment" "AWSElasticDisasterRecoveryConversionServerRole" { + role = aws_iam_role.AWSElasticDisasterRecoveryConversionServerRole.name + policy_arn = data.aws_iam_policy.AWSElasticDisasterRecoveryConversionServerPolicy.arn +} + +resource "aws_iam_role_policy_attachment" "AWSElasticDisasterRecoveryRecoveryInstanceRole" { + role = aws_iam_role.AWSElasticDisasterRecoveryRecoveryInstanceRole.name + policy_arn = data.aws_iam_policy.AWSElasticDisasterRecoveryRecoveryInstancePolicy.arn +} + +resource "aws_iam_role_policy_attachment" "AWSElasticDisasterRecoveryReplicationServerRole" { + role = aws_iam_role.AWSElasticDisasterRecoveryReplicationServerRole.name + policy_arn = data.aws_iam_policy.AWSElasticDisasterRecoveryReplicationServerPolicy.arn +} + +resource "aws_iam_role_policy_attachment" "AWSElasticDisasterRecoveryRecoveryInstanceWithLaunchActionsRole1" { + role = aws_iam_role.AWSElasticDisasterRecoveryRecoveryInstanceWithLaunchActionsRole.name + policy_arn = data.aws_iam_policy.AWSElasticDisasterRecoveryRecoveryInstancePolicy.arn +} + +resource "aws_iam_role_policy_attachment" "AWSElasticDisasterRecoveryRecoveryInstanceWithLaunchActionsRole2" { + role = 
aws_iam_role.AWSElasticDisasterRecoveryRecoveryInstanceWithLaunchActionsRole.name + policy_arn = data.aws_iam_policy.AmazonSSMManagedInstanceCore.arn +} + + +resource "aws_iam_role" "AWSElasticDisasterRecoveryInitializerRole" { + name = "AWSElasticDisasterRecoveryInitializerRole" + path = "/" + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Principal = { + AWS = data.aws_caller_identity.current.user_id + } + Action = "sts:AssumeRole" + } + ] + }) +} + +resource "aws_iam_policy" "InitializePolicy" { + name = "InitializePolicy" + description = "Policy for initializing the AWS Elastic Disaster Recovery service" + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = "iam:AttachRolePolicy" + Resource = "*" + Condition = { + "ForAnyValue:ArnEquals" = { + "iam:PolicyARN" = [ + data.aws_iam_policy.AWSElasticDisasterRecoveryAgentPolicy.arn, + data.aws_iam_policy.AWSElasticDisasterRecoveryFailbackPolicy.arn, + data.aws_iam_policy.AWSElasticDisasterRecoveryConversionServerPolicy.arn, + data.aws_iam_policy.AWSElasticDisasterRecoveryRecoveryInstancePolicy.arn, + data.aws_iam_policy.AWSElasticDisasterRecoveryReplicationServerPolicy.arn + ] + } + } + }, + { + Effect = "Allow" + Action = "iam:PassRole" + Resource = "arn:aws:iam::*:role/*" + Condition = { + "ForAnyValue:StringLike" = { + "iam:PassedToService" = [ + "ec2.amazonaws.com", + "drs.amazonaws.com" + ] + } + } + }, + { + Effect = "Allow" + Action = [ + "drs:InitializeService", + "drs:ListTagsForResource", + "drs:GetReplicationConfiguration", + "drs:CreateLaunchConfigurationTemplate", + "drs:GetLaunchConfiguration", + "drs:CreateReplicationConfigurationTemplate", + "drs:*ReplicationConfigurationTemplate*", + "iam:TagRole", + "iam:CreateRole", + "iam:GetServiceLinkedRoleDeletionStatus", + "iam:ListAttachedRolePolicies", + "iam:ListRolePolicies", + "iam:GetRole", + "iam:DeleteRole", + "iam:DeleteServiceLinkedRole", + "ec2:*", + 
"sts:DecodeAuthorizationMessage", + ] + Resource = "*" + }, + { + Effect = "Allow" + Action = "iam:CreateServiceLinkedRole" + Resource = "arn:aws:iam::*:role/aws-service-role/drs.amazonaws.com/AWSServiceRoleForElasticDisasterRecovery" + }, + { + Effect = "Allow" + Action = [ + "iam:CreateInstanceProfile", + "iam:ListInstanceProfilesForRole", + "iam:GetInstanceProfile", + "iam:ListInstanceProfiles", + "iam:AddRoleToInstanceProfile" + ] + Resource = [ + "arn:aws:iam::*:instance-profile/*", + "arn:aws:iam::*:role/*" + ] + } + ] + }) +} + +resource "aws_iam_role_policy_attachment" "Initializer" { + role = aws_iam_role.AWSElasticDisasterRecoveryInitializerRole.name + policy_arn = aws_iam_policy.InitializePolicy.arn +} diff --git a/go.mod b/go.mod index eccb08827fe..a06ee9a2225 100644 --- a/go.mod +++ b/go.mod @@ -1,206 +1,228 @@ module github.com/hashicorp/terraform-provider-aws -go 1.22.2 +go 1.22.5 require ( - github.com/ProtonMail/go-crypto v1.1.0-alpha.2 + github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton github.com/YakDriver/go-version v0.1.0 github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.54.1 - github.com/aws/aws-sdk-go-v2 v1.27.2 - github.com/aws/aws-sdk-go-v2/config v1.27.18 - github.com/aws/aws-sdk-go-v2/credentials v1.17.18 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24 - github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.30.0 - github.com/aws/aws-sdk-go-v2/service/account v1.17.1 - github.com/aws/aws-sdk-go-v2/service/acm v1.26.2 - github.com/aws/aws-sdk-go-v2/service/acmpca v1.30.3 - github.com/aws/aws-sdk-go-v2/service/amp v1.25.10 - github.com/aws/aws-sdk-go-v2/service/amplify v1.21.11 - github.com/aws/aws-sdk-go-v2/service/apigateway v1.23.12 - github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.20.10 - github.com/aws/aws-sdk-go-v2/service/appconfig v1.29.8 - github.com/aws/aws-sdk-go-v2/service/appfabric v1.7.10 - 
github.com/aws/aws-sdk-go-v2/service/appflow v1.41.10 - github.com/aws/aws-sdk-go-v2/service/appintegrations v1.25.10 - github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.27.10 - github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.24.10 - github.com/aws/aws-sdk-go-v2/service/apprunner v1.28.10 - github.com/aws/aws-sdk-go-v2/service/appstream v1.34.10 - github.com/aws/aws-sdk-go-v2/service/athena v1.41.2 - github.com/aws/aws-sdk-go-v2/service/auditmanager v1.33.0 - github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.11 - github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.11 - github.com/aws/aws-sdk-go-v2/service/batch v1.38.1 - github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.3.10 - github.com/aws/aws-sdk-go-v2/service/bedrock v1.8.8 - github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.12.2 - github.com/aws/aws-sdk-go-v2/service/budgets v1.23.6 - github.com/aws/aws-sdk-go-v2/service/chatbot v1.2.3 - github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.15.11 - github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.15.6 - github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.12.6 - github.com/aws/aws-sdk-go-v2/service/cloud9 v1.24.11 - github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.18.10 - github.com/aws/aws-sdk-go-v2/service/cloudformation v1.51.3 - github.com/aws/aws-sdk-go-v2/service/cloudfront v1.36.6 - github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.4.10 - github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.22.0 - github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.22.10 - github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.40.2 - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.6 - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.35.7 - github.com/aws/aws-sdk-go-v2/service/codeartifact v1.27.6 - github.com/aws/aws-sdk-go-v2/service/codebuild v1.37.3 - github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.13.7 - github.com/aws/aws-sdk-go-v2/service/codecommit v1.22.10 - 
github.com/aws/aws-sdk-go-v2/service/codedeploy v1.25.10 - github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.20.10 - github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.25.10 - github.com/aws/aws-sdk-go-v2/service/codepipeline v1.28.0 - github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.25.8 - github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.22.10 - github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.23.13 - github.com/aws/aws-sdk-go-v2/service/comprehend v1.31.10 - github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.34.7 - github.com/aws/aws-sdk-go-v2/service/configservice v1.46.11 - github.com/aws/aws-sdk-go-v2/service/connectcases v1.17.6 - github.com/aws/aws-sdk-go-v2/service/controltower v1.14.3 - github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.23.10 - github.com/aws/aws-sdk-go-v2/service/costexplorer v1.38.6 - github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.10 - github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.10 - github.com/aws/aws-sdk-go-v2/service/datasync v1.38.4 - github.com/aws/aws-sdk-go-v2/service/datazone v1.8.6 - github.com/aws/aws-sdk-go-v2/service/dax v1.19.10 - github.com/aws/aws-sdk-go-v2/service/devicefarm v1.22.10 - github.com/aws/aws-sdk-go-v2/service/devopsguru v1.30.10 - github.com/aws/aws-sdk-go-v2/service/directoryservice v1.24.10 - github.com/aws/aws-sdk-go-v2/service/dlm v1.24.10 - github.com/aws/aws-sdk-go-v2/service/docdb v1.34.7 - github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.9.9 - github.com/aws/aws-sdk-go-v2/service/drs v1.26.6 - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.8 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.0 - github.com/aws/aws-sdk-go-v2/service/ecr v1.28.5 - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.10 - github.com/aws/aws-sdk-go-v2/service/ecs v1.42.0 - github.com/aws/aws-sdk-go-v2/service/eks v1.43.1 - github.com/aws/aws-sdk-go-v2/service/elasticache v1.38.8 - 
github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10 - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3 - github.com/aws/aws-sdk-go-v2/service/emr v1.39.11 - github.com/aws/aws-sdk-go-v2/service/emrserverless v1.21.2 - github.com/aws/aws-sdk-go-v2/service/eventbridge v1.31.5 - github.com/aws/aws-sdk-go-v2/service/evidently v1.19.10 - github.com/aws/aws-sdk-go-v2/service/finspace v1.24.7 - github.com/aws/aws-sdk-go-v2/service/firehose v1.29.1 - github.com/aws/aws-sdk-go-v2/service/fis v1.24.8 - github.com/aws/aws-sdk-go-v2/service/fms v1.33.7 - github.com/aws/aws-sdk-go-v2/service/glacier v1.22.10 - github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.24.1 - github.com/aws/aws-sdk-go-v2/service/groundstation v1.27.6 - github.com/aws/aws-sdk-go-v2/service/guardduty v1.43.0 - github.com/aws/aws-sdk-go-v2/service/healthlake v1.24.6 - github.com/aws/aws-sdk-go-v2/service/iam v1.32.6 - github.com/aws/aws-sdk-go-v2/service/identitystore v1.23.12 - github.com/aws/aws-sdk-go-v2/service/inspector2 v1.26.6 - github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.14.6 - github.com/aws/aws-sdk-go-v2/service/ivschat v1.12.11 - github.com/aws/aws-sdk-go-v2/service/kafka v1.33.2 - github.com/aws/aws-sdk-go-v2/service/kendra v1.50.7 - github.com/aws/aws-sdk-go-v2/service/keyspaces v1.10.10 - github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.10 - github.com/aws/aws-sdk-go-v2/service/kms v1.33.0 - github.com/aws/aws-sdk-go-v2/service/lakeformation v1.33.3 - github.com/aws/aws-sdk-go-v2/service/lambda v1.54.6 - github.com/aws/aws-sdk-go-v2/service/launchwizard v1.4.2 - github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.43.10 - github.com/aws/aws-sdk-go-v2/service/lightsail v1.38.3 - github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.27.10 - github.com/aws/aws-sdk-go-v2/service/m2 v1.13.6 - github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.28.10 - github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.53.7 - 
github.com/aws/aws-sdk-go-v2/service/medialive v1.52.6 - github.com/aws/aws-sdk-go-v2/service/mediapackage v1.30.11 - github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.12.0 - github.com/aws/aws-sdk-go-v2/service/mediastore v1.20.10 - github.com/aws/aws-sdk-go-v2/service/mq v1.22.11 - github.com/aws/aws-sdk-go-v2/service/mwaa v1.27.4 - github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.8.7 - github.com/aws/aws-sdk-go-v2/service/oam v1.11.6 - github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.11.13 - github.com/aws/aws-sdk-go-v2/service/organizations v1.27.9 - github.com/aws/aws-sdk-go-v2/service/osis v1.10.0 - github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.10.6 - github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.5.10 - github.com/aws/aws-sdk-go-v2/service/pipes v1.12.1 - github.com/aws/aws-sdk-go-v2/service/polly v1.40.5 - github.com/aws/aws-sdk-go-v2/service/pricing v1.28.7 - github.com/aws/aws-sdk-go-v2/service/qbusiness v1.6.6 - github.com/aws/aws-sdk-go-v2/service/qldb v1.21.10 - github.com/aws/aws-sdk-go-v2/service/ram v1.25.10 - github.com/aws/aws-sdk-go-v2/service/rbin v1.16.10 - github.com/aws/aws-sdk-go-v2/service/rds v1.79.6 - github.com/aws/aws-sdk-go-v2/service/redshift v1.44.7 - github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.25.10 - github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.18.8 - github.com/aws/aws-sdk-go-v2/service/rekognition v1.40.6 - github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.10.11 - github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.22.6 - github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.21.10 - github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.11.6 - github.com/aws/aws-sdk-go-v2/service/route53 v1.40.10 - github.com/aws/aws-sdk-go-v2/service/route53domains v1.23.10 - github.com/aws/aws-sdk-go-v2/service/route53profiles v1.0.7 - github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1 - github.com/aws/aws-sdk-go-v2/service/s3control v1.44.13 - 
github.com/aws/aws-sdk-go-v2/service/scheduler v1.8.10 - github.com/aws/aws-sdk-go-v2/service/schemas v1.24.10 - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.30.0 - github.com/aws/aws-sdk-go-v2/service/securityhub v1.49.2 - github.com/aws/aws-sdk-go-v2/service/securitylake v1.14.0 - github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.26.10 - github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.12 - github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.10 - github.com/aws/aws-sdk-go-v2/service/sesv2 v1.30.0 - github.com/aws/aws-sdk-go-v2/service/shield v1.25.10 - github.com/aws/aws-sdk-go-v2/service/signer v1.22.13 - github.com/aws/aws-sdk-go-v2/service/sns v1.29.11 - github.com/aws/aws-sdk-go-v2/service/sqs v1.32.6 - github.com/aws/aws-sdk-go-v2/service/ssm v1.50.6 - github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.22.10 - github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.30.10 - github.com/aws/aws-sdk-go-v2/service/ssmsap v1.13.5 - github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 - github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.25.11 - github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 - github.com/aws/aws-sdk-go-v2/service/swf v1.23.2 - github.com/aws/aws-sdk-go-v2/service/synthetics v1.24.10 - github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.0.8 - github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.25.11 - github.com/aws/aws-sdk-go-v2/service/transcribe v1.37.6 - github.com/aws/aws-sdk-go-v2/service/transfer v1.48.3 - github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.15.0 - github.com/aws/aws-sdk-go-v2/service/vpclattice v1.8.6 - github.com/aws/aws-sdk-go-v2/service/waf v1.20.10 - github.com/aws/aws-sdk-go-v2/service/wafregional v1.21.10 - github.com/aws/aws-sdk-go-v2/service/wafv2 v1.49.3 - github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.30.6 - github.com/aws/aws-sdk-go-v2/service/workspaces v1.39.6 - github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.18.6 - 
github.com/aws/aws-sdk-go-v2/service/xray v1.25.10 - github.com/aws/smithy-go v1.20.2 + github.com/aws/aws-sdk-go v1.54.19 + github.com/aws/aws-sdk-go-v2 v1.30.3 + github.com/aws/aws-sdk-go-v2/config v1.27.26 + github.com/aws/aws-sdk-go-v2/credentials v1.17.26 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 + github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.32.3 + github.com/aws/aws-sdk-go-v2/service/account v1.19.3 + github.com/aws/aws-sdk-go-v2/service/acm v1.28.4 + github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0 + github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 + github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3 + github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.3 + github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.3 + github.com/aws/aws-sdk-go-v2/service/appconfig v1.31.3 + github.com/aws/aws-sdk-go-v2/service/appfabric v1.9.3 + github.com/aws/aws-sdk-go-v2/service/appflow v1.43.3 + github.com/aws/aws-sdk-go-v2/service/appintegrations v1.27.3 + github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.30.4 + github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.26.3 + github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.2.3 + github.com/aws/aws-sdk-go-v2/service/apprunner v1.30.3 + github.com/aws/aws-sdk-go-v2/service/appstream v1.36.3 + github.com/aws/aws-sdk-go-v2/service/appsync v1.34.3 + github.com/aws/aws-sdk-go-v2/service/athena v1.44.3 + github.com/aws/aws-sdk-go-v2/service/auditmanager v1.35.3 + github.com/aws/aws-sdk-go-v2/service/autoscaling v1.43.3 + github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.22.3 + github.com/aws/aws-sdk-go-v2/service/backup v1.36.3 + github.com/aws/aws-sdk-go-v2/service/batch v1.43.0 + github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.5.3 + github.com/aws/aws-sdk-go-v2/service/bedrock v1.12.0 + github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.16.0 + github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3 
+ github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3 + github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3 + github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.17.3 + github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.14.3 + github.com/aws/aws-sdk-go-v2/service/cloud9 v1.26.3 + github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.20.3 + github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.3 + github.com/aws/aws-sdk-go-v2/service/cloudfront v1.38.4 + github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.6.3 + github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.25.2 + github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.24.3 + github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.42.3 + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.37.3 + github.com/aws/aws-sdk-go-v2/service/codeartifact v1.30.3 + github.com/aws/aws-sdk-go-v2/service/codebuild v1.40.3 + github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.15.3 + github.com/aws/aws-sdk-go-v2/service/codecommit v1.24.3 + github.com/aws/aws-sdk-go-v2/service/codedeploy v1.27.3 + github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.22.3 + github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.27.3 + github.com/aws/aws-sdk-go-v2/service/codepipeline v1.30.3 + github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.27.3 + github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.24.3 + github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.25.5 + github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.41.4 + github.com/aws/aws-sdk-go-v2/service/comprehend v1.33.3 + github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.37.3 + github.com/aws/aws-sdk-go-v2/service/configservice v1.48.3 + github.com/aws/aws-sdk-go-v2/service/connectcases v1.19.3 + github.com/aws/aws-sdk-go-v2/service/controltower v1.16.3 + github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.26.3 + 
github.com/aws/aws-sdk-go-v2/service/costexplorer v1.40.3 + github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3 + github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3 + github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3 + github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3 + github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3 + github.com/aws/aws-sdk-go-v2/service/datapipeline v1.23.3 + github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3 + github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2 + github.com/aws/aws-sdk-go-v2/service/dax v1.21.3 + github.com/aws/aws-sdk-go-v2/service/detective v1.29.3 + github.com/aws/aws-sdk-go-v2/service/devicefarm v1.25.2 + github.com/aws/aws-sdk-go-v2/service/devopsguru v1.32.3 + github.com/aws/aws-sdk-go-v2/service/directoryservice v1.27.3 + github.com/aws/aws-sdk-go-v2/service/dlm v1.26.3 + github.com/aws/aws-sdk-go-v2/service/docdb v1.36.3 + github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.11.3 + github.com/aws/aws-sdk-go-v2/service/drs v1.28.3 + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0 + github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3 + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3 + github.com/aws/aws-sdk-go-v2/service/ecs v1.44.3 + github.com/aws/aws-sdk-go-v2/service/efs v1.31.3 + github.com/aws/aws-sdk-go-v2/service/eks v1.46.2 + github.com/aws/aws-sdk-go-v2/service/elasticache v1.40.3 + github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.26.2 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.26.3 + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3 + github.com/aws/aws-sdk-go-v2/service/emr v1.42.2 + github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3 + github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 + github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 + github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3 + 
github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3 + github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 + github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 + github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3 + github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.27.0 + github.com/aws/aws-sdk-go-v2/service/grafana v1.24.3 + github.com/aws/aws-sdk-go-v2/service/greengrass v1.25.3 + github.com/aws/aws-sdk-go-v2/service/groundstation v1.29.3 + github.com/aws/aws-sdk-go-v2/service/guardduty v1.45.3 + github.com/aws/aws-sdk-go-v2/service/healthlake v1.26.3 + github.com/aws/aws-sdk-go-v2/service/iam v1.34.3 + github.com/aws/aws-sdk-go-v2/service/identitystore v1.25.3 + github.com/aws/aws-sdk-go-v2/service/inspector2 v1.28.3 + github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.16.3 + github.com/aws/aws-sdk-go-v2/service/iot v1.55.3 + github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 + github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 + github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 + github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 + github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 + github.com/aws/aws-sdk-go-v2/service/kendra v1.52.3 + github.com/aws/aws-sdk-go-v2/service/keyspaces v1.12.3 + github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 + github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 + github.com/aws/aws-sdk-go-v2/service/lakeformation v1.35.3 + github.com/aws/aws-sdk-go-v2/service/lambda v1.56.3 + github.com/aws/aws-sdk-go-v2/service/launchwizard v1.6.3 + github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.45.3 + github.com/aws/aws-sdk-go-v2/service/lightsail v1.40.3 + github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.29.3 + github.com/aws/aws-sdk-go-v2/service/m2 v1.15.3 + github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0 + github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3 + github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3 + github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3 + 
github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3 + github.com/aws/aws-sdk-go-v2/service/mediastore v1.22.3 + github.com/aws/aws-sdk-go-v2/service/mq v1.25.3 + github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4 + github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3 + github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3 + github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3 + github.com/aws/aws-sdk-go-v2/service/oam v1.13.3 + github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.13.3 + github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2 + github.com/aws/aws-sdk-go-v2/service/osis v1.12.3 + github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.12.3 + github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.7.3 + github.com/aws/aws-sdk-go-v2/service/pipes v1.14.3 + github.com/aws/aws-sdk-go-v2/service/polly v1.42.3 + github.com/aws/aws-sdk-go-v2/service/pricing v1.30.3 + github.com/aws/aws-sdk-go-v2/service/qbusiness v1.10.2 + github.com/aws/aws-sdk-go-v2/service/qldb v1.23.3 + github.com/aws/aws-sdk-go-v2/service/ram v1.27.3 + github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3 + github.com/aws/aws-sdk-go-v2/service/rds v1.81.4 + github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4 + github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3 + github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.20.3 + github.com/aws/aws-sdk-go-v2/service/rekognition v1.43.2 + github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.12.3 + github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.24.3 + github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.23.3 + github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.13.3 + github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3 + github.com/aws/aws-sdk-go-v2/service/route53domains v1.25.3 + github.com/aws/aws-sdk-go-v2/service/route53profiles v1.2.3 + github.com/aws/aws-sdk-go-v2/service/rum v1.19.3 + github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 + 
github.com/aws/aws-sdk-go-v2/service/s3control v1.46.3 + github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3 + github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3 + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3 + github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3 + github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.22.3 + github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.28.3 + github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.3 + github.com/aws/aws-sdk-go-v2/service/servicequotas v1.23.3 + github.com/aws/aws-sdk-go-v2/service/sesv2 v1.32.3 + github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3 + github.com/aws/aws-sdk-go-v2/service/shield v1.27.3 + github.com/aws/aws-sdk-go-v2/service/signer v1.24.3 + github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 + github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 + github.com/aws/aws-sdk-go-v2/service/ssm v1.52.3 + github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.24.3 + github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3 + github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3 + github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 + github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3 + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 + github.com/aws/aws-sdk-go-v2/service/swf v1.25.3 + github.com/aws/aws-sdk-go-v2/service/synthetics v1.26.3 + github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3 + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3 + github.com/aws/aws-sdk-go-v2/service/transcribe v1.39.3 + github.com/aws/aws-sdk-go-v2/service/transfer v1.50.3 + github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.17.3 + github.com/aws/aws-sdk-go-v2/service/vpclattice v1.10.3 + github.com/aws/aws-sdk-go-v2/service/waf v1.23.3 + github.com/aws/aws-sdk-go-v2/service/wafregional v1.23.3 + github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4 + 
github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.32.3 + github.com/aws/aws-sdk-go-v2/service/workspaces v1.44.2 + github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.21.3 + github.com/aws/aws-sdk-go-v2/service/xray v1.27.3 + github.com/aws/smithy-go v1.20.3 github.com/beevik/etree v1.4.0 github.com/cedar-policy/cedar-go v0.0.0-20240318205125-470d1fe984bb github.com/davecgh/go-spew v1.1.1 - github.com/dlclark/regexp2 v1.11.0 + github.com/dlclark/regexp2 v1.11.2 github.com/gertd/go-pluralize v0.2.1 github.com/google/go-cmp v0.6.0 github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0 - github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.53 - github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.54 + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.54 + github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.55 github.com/hashicorp/awspolicyequivalence v1.6.0 github.com/hashicorp/cli v1.1.6 github.com/hashicorp/go-cleanhttp v0.5.2 @@ -209,18 +231,18 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.7.0 - github.com/hashicorp/hcl/v2 v2.20.1 + github.com/hashicorp/hcl/v2 v2.21.0 github.com/hashicorp/terraform-json v0.22.1 - github.com/hashicorp/terraform-plugin-framework v1.9.0 + github.com/hashicorp/terraform-plugin-framework v1.10.0 github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0 github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 github.com/hashicorp/terraform-plugin-framework-timetypes v0.4.0 - github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 github.com/hashicorp/terraform-plugin-go v0.23.0 github.com/hashicorp/terraform-plugin-log v0.9.0 github.com/hashicorp/terraform-plugin-mux v0.16.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 - github.com/hashicorp/terraform-plugin-testing v1.8.0 + 
github.com/hashicorp/terraform-plugin-testing v1.9.0 github.com/jmespath/go-jmespath v0.4.0 github.com/mattbaird/jsonpatch v0.0.0-20230413205102-771768614e91 github.com/mitchellh/copystructure v1.2.0 @@ -229,9 +251,9 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/pquerna/otp v1.4.0 github.com/shopspring/decimal v1.4.0 - golang.org/x/crypto v0.24.0 + golang.org/x/crypto v0.25.0 golang.org/x/text v0.16.0 - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d + golang.org/x/tools v0.23.0 gopkg.in/dnaeon/go-vcr.v3 v3.2.0 gopkg.in/yaml.v2 v2.4.0 syreclabs.com/go/faker v1.2.3 @@ -244,23 +266,23 @@ require ( github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/boombuler/barcode v1.0.1 // indirect github.com/bufbuild/protocompile v0.6.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/evanphx/json-patch v0.5.2 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/frankban/quicktest v1.14.6 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -270,7 +292,7 @@ require ( github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-plugin v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.4 // indirect + github.com/hashicorp/hc-install v0.7.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect @@ -293,14 +315,14 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/zclconf/go-cty v1.14.4 // indirect - go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0 // indirect - go.opentelemetry.io/otel v1.26.0 // indirect - go.opentelemetry.io/otel/metric v1.26.0 // indirect - go.opentelemetry.io/otel/trace v1.26.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.25.0 // indirect + go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0 // indirect + go.opentelemetry.io/otel 
v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.27.0 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/grpc v1.63.2 // indirect diff --git a/go.sum b/go.sum index f6ee1a45623..6b34fe84473 100644 --- a/go.sum +++ b/go.sum @@ -9,8 +9,8 @@ github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= -github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton h1:0RXAi0EJFs81j+MMsqvHNuAUGWzeVfCO9LnHAfoQ8NA= +github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/YakDriver/go-version v0.1.0 h1:/x+Xg2+l89Mjtxl0VRf2+ue8cnHkw6jfYv49j6f7gZw= github.com/YakDriver/go-version v0.1.0/go.mod h1:LXwFAp1E3KBhS7FHO/FE8r3XCmvKizs/VXXXFWfoSYY= github.com/YakDriver/regexache v0.23.0 h1:kv3j4XKhbx/vqUilSBgizXDUXHvvH1KdYekdmGwz4C4= @@ -22,400 +22,444 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.54.1 h1:+ULL7oLC+v3T00fOMIohUarPI3SR3oyDd6FBEvgdhvs= -github.com/aws/aws-sdk-go v1.54.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8= -github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.27.18 h1:wFvAnwOKKe7QAyIxziwSKjmer9JBMH1vzIL6W+fYuKk= -github.com/aws/aws-sdk-go-v2/config v1.27.18/go.mod h1:0xz6cgdX55+kmppvPm2IaKzIXOheGJhAufacPJaXZ7c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.18 h1:D/ALDWqK4JdY3OFgA2thcPO1c9aYTT5STS/CvnkqY1c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.18/go.mod h1:JuitCWq+F5QGUrmMPsk945rop6bB57jdscu+Glozdnc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 h1:dDgptDO9dxeFkXy+tEgVkzSClHZje/6JkPW5aZyEvrQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5/go.mod h1:gjvE2KBUgUQhcv89jqxrIxH9GaKs1JbZzWejj/DaHGA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24 h1:FzNwpVTZDCvm597Ty6mGYvxTolyC1oup0waaKntZI4E= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24/go.mod h1:wM9NElT/Wn6n3CT1eyVcXtfCy8lSVjjQXfdawQbSShc= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 h1:cy8ahBJuhtM8GTTSyOkfy6WVPV1IE+SS5/wfXUYuulw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9/go.mod h1:CZBXGLaJnEZI6EVNcPd7a6B5IC5cA/GkRWtu9fp3S6Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 h1:A4SYk07ef04+vxZToz9LWvAXl9LW0NClpPpMsi31cz0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9/go.mod h1:5jJcHuwDagxN+ErjQ3PU3ocf6Ylc/p9x+BLO/+X4iXw= +github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= 
+github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= +github.com/aws/aws-sdk-go-v2/config v1.27.26 h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= +github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 h1:kNemAUX+bJFBSfPkGVZ8HFOKIadjLoI2Ua1ZKivhGSo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7/go.mod h1:71S2C1g/Zjn+ANmyoOqJ586OrPF9uC9iiHt9ZAT+MOw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= 
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9 h1:vHyZxoLVOgrI8GqX7OMHLXp4YYoxeEsrjweXKpye+ds= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9/go.mod h1:z9VXZsWA2BvZNH1dT0ToUYwMu/CR9Skkj/TBX+mceZw= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.30.0 h1:l5NODu13ZXBo3SIuWlSqM8W15UkmGb1CfoT9LMePiGQ= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.30.0/go.mod h1:+J6D4VAx1rypnSo1AI7XIx4v3al7RwEmTT45+hKtsuo= -github.com/aws/aws-sdk-go-v2/service/account v1.17.1 h1:4NJR1yu7rp5FxJqnqpRaSnIiq/EL26EBaGFnLh3TVlM= -github.com/aws/aws-sdk-go-v2/service/account v1.17.1/go.mod h1:RP2gSKo6kGbTkrDVhsK7BDmhobfBc+0O1dVI1VGNR0U= -github.com/aws/aws-sdk-go-v2/service/acm v1.26.2 h1:BAAPzljqPgzr4vJl1aI+qwWArot2Ev7jZy9i69Bysvo= -github.com/aws/aws-sdk-go-v2/service/acm v1.26.2/go.mod h1:UxBKNLjXNINYbDrT7DG7ZHYEK2qOT1m6XJeKY+LitbQ= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.30.3 h1:bQ+4RthpbM3w5+1Z9l/OoANG1J5Nl3oNVCL70dZbEV0= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.30.3/go.mod h1:JV4s3XObODdRk5gtgA5uKNSLhCqi5WRvQgnzhkMvJEk= -github.com/aws/aws-sdk-go-v2/service/amp v1.25.10 h1:aGSGpPg6aGoe/v42C2iSQqzDI778m0YpMolinM5SeIE= -github.com/aws/aws-sdk-go-v2/service/amp v1.25.10/go.mod h1:wyfNo2hj/f8yPSdberXMJv60eUG6xGr3cDLVF4jJivY= -github.com/aws/aws-sdk-go-v2/service/amplify v1.21.11 h1:B4BzoxzV8vio6V07yEDEqpVrhd2ciD3b4OkF2QGpgkA= -github.com/aws/aws-sdk-go-v2/service/amplify v1.21.11/go.mod h1:Ev3460rW8/OmH3bJBkMZDgZR48c6wl1d4DxA78h+CWM= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.23.12 h1:B9YQUaFlg5YAEukEogYG5E+C6GHHAMNbS1g82rgxRSg= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.23.12/go.mod h1:zwkGhImFmKYyfIjJb2jBVd+cQ+pq+APQNryk9Tk57Ps= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.20.10 h1:7rAYDeRvzVKJcnNDT/xOX1px9k/scn4Ya4NtonV6PWg= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.20.10/go.mod h1:hYMrp35CMcqnG1/+ZuaqOCl8YoGdb0+OfB2o/CbT7AU= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.29.8 
h1:VlCuJtG4WFXaYWqqX/FK6L+yaS8hRJNA9Q3c0Vrv018= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.29.8/go.mod h1:n46CP0fdiMHscrLc9E4E/AW90LxtoD8KAs8GBOlh1ZU= -github.com/aws/aws-sdk-go-v2/service/appfabric v1.7.10 h1:ZH680e/x/CCEZuumTWovuPFKvHjxFe6FXOjD7JOYr7Y= -github.com/aws/aws-sdk-go-v2/service/appfabric v1.7.10/go.mod h1:gNOb1nyhDzbyNir5SOA+O502Gwy8HRLCZZiWF856+hw= -github.com/aws/aws-sdk-go-v2/service/appflow v1.41.10 h1:ozylppjAYagJKcnCEQL8pKPT2b4B0IeeOwCYy/ZMTO0= -github.com/aws/aws-sdk-go-v2/service/appflow v1.41.10/go.mod h1:MeLW0NK8MPEUQm7XnZniE5rQRLiGKbu49kHWWdd5lzI= -github.com/aws/aws-sdk-go-v2/service/appintegrations v1.25.10 h1:tCGbQBGGMcgHZmSLcRI4lvU/y3l36z1GHWd8w9Wl7uY= -github.com/aws/aws-sdk-go-v2/service/appintegrations v1.25.10/go.mod h1:DItbH9nkfmNQJKfARIjF8kktLUOv0lQ8oLeCoHX6P9Q= -github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.27.10 h1:ry4D6RPuF6FrVDaFaKgwkLYV5BrJE/rt3m6K6FQYZqw= -github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.27.10/go.mod h1:0pzgHdeoNmeBekRPJl+DRXNJD6D9FqTcD+tFkK81NRg= -github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.24.10 h1:5XwvSPLjQleCuojVnUqMqYiD7UHfrc29GOZmpj/bB0c= -github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.24.10/go.mod h1:Xrzju78vcomnDMXDJ9T6qWk6wHJ6HgGUQ4Fh9reSFL0= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.28.10 h1:wGqe+j9Ab0kSbrSTI0AlLbd1xMp8vj916/pAAe2F48I= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.28.10/go.mod h1:QqiGYjaeD3O+DGHeij4FZgMEW+pzqJUCQBbcseLQeJU= -github.com/aws/aws-sdk-go-v2/service/appstream v1.34.10 h1:JhIT4EGxtjpmIC9l9iLWGw0j8FZezbsqADa6XwkD870= -github.com/aws/aws-sdk-go-v2/service/appstream v1.34.10/go.mod h1:stS2ZSwmXOl+IeWEQWFyo8++JVSuKwuJpMF+EJTNLco= -github.com/aws/aws-sdk-go-v2/service/athena v1.41.2 h1:PiTlzkJrnYGHucoQQ8zDvgf/vKDgDps2FVr3GIWIWdE= -github.com/aws/aws-sdk-go-v2/service/athena v1.41.2/go.mod h1:XCkSMZRqquO7222ELibKBj+bDjg9QeS2wkVKcW7z2Mk= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.33.0 
h1:vrvI5gUkDC9s2ogMPTgpLaAca3V49TMi5JkopstiOkA= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.33.0/go.mod h1:w6hDogXBS5N3C/OsuPFbmjzBH5B/MHnZkAsO5aerB6k= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.11 h1:n0OjQw2HMbBr1g2M3XzzNTV8srYSoLkYgV48jiLkqbQ= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.11/go.mod h1:qaQkZEptpHa0HhooCCONUjxvYbkgHtDuG/cCDvJt6UE= -github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.11 h1:EphDT9zNEntQAikIWgSm368R6CP403jtG+f7k9xrtLk= -github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.11/go.mod h1:9nh1OAv8xttmIE3AJ2hWAROcSdsrPMyHE+4tLW7BO90= -github.com/aws/aws-sdk-go-v2/service/batch v1.38.1 h1:AJUFYzHn6B6vYa3/MHZkdoAx+0QExCKXiO7YQSIsMN0= -github.com/aws/aws-sdk-go-v2/service/batch v1.38.1/go.mod h1:3EYTC8QgdDTgwytlDYvWUvSTgmyQ/4V5rCJlma5ZTvk= -github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.3.10 h1:oBaVBnBvkDh/7gNz7Fs6EbrVdMMfnysCoach9u9B0zQ= -github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.3.10/go.mod h1:ukyl81iTQhkgiZydbzFTdh6ddHza0HQO/vffH37X5GQ= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.8.8 h1:xYOVGI6TC1gfli10NShlRsd80pe1Fp/t+LXJNIshihI= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.8.8/go.mod h1:jlgZZlnucnhTwwkt/MLIYT9GRq+hgjkkaLNwWaqp7lk= -github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.12.2 h1:5mpsZ7TDvTw1TpT7DnSQTUDMluVPZdccKzhXGThQdho= -github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.12.2/go.mod h1:sR0KPW2UZmFP1A9xAIO9lQIwh/uzmGy9hTenzuyems0= -github.com/aws/aws-sdk-go-v2/service/budgets v1.23.6 h1:2NdUhw2XHwuT2sK1849T4FEl3dNB6mebOWYaQV/T++4= -github.com/aws/aws-sdk-go-v2/service/budgets v1.23.6/go.mod h1:X69Kb7PDBlJCYyAh1nUS5oEjLplyvIxxTOmEOXVZ7uI= -github.com/aws/aws-sdk-go-v2/service/chatbot v1.2.3 h1:MU/H6Bopqtfu7SOrVy1fZ/eZzX1gKKHqXIZAerKxomU= -github.com/aws/aws-sdk-go-v2/service/chatbot v1.2.3/go.mod h1:Nup6J+0ugC1ddxf04M4e+Tl8KStEJ2m8DIuwIaFwqBc= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.15.11 
h1:QWZ/DP2bVhPMKHfeEI6sZ7QOxCXpW36HavOqKCo2wTg= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.15.11/go.mod h1:pQZUK8Lm31nCPFLsDnZUDvmRxw/GGLqF7GtZvEZPB3A= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.15.6 h1:u4fcjpNEk1X0K2x7BvmssdWgavB65KaeU+t3Qi3juUc= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.15.6/go.mod h1:Wh1ryEf52xU0QD97S9+IGGk8Rv8z0zNmMsXyu0ADTmM= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.12.6 h1:NxemArZLwYuKFSSbbD9tIci6qVvCQtJcEZc2jg/Nc08= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.12.6/go.mod h1:eNWgs4jaUQhfmFU6kail21dJ+zookZyxmQReFajmn7w= -github.com/aws/aws-sdk-go-v2/service/cloud9 v1.24.11 h1:tEWBfvLgInrnrNPIN1dHe5T4o6t7tPrh6wMGUaBA1S8= -github.com/aws/aws-sdk-go-v2/service/cloud9 v1.24.11/go.mod h1:HApCCrEvcY5kj+d0S/a7bjcn5XoD6JYKHGrReD+R4E4= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.18.10 h1:D8E8QEHZ/2yt7GEOdlsQMypCNYs6RoQLlV2UBDbBWV8= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.18.10/go.mod h1:lQc/tta6L/lJIOJEd+coKVFi5qum1oNe/8EXBNtK68I= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.51.3 h1:HlsyxSED4xEtAq7WsFh7oMuBg2OnK+Q2thz0MQR5uAY= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.51.3/go.mod h1:KiLdmslIONL5WXMrelwfAzisbZ5UckYT9FGtZJASKnk= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.36.6 h1:dYxK3oAOXbryNOs4qnWugEe6oWh50PWLPe/Y1CoJGzU= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.36.6/go.mod h1:tt1D2vhp2ZJbQ875VVxsXgx8z2OWaD4kgkSNqQd0EOc= -github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.4.10 h1:7aB96DEtCf33kX1i5zXE30UZNStVz6EuRk23e1gGfkY= -github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.4.10/go.mod h1:wt0o+YJBTQocmC/8rixGl9Ovddw5mfz0IghtpvS3sRw= -github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.22.0 h1:jYoIwVX+AfDz4du3FOcPqRxYG93a/5IsrGc1ZWyGy6c= -github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.22.0/go.mod h1:x2vWbMhG6oBV4SZ51ew4X0Wm8dig5d4zM5Z9W8HOCEc= -github.com/aws/aws-sdk-go-v2/service/cloudsearch 
v1.22.10 h1:x2Z2nDm6Egfu9/VIHRWsHj9aeQe/XAc2Ox3uId/4/HU= -github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.22.10/go.mod h1:kZHeNQxC4Kynj/C/FN9L6I7UloX3l0geJrx66diSNq0= -github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.40.2 h1:oUpoMnt8H30Th/P+goSYB57aaIMHgO0ri0Bs/zFDo30= -github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.40.2/go.mod h1:NlPpu+9PsQp311DfPxg6gvE0NW2E4xdVSWZmu6pv1dc= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.6 h1:UVjxYe8VGpwXYcmBcciBHlQrNssdEvntXCPWmnRR15U= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.6/go.mod h1:4V6VDA0kZavRn71+sLpVna75oobnlG+gwtnNcBwZhu4= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.35.7 h1:kG3A4w9GMub28Cn9k0M5c0F1wQLbTCHMvsb9FlUXGu0= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.35.7/go.mod h1:Ibm/16D/pKg0k9InRCkG6DATLfHGMRWJ0QVS06ppVjs= -github.com/aws/aws-sdk-go-v2/service/codeartifact v1.27.6 h1:7h/vvPE3FmutPx1hz9ZiUWbIutlBnYe9cpnOvapV++s= -github.com/aws/aws-sdk-go-v2/service/codeartifact v1.27.6/go.mod h1:QFlahZ+Y+RempIF6zMcl/G9/r026ERriqikzRZjDI10= -github.com/aws/aws-sdk-go-v2/service/codebuild v1.37.3 h1:M9D+qSdebooflTy5FZKjjc0ScIu4rY8wft4pProSOfo= -github.com/aws/aws-sdk-go-v2/service/codebuild v1.37.3/go.mod h1:oLXvRVcYUh9Jct6B4yBtsOrj2FECvBXQcTMnpHZrUl4= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.13.7 h1:1ENRDfamQrcHJLuSRBuNoiSjMpmGHMXY944F/XN4wII= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.13.7/go.mod h1:JWrdgdMeoK2tKjF4HJ6YX9zA+7Ibnrs3UDjhAjAUQSg= -github.com/aws/aws-sdk-go-v2/service/codecommit v1.22.10 h1:vlf+RZWguYZJzbC95Zoddg3elMg3ZmH8nSip9LF4TkY= -github.com/aws/aws-sdk-go-v2/service/codecommit v1.22.10/go.mod h1:jN+rcF5OPMwDpAJ/uK16MAUis/ByjN1YB/fmPISRZ3U= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.25.10 h1:Luq+/0wysA7vYfrgp+z6K1sbSMvAGsM8lyfD+Ps/q3k= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.25.10/go.mod h1:Z/PUeQGN2+03OeszXPaNB1VPyDcPeaYlqVfV/pfpt4s= -github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.20.10 
h1:ct/fxqFdGYXzIlX0p8mD046Mq4P0w5gckhZ7agfdQ/w= -github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.20.10/go.mod h1:7vAwI4YVdWl8cB+bmtoxL6UaJ/hsK8L5YJwYYDlN4Vc= -github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.25.10 h1:Za3irwaGoBfCKXJqeB9NjBlVu60a9FPk8aGI7c8KlsA= -github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.25.10/go.mod h1:Z+hqC2q0pc/cjVHEN/OGEQOiyIHHlHqajV6agFDWxAA= -github.com/aws/aws-sdk-go-v2/service/codepipeline v1.28.0 h1:DPb5NN5t7oG01Dskb1qaURIAMA6GG7Y7OuVJDZZnLHI= -github.com/aws/aws-sdk-go-v2/service/codepipeline v1.28.0/go.mod h1:wiyjnfFARpwbUaFukzDE/vFlIsT+18D34fR1jfZhLTk= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.25.8 h1:J6ToNokSFf2TooLPCbu0gE8pxNm2eCx1KPeiPQttI/o= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.25.8/go.mod h1:AbHoQZ/Q3D7EuTv0s9G8Hq2MnLPuKh7CtTBZpQeZJOA= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.22.10 h1:FRNxZelyamjes/KzIx34Gf3MDJQhrQVRPDeXdl4Vmno= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.22.10/go.mod h1:5O6onn9kBfuiAmKoQFRlwzyLtGL7esOY785J2RtporE= -github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.23.13 h1:pp2Id7OxLkuBt/RwxTljUnrZI/0bGPwvew1qiqRK06k= -github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.23.13/go.mod h1:hYHhbLzJbPEqtn5AFIX3gxUAVxjZiIX/k0qkrtYPMAE= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.31.10 h1:V4TT4lZvrK/+FWiauEzKhzkmcOihlWhLI99ok6DC2s4= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.31.10/go.mod h1:NMZf+QBFmS1wKKZe2usxSi2AQ/CMqauSFAawT8bWb9g= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.34.7 h1:j2o7wDgqlk0o1kYnnJAmfvRA7ZB8CfQv4bUBFe/0vc0= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.34.7/go.mod h1:a/820gyN3ykj8uh/a+W6QZtq7shWsI/BeYHsRtoQyxw= -github.com/aws/aws-sdk-go-v2/service/configservice v1.46.11 h1:oEpDPoRfF7H8kuRT3LiQ7cfXVAgvTSIcxkxxTllNpvQ= -github.com/aws/aws-sdk-go-v2/service/configservice v1.46.11/go.mod h1:9iyVzn5BgTmy78KTlYJPMqP9ZPm6ripPx9DlM0f3PDY= 
-github.com/aws/aws-sdk-go-v2/service/connectcases v1.17.6 h1:xyRQg7ofUyvUvKTcFIoIkZjDnPyk9attgie20xf1TvA= -github.com/aws/aws-sdk-go-v2/service/connectcases v1.17.6/go.mod h1:bCdstM5DmKcnyJ4WtXtuZ2pGW5Ysgj+jQgjcwI8gyFk= -github.com/aws/aws-sdk-go-v2/service/controltower v1.14.3 h1:5TQoE8Jqa1faLxxF3JEjrLnogd7yuXg/OQh87145qPc= -github.com/aws/aws-sdk-go-v2/service/controltower v1.14.3/go.mod h1:SRnSiyiSHUoo57mdNF8NwLhakUGYbD47FVa5nOi3QM0= -github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.23.10 h1:2v7SRmVjQkKUz/+Iz1o4CydiXYZ3YRnIT2otTXTQzAs= -github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.23.10/go.mod h1:6z5YYMxC98CbbGVnl4ZTSkKQ8doQGgG+vrMxgjepdHw= -github.com/aws/aws-sdk-go-v2/service/costexplorer v1.38.6 h1:QmRZhtv8MJjzwBvtYcNygr2qEy3+efdW9VPNVBdRtyI= -github.com/aws/aws-sdk-go-v2/service/costexplorer v1.38.6/go.mod h1:Hw7bdrxR6Whnc1Gm/dL+3O47yvxv6fq691QDuYP4CRk= -github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.10 h1:Dureguz7Rt4oCM6wjJ+8wnHVxzaMxE1yXoyx0dl6L40= -github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.10/go.mod h1:xHvn/2S7UoUmuCmhOjFhxUJvVIYcFveju/wDRTJLUGo= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.10 h1:ihZtKZKPLNUFBzvRoZ0kXNdO3scdNwmLrZlNDCkYIj0= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.10/go.mod h1:/9TqI2Eb2CsFDP+NT6364fdmhy6/ENIRuOnyVInI8tI= -github.com/aws/aws-sdk-go-v2/service/datasync v1.38.4 h1:XPbgYirjL7lgxww/Giiz5+sxvR+PsrUi7hEd0G2O7Tk= -github.com/aws/aws-sdk-go-v2/service/datasync v1.38.4/go.mod h1:rKicbpvp17KIjesRGNiZTrbKVPkcUvmgsHro0kD2xxw= -github.com/aws/aws-sdk-go-v2/service/datazone v1.8.6 h1:+HLFrID7P2vMu4LDXQ3E5O0r2hlz6CpdSD1Lw/3EBpE= -github.com/aws/aws-sdk-go-v2/service/datazone v1.8.6/go.mod h1:qPKGqWEw4jbUCQbDg05JUtAct5X3N3tEfNF1JUPSqYY= -github.com/aws/aws-sdk-go-v2/service/dax v1.19.10 h1:0HyYc5poHunpMVyLao0aFUPx1T6S7OoD42T5/BanD6I= -github.com/aws/aws-sdk-go-v2/service/dax v1.19.10/go.mod 
h1:e28ilym+zzgzWaINgcaGR6xhZDk/JD6YzhNlOCLvYwg= -github.com/aws/aws-sdk-go-v2/service/devicefarm v1.22.10 h1:xMDzASghupXMJCyD08fHbGzT0lXYghMvbGGtNcgWc2o= -github.com/aws/aws-sdk-go-v2/service/devicefarm v1.22.10/go.mod h1:2dpVfQeot1pkyC3nlxLa/Re+Cj3+nBkyTmLV/QDQSkE= -github.com/aws/aws-sdk-go-v2/service/devopsguru v1.30.10 h1:uvlI0w0PGHmHMEjvEfUyBWpR9xdabJoPSjX1mps3Z9M= -github.com/aws/aws-sdk-go-v2/service/devopsguru v1.30.10/go.mod h1:jORIT/Q3NE4NFozKMvf5WUH0agl9oyB0w8nundUs5x0= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.24.10 h1:z3dYRIakCsFQtjjR7nUYSHnzBPnSPdUYH22xt3EFUtA= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.24.10/go.mod h1:/QLtpNRcVdFov0Lg8hwzryhLzdoLHS6pdsy9nT36oOo= -github.com/aws/aws-sdk-go-v2/service/dlm v1.24.10 h1:8ON2Utun4Q4FW2K6fI7EunVNiNipDQTZHd7VtwifGyw= -github.com/aws/aws-sdk-go-v2/service/dlm v1.24.10/go.mod h1:U24MUfNJt2URjXoFLu2NMPKPDgRUt7ZiAiYZ2jApx8Y= -github.com/aws/aws-sdk-go-v2/service/docdb v1.34.7 h1:1foSApaBUak26Y9xinJKRuf+On2wKQpfCdCeH7BIGpc= -github.com/aws/aws-sdk-go-v2/service/docdb v1.34.7/go.mod h1:2hCT2jx7fl7DyrY0oZjO3OOK7h+/SCvLUWnkU7zUm1A= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.9.9 h1:W4e41cUvIN/2f9sAhmDMdL5uqQo7V8nofT+TxdjjXhE= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.9.9/go.mod h1:xvM7Frdhg94+HGSNoOU3dj9s/YeB/e+AgUgG+E44wqc= -github.com/aws/aws-sdk-go-v2/service/drs v1.26.6 h1:MdrimlaasKFQNc5R4P7KPHs88oI/S8s/DqeW/46qkR4= -github.com/aws/aws-sdk-go-v2/service/drs v1.26.6/go.mod h1:SOC8l4nWwE5t4tvgiXQdPkcMye8creUQA/dOr68pWaY= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.8 h1:yOosUCdI/P+gfBd8uXk6lvZmrp7z2Xs8s1caIDP33lo= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.8/go.mod h1:4sYs0Krug9vn4cfDly4ExdbXJRqqZZBVDJNtBHGxCpQ= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.0 h1:kN8Jd9H1LD/zlZEaoLpHJjsaKQjzYA1TgzlCB12BCw8= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.0/go.mod h1:gYk1NtyvkH1SxPcndDtfro3lwbiE5t0tW4eRki5YnOQ= 
-github.com/aws/aws-sdk-go-v2/service/ecr v1.28.5 h1:dvvTFXpWSv9+8lTNPl1EPNZL6BCUV6MgVckEMvXaOgk= -github.com/aws/aws-sdk-go-v2/service/ecr v1.28.5/go.mod h1:Ogt6AOZ/sPBlJZpVFJgOK+jGGREuo8DMjNg+O/7gpjI= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.10 h1:dNXYTooy/H6NSIJ/zZqAVk/Ri4G4mqEWoz3btXhqI7E= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.10/go.mod h1:6JWi6AO/j/YgTOdu+XM2fRfoZTmferahXDwmravqSwQ= -github.com/aws/aws-sdk-go-v2/service/ecs v1.42.0 h1:FvAROmrc7vjL5I90Ag4JR0/7NRnXYU9OpPOLUSzxnos= -github.com/aws/aws-sdk-go-v2/service/ecs v1.42.0/go.mod h1:qxSuZNUGNmgr4Yt6rK2n8F9w7pWn5eOqo8C+NmF9rmg= -github.com/aws/aws-sdk-go-v2/service/eks v1.43.1 h1:RfpqqfRmDw4RMvNHmPesDBuMeaVDQhWgepAn6tP0aYI= -github.com/aws/aws-sdk-go-v2/service/eks v1.43.1/go.mod h1:oxKaTqwF6pHUbgA6/aOwVEZFK+Okv4tZMdb9m6AHjlg= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.38.8 h1:y8kZastREinFhp2jcLjh+TeDQY4WpQ5qlB55XoDOj5o= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.38.8/go.mod h1:kg37oVoLxcdwfXXAsboA9cj6IfgFoc0PWwltp9xy/rY= -github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10 h1:6MoPaz2J4C47Gieucud6SFEqhX4yZ9+hKQZzZvLbSy8= -github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10/go.mod h1:uW7bugGF+vIsQdE22S+akMpsB+eZsSjJ6Kv/1lKQT50= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3 h1:Avh8YS+sgb2OKRht0wdNwY8tqtsCzVrmc8dG8Wfy9LI= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3/go.mod h1:HbtHaw/hnNPaiqcyYnheILVyn81wOZiX9n2gYF5tPmM= -github.com/aws/aws-sdk-go-v2/service/emr v1.39.11 h1:PLsio+PhcBMUVjRypTYnZUAZ3qPYVWKmIgp3B8ZZxRM= -github.com/aws/aws-sdk-go-v2/service/emr v1.39.11/go.mod h1:c4P6499AxhWdFqbnZ25WX77JfVEWFHWqWj9wITeFqlI= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.21.2 h1:kl5gXTCoi2dEUplPE+p+dpdD/BiOWsp1zKNfd3Onhn4= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.21.2/go.mod h1:Z2lS6azbbFQslXAH586gQoU2Lup1IviscRXROJMeL6k= -github.com/aws/aws-sdk-go-v2/service/eventbridge 
v1.31.5 h1:2Qpq1XOClfrQglKh5SgQMSGMD0KLII9pbAw8FRgK/Fs= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.31.5/go.mod h1:BNzkR8iCd5MUGeo3oMLx8wo+S4EtAsIX2XnAuSdBX/0= -github.com/aws/aws-sdk-go-v2/service/evidently v1.19.10 h1:kRXBNhlhmAihqmXWQD3WCzlq69G+4kaaymDjDSIWQMU= -github.com/aws/aws-sdk-go-v2/service/evidently v1.19.10/go.mod h1:xmn6CgBAvNyXpku7wbOV5BXF/tN/Q0pKF3n9P/Nf5QA= -github.com/aws/aws-sdk-go-v2/service/finspace v1.24.7 h1:dlGh182hZoJIFxlwNjRTUJUQkKvRLoUOiDyGkc6F7No= -github.com/aws/aws-sdk-go-v2/service/finspace v1.24.7/go.mod h1:XPu6lBGrnwZyH2qn5Twk1x8IVYzRWQvXzQx/uRChk+s= -github.com/aws/aws-sdk-go-v2/service/firehose v1.29.1 h1:EULt+Eb7La2to3yiwC/m3Sn2+qEjaFN7IOQxjFk2290= -github.com/aws/aws-sdk-go-v2/service/firehose v1.29.1/go.mod h1:ahhanMBeTZy6yRPzKVybiothdO77NvOCyZMpEMfj2ow= -github.com/aws/aws-sdk-go-v2/service/fis v1.24.8 h1:ajYYW5orv4QkEm9Hr2elpJ2OoTIlcLDa7q9nIEMgXGY= -github.com/aws/aws-sdk-go-v2/service/fis v1.24.8/go.mod h1:0GkfIF1n+BIh/xeWbpWoWlD+Mhk7haXQNH11G7BQTGM= -github.com/aws/aws-sdk-go-v2/service/fms v1.33.7 h1:SMf+LPFIiq1tfNo0rhV6YrlgnL7H6w7CSgMuJwqClEQ= -github.com/aws/aws-sdk-go-v2/service/fms v1.33.7/go.mod h1:NxMT3if6WnGIRRqEn74imFVzImksNcVl+NHKvlBvdT0= -github.com/aws/aws-sdk-go-v2/service/glacier v1.22.10 h1:E19vpAzC5QDng2IlfM6aNMBljv1kFx9O7iydbvMUk14= -github.com/aws/aws-sdk-go-v2/service/glacier v1.22.10/go.mod h1:14pqq/Xg2S/hlu9q67ePsGw0OB6SJppEqDJwxLEivvI= -github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.24.1 h1:bDVYY5tSzBnLAcdY/9nZd1gM4O+a8IVk2tUfcS0gJ1A= -github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.24.1/go.mod h1:ct31bulbJED7Z4Vdtr+Jtvt6bPRB5PdeH96NNm4wkOc= -github.com/aws/aws-sdk-go-v2/service/groundstation v1.27.6 h1:2GSPMCtOlEVwltVhqUT1x6CFKpFi/5D2yFhd/PqaSic= -github.com/aws/aws-sdk-go-v2/service/groundstation v1.27.6/go.mod h1:gFFqhE7646BA034Im+oTpkfnefC1AR/E4ZUTs/sV7lc= -github.com/aws/aws-sdk-go-v2/service/guardduty v1.43.0 h1:Jz/FJc/n27a9j1du1JxtBaMb/Wg/dSkWPbrfn2Y7CT4= 
-github.com/aws/aws-sdk-go-v2/service/guardduty v1.43.0/go.mod h1:tNfynl7aA5gEHA7yJZiEICHYMkITKSc0Z+vic+YpW0M= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.24.6 h1:AlmacWcocqb7vowwTlYtVR9AbYWW4vFExIoD7+kFR4g= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.24.6/go.mod h1:jp0Co1hHoXMEQTzyRICGBHvN8owh1QISx56d79dulFU= -github.com/aws/aws-sdk-go-v2/service/iam v1.32.6 h1:NRlKKQ/BPHPqsuN2Hy6v4WA8/bsRTP0j8/BFPBC5+SU= -github.com/aws/aws-sdk-go-v2/service/iam v1.32.6/go.mod h1:S+s7/UH0UIqRX4GyXvZihMJNR9nqlB0kxO4NKSFeRak= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.23.12 h1:UPOu53s56w1lIOKMaVfvOF4/4Ku3j5ZwKc9gWLkLUEM= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.23.12/go.mod h1:zx7M4pSjEGDxTwwREKVb0apz/2amwWoiewD+PztFvps= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.26.6 h1:uNhB5VBE/O72F3Z7sg86R6CytbceBm32gfjO6PXfILw= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.26.6/go.mod h1:h+gR0kPQnx2Tm5YPrYhb3W8ufqOTM/jlbHS/4WfUQgo= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11 h1:4vt9Sspk59EZyHCAEMaktHKiq0C09noRTQorXD/qV+s= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11/go.mod h1:5jHR79Tv+Ccq6rwYh+W7Nptmw++WiFafMfR42XhwNl8= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.10 h1:+ijk29Q2FlKCinEzG6GE3IcOyBsmPNUmFq/L82pSyhI= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.10/go.mod h1:D9WZXFWtJD76gmV2ZciWcY8BJBFdCblqdfF9OmkrwVU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 h1:o4T+fKxA3gTMcluBNZZXE9DNaMkJuUL1O3mffCUjoJo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11/go.mod h1:84oZdJ+VjuJKs9v1UTC9NaodRZRseOXCTgku+vQJWR8= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared 
v1.17.9 h1:TE2i0A9ErH1YfRSvXfCr2SQwfnqsoJT9nPQ9kj0lkxM= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9/go.mod h1:9TzXX3MehQNGPwCZ3ka4CpwQsoAMWSF48/b+De9rfVM= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.14.6 h1:S+tywpOd723Gqg0xIg5QePGWKQ179kdj8yc0cI0ChI0= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.14.6/go.mod h1:xBJfeB8hPTEVyxGeBrZn9lO11UjFlC6yN8fm+LMuDl0= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.12.11 h1:EBpzcF6XrSgCUWvPSJBPxcRxgU0FbZya2KmHXnrXhOg= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.12.11/go.mod h1:EtWeluyFt+mbORnwybcy/0gmm3psrGPWUOZI4771q0A= -github.com/aws/aws-sdk-go-v2/service/kafka v1.33.2 h1:MP0DahXgJWKGv1/lFkWnO+Koj4fCVxe0Tcap6KlmpYw= -github.com/aws/aws-sdk-go-v2/service/kafka v1.33.2/go.mod h1:hxzW4JuArNI/W5i8scwr0BvYhJXhtntyMNSXnxJ4rcc= -github.com/aws/aws-sdk-go-v2/service/kendra v1.50.7 h1:aH+HH9kXs3AFj51H+NT4izexEDYULpoG4L+wKZ9SXAw= -github.com/aws/aws-sdk-go-v2/service/kendra v1.50.7/go.mod h1:VEnRGR182kFe23M6tA7B+3JN8bvtrDNkBLvlnTpKcbM= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.10.10 h1:aytBO6+Ex86UOstDfm4KxTD3sPFxdWcT9ImgbdPht4c= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.10.10/go.mod h1:p8edp/FOKMmGTWOSj4KWtum5Rgv9iE4p7cpdUoz0N+w= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.10 h1:lmp5qBDoJCLsPwKrYNe6zbHnNvW5jzz/xS+H0jkoSYg= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.10/go.mod h1:CUWfw8B25XToRN7+sg092F9Ywjvz0PT4veHXBQ2KE0A= -github.com/aws/aws-sdk-go-v2/service/kms v1.33.0 h1:VFbLMhXy61mHzCjgBaJFnGOhOvcwZWnYemtAiXiQEh0= -github.com/aws/aws-sdk-go-v2/service/kms v1.33.0/go.mod h1:uQiZ8PiSsPZuVC+hYKe/bSDZEhejdQW8GRemyUp0hio= -github.com/aws/aws-sdk-go-v2/service/lakeformation v1.33.3 h1:wieZjsYWmw330AVbgkIbTQXWacUmTZFrVKqnWBef7WU= -github.com/aws/aws-sdk-go-v2/service/lakeformation v1.33.3/go.mod h1:h7rs2zd6iDs8a9zjQ+JZ1hYBStUxUm+8jTNwpfSZY7E= -github.com/aws/aws-sdk-go-v2/service/lambda v1.54.6 h1:UMu5aeSubjM9geSuPCGOgBAZa0JvsXxJBFXmKgUuisM= 
-github.com/aws/aws-sdk-go-v2/service/lambda v1.54.6/go.mod h1:fWbFM4/v+IgUW+p4TooAXuhmiQyC5qxMV5gUqxDII2g= -github.com/aws/aws-sdk-go-v2/service/launchwizard v1.4.2 h1:OavF0RBMhcuArrkGSGnRsk7BDZAqg3BmDI6E7KgAcVs= -github.com/aws/aws-sdk-go-v2/service/launchwizard v1.4.2/go.mod h1:DIcTjNG5V6jZxpFWwYkG4/k0CbsqPNJlj5koUQnmu+g= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.43.10 h1:uX6vAyjLRTlvnrp+MdU2pJQ8EYMbv561PVRCh6QG++w= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.43.10/go.mod h1:F9+N41US+/MkvlC/NGxptK/MiUfKe1dweqsBl38ev/U= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.38.3 h1:YdA5QgoYa2wNblkWyZfPlLLYsAEKCwLfdMxpWu16wpM= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.38.3/go.mod h1:T0LiPG5vKHZ7DmOq4Cmw0Kku3tMkaR9AknskS2hUXvI= -github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.27.10 h1:A3MzGDmkAyV2jRVSCHmTjMsuiYrRjrKxQiHsVts1jas= -github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.27.10/go.mod h1:BuKoVF3AykN1SAsKigr+aL8UovHFQJs2M7vylsOj8xY= -github.com/aws/aws-sdk-go-v2/service/m2 v1.13.6 h1:NxZs0J3l2p+PY+lPjHFVeY08lmTrv1vHzSfLNWqMfJc= -github.com/aws/aws-sdk-go-v2/service/m2 v1.13.6/go.mod h1:h0ksPg7Jqgml26JZoUs87A2sqx5/gRLH3hrN7p3ww8g= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.28.10 h1:PF1Q1JOKpyMPAjhBBcxUxOXafaHMZkXjN2Su+yPSj2M= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.28.10/go.mod h1:I2QK9o927+sKJn0yNFn3L0GVnXyZWwguOTdOy69wqRY= -github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.53.7 h1:9GzfkCQV6VLgtCjQQc8Bhz2QJLyae9b3kNN6N9qYVwU= -github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.53.7/go.mod h1:jpXXeDQerb6Md4Yg3LxscyQrqOzL+h1xi5SizhXCY9w= -github.com/aws/aws-sdk-go-v2/service/medialive v1.52.6 h1:VUfwXW95Om8NRrNuPBY8+tUpv2pLeMHoHNds2dPoI9s= -github.com/aws/aws-sdk-go-v2/service/medialive v1.52.6/go.mod h1:+kfONJ/rwJ7Qxizw2VNciswVk19vpXg9ngsEpfARusg= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.30.11 h1:28cpMq1VSS+d1vVYtrXQDzeuz+/P+Dxj2n2c0BrkQ4A= 
-github.com/aws/aws-sdk-go-v2/service/mediapackage v1.30.11/go.mod h1:v3DYFGJr+U/7XqOVLA5IBHXBUoHksVjfCrCEHQg6Usg= -github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.12.0 h1:btGNTjZuqksKW8LEa1ooGQMhNYq8fKV2xQe2MZ8XFsQ= -github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.12.0/go.mod h1:pg6xc5VIhx1ViJIsiQetZqdBKYCNRnsl8kcaWuJptZs= -github.com/aws/aws-sdk-go-v2/service/mediastore v1.20.10 h1:iheOfN0czGrzE96ZtlF9RvFG4sSNfRercZCxENO+BKw= -github.com/aws/aws-sdk-go-v2/service/mediastore v1.20.10/go.mod h1:ivRaBAFCc5B2vHuHJKlYyC6dDk0Q2cZpGO45Mbl2UPc= -github.com/aws/aws-sdk-go-v2/service/mq v1.22.11 h1:dI5u7KbpjslchKz46vHkQlfYFfcVRXQ53tBp3qdlOH8= -github.com/aws/aws-sdk-go-v2/service/mq v1.22.11/go.mod h1:hQ/8Uo+sQySjHie+oGZxYaDMVsAJYYea7fDWtxOW25g= -github.com/aws/aws-sdk-go-v2/service/mwaa v1.27.4 h1:8smXN5gAGZKBjervH0VZiR/dpP9G2nOiSakKNL+A2xY= -github.com/aws/aws-sdk-go-v2/service/mwaa v1.27.4/go.mod h1:n5E3bv5OwgyzXa8wN4dBiQ9chq4427i8mIL0DOGQ08U= -github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.8.7 h1:gmQ5UpIRqclaYFHyh+nWlx5NITsvVLR5aOzU9JnVPhU= -github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.8.7/go.mod h1:2ZVLdyzUl10QKonLIE4j8hsu9rePk62iO1HW7ND7cIw= -github.com/aws/aws-sdk-go-v2/service/oam v1.11.6 h1:AWbX6Q0CThDhgn6MIm2XPCnw3uA00yFkOKzfkGjDvwI= -github.com/aws/aws-sdk-go-v2/service/oam v1.11.6/go.mod h1:zh+/YaGPYtYIsy83eib8QYUCLNmTTRNnPKSy++MoVx0= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.11.13 h1:eqytt4h4+NG5eSYjHy/gxQeTYmH6kyB2BiNOqVdLWIU= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.11.13/go.mod h1:9tkQ/yUzFFBjIM7IbMqpsESMwNZkfO9ZtlXXJVfC/h4= -github.com/aws/aws-sdk-go-v2/service/organizations v1.27.9 h1:KNXacqpLvkK4oAMqSNhG2ETQzrVK4mKETAeNeo+dWyk= -github.com/aws/aws-sdk-go-v2/service/organizations v1.27.9/go.mod h1:hcr6lPG6K2l0WiKyu2ag/JrHbiIOUMg3tdNPtpTe+PM= -github.com/aws/aws-sdk-go-v2/service/osis v1.10.0 h1:2unOPcW9Eh/gcOEWdIcU5rmpwTFtqZ0YLaHzApPwiTI= -github.com/aws/aws-sdk-go-v2/service/osis 
v1.10.0/go.mod h1:1H+iBuiqX8snPxlOViytBuIKDHY2y+ZHzJ/gIqb+JEs= -github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.10.6 h1:dAtMkOI1E/+uOf5Md0TV5DzaOhUWbVsajcbiyjXS8Ng= -github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.10.6/go.mod h1:AlYMkLQ4e0iExjXDf8TPosjt8fjsmYu/2nv9xs9MbDs= -github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.5.10 h1:cn/ly7rE/rpG4XW7GFxs970D+PglbESF9f8vI/oUC+M= -github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.5.10/go.mod h1:3TF4rQ87enheeIx4c/vSFoqQXdoI9eOr6RDPvW0x0mM= -github.com/aws/aws-sdk-go-v2/service/pipes v1.12.1 h1:srOxtOzUntiGKtVCahEL13JYuBHGvuDlBJcHI81dgZA= -github.com/aws/aws-sdk-go-v2/service/pipes v1.12.1/go.mod h1:JHoN0tGkx3ZIYwOvF3WtepKYAMmSfC5o61ijIk6YuKo= -github.com/aws/aws-sdk-go-v2/service/polly v1.40.5 h1:nJowt8m2IcbcLkQnghrnro33nCBaPvGUOxzp2XrGbvE= -github.com/aws/aws-sdk-go-v2/service/polly v1.40.5/go.mod h1:NlZSQx5MgRlTRxuTB1UklQbkXSX/Rjk+nEJR2ClTjrM= -github.com/aws/aws-sdk-go-v2/service/pricing v1.28.7 h1:OimFd9B78+2BO35rJpIni3zEt3xXp+l/YuLwgULMNVE= -github.com/aws/aws-sdk-go-v2/service/pricing v1.28.7/go.mod h1:kdbauXuTWNaItPgeKT1uycVDvVlfD9FAEmKjAmAEiWM= -github.com/aws/aws-sdk-go-v2/service/qbusiness v1.6.6 h1:QtIdssfJjPLUGSc1UEl99uDbtW3WvP+bJ3ZmZPtGS6c= -github.com/aws/aws-sdk-go-v2/service/qbusiness v1.6.6/go.mod h1:J5k2cOgnRLFlQOX4Z0bBWhG8nb45vwMemPjegzvTdto= -github.com/aws/aws-sdk-go-v2/service/qldb v1.21.10 h1:JiA51DS5fOSXCbkaVurMcNAHSXTicEWlpy/343xdp1g= -github.com/aws/aws-sdk-go-v2/service/qldb v1.21.10/go.mod h1:KjLu3xgMrrGMgEpMvft7A0zPTn0EXVA5ys7KiF9/E44= -github.com/aws/aws-sdk-go-v2/service/ram v1.25.10 h1:eTSTspyVeFIjVvKEkhrF8xlTkcv2xRVih8H0ZL/wIGU= -github.com/aws/aws-sdk-go-v2/service/ram v1.25.10/go.mod h1:u82AB4OuZSlMIADLmySpervL3v6El3RYqSh3vjjOa2g= -github.com/aws/aws-sdk-go-v2/service/rbin v1.16.10 h1:4CSjB4CbP+WvGn9ow2ZyBSQ/JDpp2RmKkO2wpiFrBno= -github.com/aws/aws-sdk-go-v2/service/rbin v1.16.10/go.mod h1:PQuHOX24ueFRaxXKMVl+tAsbSPd/Ue5VhF4ispb5zdc= 
-github.com/aws/aws-sdk-go-v2/service/rds v1.79.6 h1:NX0OiCFYFc/p1Ufimr+kJkXCCFZe9FnUoQmG5mMrYfg= -github.com/aws/aws-sdk-go-v2/service/rds v1.79.6/go.mod h1:fZ+i+g1q3unIVP0qfYYyJd80W8aiyQJ6Wsij/HFj9W0= -github.com/aws/aws-sdk-go-v2/service/redshift v1.44.7 h1:sXKgb/ks0eeSHH2arXiUgMGIew7ka8fhplTLJx1Df48= -github.com/aws/aws-sdk-go-v2/service/redshift v1.44.7/go.mod h1:DNoffDrn/ZewuTyFUolU33+1w6vOieC8mhzF2Yi46PY= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.25.10 h1:FZiVA6SGDCxNUjoJ/CizSudFScdPvPQNbLtPgmrlUUk= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.25.10/go.mod h1:jnDZbfq7zPFvAnigSNc6iaOQ2TTAnzzQdNJQgHvg29s= -github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.18.8 h1:NtKuvfbHtkVQi/NDtGiDxmv6rL2ZcA93NXARmHkudDU= -github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.18.8/go.mod h1:lGVq1ZIzcwgjxeXlfXkY3DNC582SqYLwTjXZ1KGmAkM= -github.com/aws/aws-sdk-go-v2/service/rekognition v1.40.6 h1:v/UrTB1CHz+CXXPpE0jjGkgHT1sbpsEKs7/XsmrVa4k= -github.com/aws/aws-sdk-go-v2/service/rekognition v1.40.6/go.mod h1:AzDdeMyTSSSlZ+VO1S788x9x3lJVmVdqYlZxZ3rmi2U= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.10.11 h1:Ejmh88QYLOxgyh+kzoQUbLNyUbD4P7SLWmQ8Jx7qmmE= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.10.11/go.mod h1:+iMxqfKvnJVrbiHxDGyf47c7FI8TDukqjoMsLqoLrRw= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.22.6 h1:+oQIusl/699jbxbWeSI9fQ5ACZUxH6eeKxiXHtHjztQ= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.22.6/go.mod h1:hECEgQ2nBryyGTtts2k1m6MUjbaFJpoUd1wmNXpkEaY= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.21.10 h1:Lz66AebKV//iN8kelcsBe0fQekLmCkIzZSq/Yr/S+C4= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.21.10/go.mod h1:6u29rN3TBB89EOtTnEsjywjOmjA4nmUV8elhfLwinaw= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.11.6 h1:CnOWQ/6BKnBPcVTb9P7p6SsbFHsUvJJ2UbcQnZuIG+c= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.11.6/go.mod 
h1:urcaaPlew4LHXM66eEZeaWJBhCjKWzOCAmE2XInleA8= -github.com/aws/aws-sdk-go-v2/service/route53 v1.40.10 h1:J9uHribwEgHmesH5r0enxsZYyiGBWd2AaExSW2SydqE= -github.com/aws/aws-sdk-go-v2/service/route53 v1.40.10/go.mod h1:tdzmlLwRjsHJjd4XXoSSnubCkVdRa39y4jCp4RACMkY= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.23.10 h1:R56F/k3CQZHwrd3kHQ65Y91KHPBITruyPSX5/JGYe9E= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.23.10/go.mod h1:W3+eDMk01Na1U3aQfwGkkEP1Yfe6WUn8hXzyInvGlcU= -github.com/aws/aws-sdk-go-v2/service/route53profiles v1.0.7 h1:32/NRAG4ka8/hwr1k9ZA2xwarcJeWO6djaIFJ42tuFg= -github.com/aws/aws-sdk-go-v2/service/route53profiles v1.0.7/go.mod h1:H9RRL0qQ+s+XlaZO5s5G3Z8cVZpKEoj313hOyglUwj0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1 h1:UAxBuh0/8sFJk1qOkvOKewP5sWeWaTPDknbQz0ZkDm0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1/go.mod h1:hWjsYGjVuqCgfoveVcVFPXIWgz0aByzwaxKlN1StKcM= -github.com/aws/aws-sdk-go-v2/service/s3control v1.44.13 h1:HhsZlX5gsL/KfEyHyBO5H0ewgmXoiBpjDPAZ3Ggrj8g= -github.com/aws/aws-sdk-go-v2/service/s3control v1.44.13/go.mod h1:4fXOTqROQgQ4Y6JP0G/vjF//YfG5oHxAwI2TPbgEblU= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.8.10 h1:tXVgXdk69TNCERB3gQofwGWIKBOSQYXLyhpRaiEmk/g= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.8.10/go.mod h1:+1zSuvpsye9jvBPYLg++LFV9wKaLZuKRpciXnJcRqkQ= -github.com/aws/aws-sdk-go-v2/service/schemas v1.24.10 h1:WxqJ2K51dsWHdwYUdi1oTqarDFcUOJUwcQOSTEEjQ8k= -github.com/aws/aws-sdk-go-v2/service/schemas v1.24.10/go.mod h1:Mr4cAhSy1m0p+AVxfTNmzPgkFo/Go8Pm2eIIJ9MlEMs= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.30.0 h1:nqR1mkoDntCpOwdlEfa2pZLiwvQeF4Mi56WzOTyuF/s= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.30.0/go.mod h1:M9TqBwpQ7AC6zu1Yji7vijRliqir7hxjuRcnxIk7jCc= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.49.2 h1:ybKzmQRXvLkQ9rb251QPmaC5ZlCK1g8b1MLq7DD5eaE= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.49.2/go.mod 
h1:6SQ5lQJXJZ4HL8ewgW7kp68UkqQtUE/3UmEvDLpJxKk= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.14.0 h1:VQuxwHBq5iKVKbsPdrfKWijA9V3vUmtTOeuqNiL6IkU= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.14.0/go.mod h1:R23fuxDRRYRzUYthyjMLC+j5J3FOdt8vEruXVmzieEc= -github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.26.10 h1:YmcqlNM/+On+uz1U8mO67xmCBpIDBunL/Jcvxh5HjnQ= -github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.26.10/go.mod h1:qNYkunnIvN0ttbrpYRRZnv2TYUEcAlQmhKXkvT46Rrs= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.12 h1:KW0wxLufq7ngz1ofsZcjqSDoJZo3mBsNIwtrQVB7Z/Y= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.12/go.mod h1:pMCoQoF+2NgjwpSf4vM+QHp23hnQCje0n5LxrnmMH4Q= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.10 h1:B4VK4LEI/L5dtYq2Omzt4XQ9WwtZX7I+YwmkhcDdEV8= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.10/go.mod h1:jAMj6BiwJo5rCrR97LdKlo1M494krOfnPJCS6X7etcU= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.30.0 h1:9K/f5C/JsiamFFof/E4kKo7DpkZ1z5sa98hI7XHV3P0= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.30.0/go.mod h1:FAFzNrXuMkCLLVL89dpjJq2yJFbgFkyJC98jSgVHsso= -github.com/aws/aws-sdk-go-v2/service/shield v1.25.10 h1:QTc2t3diE1+J1ESShBMZZetQQCJpr4DC6qGsJfpWrrQ= -github.com/aws/aws-sdk-go-v2/service/shield v1.25.10/go.mod h1:pQgQYgpvef5P1jqHjB5+q/ss21ndQ3QtcVbfzNk/GrU= -github.com/aws/aws-sdk-go-v2/service/signer v1.22.13 h1:c3VQdGTewW+OJq0iw/P5rnFpfio+Dy0u9ulPdc+QW5k= -github.com/aws/aws-sdk-go-v2/service/signer v1.22.13/go.mod h1:tm0X1UQcNg0XaT1wRSR+TJXdgTL6SMu7ZNb9EDkqXjA= -github.com/aws/aws-sdk-go-v2/service/sns v1.29.11 h1:cZN4fMAERLi1Q4ZklHj1ru0oFSQ5Dacad0cY26gu/Fc= -github.com/aws/aws-sdk-go-v2/service/sns v1.29.11/go.mod h1:au0J6BWDeQfeyItMkuqT6fhhyZ3cVARGC9FVEDaz+Fk= -github.com/aws/aws-sdk-go-v2/service/sqs v1.32.6 h1:FrGnU+Ggf+jUFj1O7Pdw5hCk42dmyO9TOTCVL7mDISk= -github.com/aws/aws-sdk-go-v2/service/sqs v1.32.6/go.mod 
h1:2Ef3ZgVWL7lyz5YZf854YkMboK6qF1NbG/0hc9StZsg= -github.com/aws/aws-sdk-go-v2/service/ssm v1.50.6 h1:E+gbKlOadAI0qV+8uh0JnYmkRJi7k7XvMXcKso0Inyc= -github.com/aws/aws-sdk-go-v2/service/ssm v1.50.6/go.mod h1:vR37XXoCLx2fzr/fUaTQoQ6ZlBK8Ua6VLnxLfxN6vLY= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.22.10 h1:JIw0378UWnueUdaZhOv8MO1zZ6ReIQXpYqSv01TDvio= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.22.10/go.mod h1:JDRWRN6hxzkF/XDtGSmLUYRP88SkdHBr6LFW1/yZiXI= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.30.10 h1:MizOPvyKVTN07X9x2dpd/bpvjEuPUj8NyOD4Njp4T7c= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.30.10/go.mod h1:jgVY27QLdMMdFV+ZlvVmVWiu5HsjnjZO5Hjaqh0soLU= -github.com/aws/aws-sdk-go-v2/service/ssmsap v1.13.5 h1:kiveZFwK8mqJrkaMorymQp6J6l3s/pY5n/i6tabYz3Y= -github.com/aws/aws-sdk-go-v2/service/ssmsap v1.13.5/go.mod h1:auz/mQcCWc6ijosjuNXKw1JItUvqj+ERG1iHwJfHcvE= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 h1:gEYM2GSpr4YNWc6hCd5nod4+d4kd9vWIAWrmGuLdlMw= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.11/go.mod h1:gVvwPdPNYehHSP9Rs7q27U1EU+3Or2ZpXvzAYJNh63w= -github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.25.11 h1:9R4+nCSXYw+Ea10gD/uDPLEy7jV/m3i7tTN0x4cYPDg= -github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.25.11/go.mod h1:yXOGN/jjKLKLkWjZSKRWrnRAdw+6qWXF7bYXL/fB/d4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 h1:iXjh3uaH3vsVcnyZX7MqCoCfcyxIrVE9iOQruRaWPrQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5/go.mod h1:5ZXesEuy/QcO0WUnt+4sDkxhdXRHTu2yG0uCSH8B6os= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 h1:M/1u4HBpwLuMtjlxuI2y6HoVLzF5e2mfxHCg7ZVMYmk= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.12/go.mod h1:kcfd+eTdEi/40FIbLq4Hif3XMXnl5b/+t/KTfLt9xIk= -github.com/aws/aws-sdk-go-v2/service/swf v1.23.2 h1:/EudBRyXqjvogP4JLFb31Jt8rz4YYy1UgW9KzKR+1xo= -github.com/aws/aws-sdk-go-v2/service/swf v1.23.2/go.mod h1:z92PP2/Cnis08+F2SlpnLT2kpJPpBQcWQ6aNGyGRvQg= -github.com/aws/aws-sdk-go-v2/service/synthetics 
v1.24.10 h1:PMQAcJQH/84Qma/LKvv4bvg0cdJmkcg4t433HZvV+BE= -github.com/aws/aws-sdk-go-v2/service/synthetics v1.24.10/go.mod h1:ecCYcAmgR/HOcRLfvMsUnSvNiI2rIpwCdoEkxl9tDo8= -github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.0.8 h1:dPKuHz5E8aOZHOt/2l5E9p4kX7WeEw93yKsgZxBvMg4= -github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.0.8/go.mod h1:ZCewKIHsDadZ9jgcCJYtvdRfH2CEMRxRXLPFobkEQec= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.25.11 h1:xdW8/PT8R5Qx/IjkAdMvZomjjOdIWlqsFMCH6mqgjsQ= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.25.11/go.mod h1:QfuI1DCBSBqbqc7hxOB0glVXBJE8NLX81hr9cc9yirQ= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.37.6 h1:KpnJG4jr1OhjkNnRklDEolRJr1CuFFeJBgKoAIXlhYE= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.37.6/go.mod h1:NMzEA79tY7NAOXO+fHz57LaOkK7WylnjJpQxmlhgoUc= -github.com/aws/aws-sdk-go-v2/service/transfer v1.48.3 h1:imZ9ImrvPCMGIMtRTLVBO6+mxGNcXw8Mi5WupIEwB9M= -github.com/aws/aws-sdk-go-v2/service/transfer v1.48.3/go.mod h1:RBiHBLIFC7Sye7F6EW16swUjnsETkgjHLBLbEo6lZAM= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.15.0 h1:mUdYHBcfWGNclMsAKSMjCmEgR95z4wzj21JH6bh3f9c= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.15.0/go.mod h1:CI0PDiO2lZqVoaSOLWmmAzDPSixUTzUSqEnlZUdhWq8= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.8.6 h1:Mn3zWbtu2877a9ONYd3WNRY43NentIgSHNUNnFs9vuQ= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.8.6/go.mod h1:0iCBzvgSjFsiQfmgRPHBK+8iZdH9mXVAG7EcfyrX4ZM= -github.com/aws/aws-sdk-go-v2/service/waf v1.20.10 h1:nN1bcxknc8kFrI+YSupMiRCmrzjKfMIucvMtKhLbWFo= -github.com/aws/aws-sdk-go-v2/service/waf v1.20.10/go.mod h1:hriMVzhWjoXy3+71A8Q/T+lGprjWLCH3IgEvcuDIvOM= -github.com/aws/aws-sdk-go-v2/service/wafregional v1.21.10 h1:HiZrToGiVRP1nzh0nTS3cQH1N6o04MrHb8nwsLNuVX8= -github.com/aws/aws-sdk-go-v2/service/wafregional v1.21.10/go.mod h1:+Rlg1RQVNbUbslQRkTSPk1QjGTPx7MCSnaEpN+VZrIY= -github.com/aws/aws-sdk-go-v2/service/wafv2 v1.49.3 
h1:wnhDyatF0gn17s098Vd+/aHmgNvk3N7sknESF++wMck= -github.com/aws/aws-sdk-go-v2/service/wafv2 v1.49.3/go.mod h1:4U73NhYe9Eyz81zJgFKyho6Rmw1ZpIYnwhsdlx65mqI= -github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.30.6 h1:8W0gNavRGoSn2kolXQb/wr8MG9D7QrBAg/yjlTkmy04= -github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.30.6/go.mod h1:1P1kcHgiFKRuFfXGUck9vNaMCEmIeigbsBjb86UN2eg= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.39.6 h1:V4AQVudNs3PjsrXiDAX6HITaTLpo9W1r5yuUgzMONis= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.39.6/go.mod h1:BZlMv5EkPEBRCrHxTM6dH8nohuwIQaEHGHcI76a4pjs= -github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.18.6 h1:0IyUHkXxEeIVXWVtPB0+vQMM5sxBOWdPIoqCKwaGiG8= -github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.18.6/go.mod h1:utmfTQCJk0fAsiKFJ0FrGTJXFqyZoj5ZHm9FWT8Nf/0= -github.com/aws/aws-sdk-go-v2/service/xray v1.25.10 h1:EaxobHo3hQaj8HaGTdJwM8KRkAspfUQTthTeEXL6THA= -github.com/aws/aws-sdk-go-v2/service/xray v1.25.10/go.mod h1:doojKT3qF2pa1UDEuazJtGxdm2/Og9s9irewwJ+rpXU= -github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= -github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.32.3 h1:1X7ZNHsaDGwjZcNev1rbwr+NxV/wNbvj/Iw7ibFhD5Q= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.32.3/go.mod h1:0NHJUsvqVpWtSg9rROCJ1AxLmDCHJTdYEhcSs6Oto9I= +github.com/aws/aws-sdk-go-v2/service/account v1.19.3 h1:w/ZZ69+nzIYoussDQvIqyezI6iKGAjiHnVWmG+8Qs1I= +github.com/aws/aws-sdk-go-v2/service/account v1.19.3/go.mod h1:s7hT4ZWjp8GoSr0z8d5ZsJ8k+C2g4AsknLtmQaJgp0c= +github.com/aws/aws-sdk-go-v2/service/acm v1.28.4 h1:wiW1Y6/1lysA0eJZRq0I53YYKuV9MNAzL15z2eZRlEE= +github.com/aws/aws-sdk-go-v2/service/acm v1.28.4/go.mod 
h1:bzjymHHRhexkSMIvUHMpKydo9U82bmqQ5ru0IzYM8m8= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0 h1:CCaeK/FqBo/fmhSSqY0K8buep/ELBDEWc8IoOjf2piM= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0/go.mod h1:vDUysl9ROGF6GAsl1OgTg6xHDnw391hCc5+IYg2U/GQ= +github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 h1:o1cMErMp45oKZ2ScvBOdVXYhvu6FdUcz0Xn+JpDd408= +github.com/aws/aws-sdk-go-v2/service/amp v1.27.3/go.mod h1:TuSBSV1IedYHHrC4A3bW84WjQXNSzc6XasgvuDRDb4E= +github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3 h1:Plmg9O8/Pt4SKvPtUfSqCfv+SSSllouzlISFcvHK4bM= +github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3/go.mod h1:aP9g/m4SSSWUU+htIGXJIY8qy+pGydwr3gpt3OcjBJE= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.3 h1:vAtlXN1IZ+2etHppbmgbPw0ADNVRXS0Dfff/mPRLC3Y= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.3/go.mod h1:jmTl7BrsxCEUl4HwtL9tCDVfmSmCwatcUQA7QXgtT34= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.3 h1:g99B1JOPkygjlDAjsD0xhvWifAs25Xw9SJ9WwC9Rn20= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.3/go.mod h1:IN1OJRdB0VVSXsx1wlEfaDPpuXwSPkAVjhj7R5iSKsU= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.31.3 h1:nQVKaNJ8VrSKJpGQgzb+HVlrd8ehMuqYXF3Em+UK3P8= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.31.3/go.mod h1:tRgqJ4QiuXQeZ0QNDF6jdr+ImyXz5J4ystLtgUxPsD8= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.9.3 h1:Vz7if7/byANRrsN9Z0VQm1ZUff5iep5uZN16F7Z2A6c= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.9.3/go.mod h1:0ner/kMBkm2TcxNEtLglUXTZ+UDXuXnqDE+9FKUxOtM= +github.com/aws/aws-sdk-go-v2/service/appflow v1.43.3 h1:zD7Md/MQTUfa83LgDmaKGHTLs3+mpl3LncfS5wUypSI= +github.com/aws/aws-sdk-go-v2/service/appflow v1.43.3/go.mod h1:2b2pJQjTVLfBIzXs9TphXy1zJyRvNp34kbBgrnz4ByI= +github.com/aws/aws-sdk-go-v2/service/appintegrations v1.27.3 h1:joEF6jGgq/6aHp4MEHidJyOfqWrwqJpjxHYfcocjNsU= +github.com/aws/aws-sdk-go-v2/service/appintegrations v1.27.3/go.mod h1:7q06vKzUfBAZZrIfii6V8KC/+PmVNzNL2opqo9ivUMk= 
+github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.30.4 h1:qPr8FF0Jo4AIBcxb8gFmYcOW/zlsQX4iv8WkOGDm/F8= +github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.30.4/go.mod h1:gNFF1rFmR0dVaBfehDuil+nuTqwzdJexrcvKaDY2JU8= +github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.26.3 h1:G7hP9np1L0ykj02CFQgkqdZERUmHCXdw8WmR5pW2pHM= +github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.26.3/go.mod h1:NU+zX7v6CGH1X2Lz+lg3EqDjdqOgiCe2MjtobaToi6o= +github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.2.3 h1:TzO+pIk4UFmMTrHRsrqyOO3qUBxV4EYyEOFYjN1I7aI= +github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.2.3/go.mod h1:xN0wvFa9G1ENYN0RbajUQ8VN3LMzyL3rcu2yP08cSMs= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.30.3 h1:x6wptcqKbH2eQw7v43MI25ILW3OtIyYwZ9gifEM0DW8= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.30.3/go.mod h1:buTv8bJjlKxqALyK7/2G1206H/YYllu0R/F9Hz0rhv4= +github.com/aws/aws-sdk-go-v2/service/appstream v1.36.3 h1:msS6jU0f3kTgLfUQk7JxazMbfwG5/RbsOwiwXDBO9IU= +github.com/aws/aws-sdk-go-v2/service/appstream v1.36.3/go.mod h1:zgB9SASIAI0KWFuUSlo9pGC37f6DDjh1ZJfZEhQcPhU= +github.com/aws/aws-sdk-go-v2/service/appsync v1.34.3 h1:th1DsTjU1sw61RM9rW5g5c61QP1awuWt+zGBYFSIgb0= +github.com/aws/aws-sdk-go-v2/service/appsync v1.34.3/go.mod h1:1BIEiY+76rNP8PEcv/Iyt7ybml38JqitIbrHfMDEYb8= +github.com/aws/aws-sdk-go-v2/service/athena v1.44.3 h1:T2tJUqFEs8+2944NHspI3dRFELzKH4HfPXdrrIy18WA= +github.com/aws/aws-sdk-go-v2/service/athena v1.44.3/go.mod h1:Vn+X6oPpEMNBFAlGGHHNiNc+Tk10F3dPYLbtbED7fIE= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.35.3 h1:bUG4DuAXPb0inqsuG/kugMUwsJxxc2l7Sw2+jR+lvmI= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.35.3/go.mod h1:2uO8WcgMPuckIGMQd4HpDsUFhE8G6t3MkMNnrqREnl0= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.43.3 h1:y4kBd6IXizNoJ1QnVa1kFFmonxnv6mm6z+q7z0Jkdhg= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.43.3/go.mod h1:j2WsKJ/NQS+y8JUgpv+BBzyzddNZP2SG60fB5aQBZaA= 
+github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.22.3 h1:DnZw/gxHCBnqOVi/ML/E3QFYVF3/lIV/j8FhyTS7JWo= +github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.22.3/go.mod h1:yda4Po8TZKCPFw7B3f4KfoRV308C+6sriJmuuTRSvlU= +github.com/aws/aws-sdk-go-v2/service/backup v1.36.3 h1:8yBWFpIBlL8uOHKFgWykiRnku2wQVQP+hF91/FKFdnc= +github.com/aws/aws-sdk-go-v2/service/backup v1.36.3/go.mod h1:HLROV+NOBQ/hGMGc72X65qRctcEIKvaf6k7PekTLw+k= +github.com/aws/aws-sdk-go-v2/service/batch v1.43.0 h1:LQDwHqwORPQC1cP8iF+gaEbw6gFNVQ88m8qa66ou8d0= +github.com/aws/aws-sdk-go-v2/service/batch v1.43.0/go.mod h1:gzEWhQvhwjniRJbCksLNPR6//8dmfRHJGJMfFcNqOdk= +github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.5.3 h1:SUgFOQbtQNPqjvN68d8esf9qHWqh45wTZ7205wOz7oo= +github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.5.3/go.mod h1:KS4Up5owaEKw+EUTveQsSf9zsaUiJCSdoxZW1M8dbuE= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.12.0 h1:Ie1I5DsX0N5cQlJw+XwK8x/nZuca9MK7V/3FjumxSNc= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.12.0/go.mod h1:KP4dFAvbA6N2iUkDj61pqd140QyfceyK69PeKPD6860= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.16.0 h1:9DpqAvqAPGhJ4bnqJX8WiDJZUDdmRlotYoh95K8NgVc= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.16.0/go.mod h1:RhcOKxIQHAqPTPIEUtEMG9eMnIRruBMY6+cmx4Mh8Dg= +github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3 h1:BfuKcgSyNTzS2N57JSM4uQ/dq1Qw8TQkoOoVvsFXoCw= +github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3/go.mod h1:QJ119U4g137qbYZRXqFxtvyARMT88athXWt9gYcRBjM= +github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3 h1:BFVoEcC9czVq0/KHdNheLtPUGjBvu133EfgIF0hO3SI= +github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3/go.mod h1:9jB/CYDhmh+LPD3iRNnu4Zj+9A3AMoBQkxPp1j8reSs= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3 h1:NY/98Ry+J3xzQXaH9uy8KXya6JiOnoXjFqGLL7aKHLw= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3/go.mod h1:AC5wH108q+kaTSjuQoKoKCH4fxGKoteUMRPb0wLYzGI= 
+github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.17.3 h1:e8mAmTy94SOhD/KdTRpocBj6+KOyxjQg7JYN1oBjT08= +github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.17.3/go.mod h1:Snfhyz0+wdVWPaqSLP2Bf3nziCeyP61AzEzwnxEhbWY= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.14.3 h1:GDqMlQfhiyBD3pWTY2JanoTyCmCMdWu8BejrYU1qQXs= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.14.3/go.mod h1:mRQ3DX5oSX/YETFLFjY4JNyerAE1yrumwZgYcmktrAk= +github.com/aws/aws-sdk-go-v2/service/cloud9 v1.26.3 h1:QBP3/69oA+0+j5oNHXL/V8Hj4NTEjYZaOXHPNFhbFv0= +github.com/aws/aws-sdk-go-v2/service/cloud9 v1.26.3/go.mod h1:ehJ9aR1QffkV/66jI90pJ05g2qCOIMuOLsuSkJ93cHc= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.20.3 h1:QdoWu2A7sOU7g38Uj1dH9rCvJcINiAV7B/exER1AOKo= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.20.3/go.mod h1:AOsjRDzfgBXF2xsVqwoirlk69ZzSzZIiZdxMyqTih6k= +github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.3 h1:mIpL+FXa+2U6oc85b/15JwJhNUU+c/LHwxM3hpQIxXQ= +github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.3/go.mod h1:lcQ7+K0Q9x0ozhjBwDfBkuY8qexSP/QXLgp0jj+/NZg= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.38.4 h1:I/sQ9uGOs72/483obb2SPoa9ZEsYGbel6jcTTwD/0zU= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.38.4/go.mod h1:P6ByphKl2oNQZlv4WsCaLSmRncKEcOnbitYLtJPfqZI= +github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.6.3 h1:ZHv5lcXUXHVAHZEZW3NfBqa4PcaclQPKf7AMiFJ4Oq4= +github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.6.3/go.mod h1:Lv6trdyO6NW+ReaFMDUSrEaExuO/EUGOzBYLQ5xkbd8= +github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.25.2 h1:Db/kjlMkNVzqiWpeazMWcLZGGVOIsAL4Ftpl7SC7O1M= +github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.25.2/go.mod h1:BgimFWmGZs2F5QzLQA/X9IKqhHpckuWJ2yR3/GwlOqA= +github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.24.3 h1:Dlop6K06hj/An056A77eq8MEmLmmz7TF35m403ZH2Vo= +github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.24.3/go.mod h1:vBprWws4t1YOJtHb7m4BtfFIJ64tmsN4d+9bkl82994= 
+github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.42.3 h1:dtFepCqT+Lm3sFxracD6PvVJAMTuIKTRd3yqBpMOomk= +github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.42.3/go.mod h1:p+4/sHQpT3kcfY2LruQuVgVFKd72yLnqJUayHhwfStY= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3 h1:VminN0bFfPQkaJ2MZOJh0d7+sVu0SKdZnO9FfyE1C18= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3/go.mod h1:SxcxnimuI5pVps173h7VcyuFadgOFFfl2aUXUCswoY0= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.37.3 h1:pnvujeesw3tP0iDLKdREjPAzxmPqC8F0bov77VN2wSk= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.37.3/go.mod h1:eJZGfJNuTmvBgiy2O5XIPlHMBi4GUYoJoKZ6U6wCVVk= +github.com/aws/aws-sdk-go-v2/service/codeartifact v1.30.3 h1:9eAjfGKFWduKyCR94Qi/JfORoJLndGydph2dcLtM7gI= +github.com/aws/aws-sdk-go-v2/service/codeartifact v1.30.3/go.mod h1:AdirH4VV5v1ik2pOOU0WdEdojBBgzTdECBrOQl0ojOc= +github.com/aws/aws-sdk-go-v2/service/codebuild v1.40.3 h1:v+CiUB5RsmyRpGQ5Tddwn3prS1Y+uCIKVAzZ0Wb3Nyk= +github.com/aws/aws-sdk-go-v2/service/codebuild v1.40.3/go.mod h1:HDiBVjDHX2n7UGFgynZLkVGPXvEnurxlEeaxPF/Ql/0= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.15.3 h1:Bz3QJAdZd1z1rxlllKMl0s5y8kjbryqeMhlX57XJ5q8= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.15.3/go.mod h1:R3dXCLxoYZu1zDODLw810Je3DOydgMUC2MZqyf8Gi9g= +github.com/aws/aws-sdk-go-v2/service/codecommit v1.24.3 h1:fqMQmtdFtZkPgCFKn4S9xp21RSCfdR3mytel6zfAzaQ= +github.com/aws/aws-sdk-go-v2/service/codecommit v1.24.3/go.mod h1:VgBrrInGfpFZyyCfVJ+EhV57+I924PItEJ4/yqT34u8= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.27.3 h1:MSA1lrc/3I1rDQtLKmCe0P3J/jgc39jmN3SZBFVfJxA= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.27.3/go.mod h1:Zqk3aokH+BfnsAfJl10gz9zWU3TC28e5rR5N/U7yYDk= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.22.3 h1:SLX2POpbXZne1+f9RmdqEhof4p5zCpJRIt/ch4R3/bU= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.22.3/go.mod h1:n/bcMFxX+woGslg9MazSiTs5FIPDXozv1F/TvjbIZeA= 
+github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.27.3 h1:Fx4bbX53SzG2flM5fJHFgJ3fA7WpWohTwc4Q5np0ZYU= +github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.27.3/go.mod h1:FNcnCnqWpfricoUGPTi5AoMpDp0UF4xOtS7hvdRUz38= +github.com/aws/aws-sdk-go-v2/service/codepipeline v1.30.3 h1:yoSnmI4DWImw7bFpv+9tMqcn0TtGZRLnyyvUE9j7KJw= +github.com/aws/aws-sdk-go-v2/service/codepipeline v1.30.3/go.mod h1:V/08OFKsq9jFlh0zb5WC3AvBXhPgTbMfoVrsWU0gKGg= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.27.3 h1:ZuvuMAG2sgoruSgJ/rxLOZWtK2kkyn225YphvpOvPDc= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.27.3/go.mod h1:lhFfISGURSZzi/OQYyc94YoGXu3FhMp1/3g4lANOktY= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.24.3 h1:dOpVsTQ+KP4cISpU7i+djPuNxlmRuQtrDilqbC9qhDU= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.24.3/go.mod h1:jzetUSpzLqwmfFc8YWImGPkkrgNrQHR0AeDSPZBVVNY= +github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.25.5 h1:iMKC49JNJGq0MLvdKU7DSuB5uZUg33bIfcasNZjoMh4= +github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.25.5/go.mod h1:nEqtURWmhc/EXQ1yYIoEtvCqQYgl5yYKxdQU8taJnv0= +github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.41.4 h1:jkvdmVYoVWVrAIjgt9aiR9e7GRK2DnxrMnvKjA5EJd0= +github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.41.4/go.mod h1:aynIysFCBIq18wfN2GrIYAeofOnQKV3LtkjyrQKfaFY= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.33.3 h1:3ZaUAjyN1VEdvH8xVTu87GLDpzp/BDTb5WjqpHU8po8= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.33.3/go.mod h1:IKMf00PVvTyj1E/ey0MGDuI58VHdRiiMtAf/2+c74EE= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.37.3 h1:0T+EzT9/cWUDqMmZ1Hvg7l7ZOso3satQ2T9trD8T6Ro= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.37.3/go.mod h1:Du8rTxK7DvQDcYWZnAH2kJfCxvIwNfKcdb/1MJJzmn4= +github.com/aws/aws-sdk-go-v2/service/configservice v1.48.3 h1:Ir1tfXyCY3XE/ENEb0mRUBn6VoWb1w9SDKYFwO+otJI= +github.com/aws/aws-sdk-go-v2/service/configservice 
v1.48.3/go.mod h1:Z4sA07QNZ7IWEix3oW3QeiIe21jaCTTOW8ftLgeWI3s= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.19.3 h1:6UpDqIDPvl6j+OpjjMfAWRyAKfNvZdRp6e88/gKubis= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.19.3/go.mod h1:/ni69CCzOeSFB/ySFHzJnWh6MQDJe/rNkvKW8+jfe9k= +github.com/aws/aws-sdk-go-v2/service/controltower v1.16.3 h1:uivw03qvOgsT9OHDdL7FQQ9rjnL4DoML867QemUTaOI= +github.com/aws/aws-sdk-go-v2/service/controltower v1.16.3/go.mod h1:tOyU8KwO9JqZlUXjpX3eXnf0r9iKkK/6sqlaNloJ5IQ= +github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.26.3 h1:t+h4OYWHsU9pQ6W7cDHso8TbM0fDfTjO7IPRsAl7CfY= +github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.26.3/go.mod h1:mclb7wohFWSVV4EgpRd12YtX5SLAX++hKSWco/GqH8o= +github.com/aws/aws-sdk-go-v2/service/costexplorer v1.40.3 h1:wzusAKyjqSBbOjulrFF2caN+D4ylnI14cTT8xTKm7Sw= +github.com/aws/aws-sdk-go-v2/service/costexplorer v1.40.3/go.mod h1:qgL8c9hUSWedmBiyydYvQgmzKv04NJpgHgblzWMtDOg= +github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3 h1:+AiQwAYmhOXn0m+6B42XBR9UkDhSno0QjQl5XHCPg4k= +github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3/go.mod h1:Om/t/NhLjZu7rYMYBI1rWyGqEUfqSn/vk/k1/7pLEC8= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3 h1:Aq+7pnVWk59dS2BMVSOEDWN0yProaw0XhaUsRGbH7MM= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3/go.mod h1:4duVgMu+RBKpiU+Hz4FjPedMLWNFVL4lhauBVYz8OZ4= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3 h1:HzdVJzMjEhQhLjUB1xGRMhs4zjaemPLUbdhhA4wfnMI= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3/go.mod h1:2SvlhcMgqPNNVr53/0m91cxPTY6mUFvp6o+Kzi63zUM= +github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3 h1:tFFs24+oIWlHLbTyluhnQIHaj8o4nc8yXHNnAc8PTN8= +github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3/go.mod h1:WP7xXB608MyVv3yFzduKlLeYmU0AxMo7zeF9Cuwbvwc= +github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3 h1:GndlSdjdgcW1r+mGL635+6ZlwXgdu/663aHHyBJ6Jtk= 
+github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3/go.mod h1:xUxKkSfH4sCQixoxh3pYc7C4N+OH2POgS0dhkOzR+u8= +github.com/aws/aws-sdk-go-v2/service/datapipeline v1.23.3 h1:kA26fZh30b6kOZZIkxr/1M4f4TnIsXBw3RcHEFuFxcs= +github.com/aws/aws-sdk-go-v2/service/datapipeline v1.23.3/go.mod h1:9Z4AiKwAlu2eXOPFEDfkLV/wTpI9o2FX09M4l6E4VE4= +github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3 h1:ZrKMl8jsL5YHurOLf0YVLb7JBYxGtqQQAknJ5g4MTz4= +github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3/go.mod h1:+ObRlRcKO/p38yJSkpVZKlCU3t9PqXMORXC+xTkb9NU= +github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2 h1:9l6JiWZz/2Sp3ne9E/AXECwnzi7NASQUJnQ7xts/8oA= +github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2/go.mod h1:li7vb6Ip/zyT59298XmAhs+dtXR2GqHXQlIdgL3QycE= +github.com/aws/aws-sdk-go-v2/service/dax v1.21.3 h1:uGHbOU0lBxntNZ/+Y2HbVo//AVFdl/BpMz7viHf/r8M= +github.com/aws/aws-sdk-go-v2/service/dax v1.21.3/go.mod h1:FNgKx9JXy9L0bThUl86EMV9gwUgqf2eexpitcne/AXc= +github.com/aws/aws-sdk-go-v2/service/detective v1.29.3 h1:HimZr2FJaLzxinq9QypFY2gGM+40pMWPwxB+ZNTkfNI= +github.com/aws/aws-sdk-go-v2/service/detective v1.29.3/go.mod h1:fiEtdUerGX5RHS/upeHldpHKikvfQz1MJCgquNFQeDo= +github.com/aws/aws-sdk-go-v2/service/devicefarm v1.25.2 h1:DSv0r8nKo8+ix2h5Rz/Zl62kkJPRxXIEQzmRI3CQVpY= +github.com/aws/aws-sdk-go-v2/service/devicefarm v1.25.2/go.mod h1:7Ev/BlW5/zbURomHu/2Ay8l/HAgoQAbaSP2XlMUED9I= +github.com/aws/aws-sdk-go-v2/service/devopsguru v1.32.3 h1:dVk+ogfz83rhZLaWSwSbgTQnxno+DIhZ3Q3KFdxTVmA= +github.com/aws/aws-sdk-go-v2/service/devopsguru v1.32.3/go.mod h1:Rbgi0LKyAIyWHlqVtgU5wy39omdfHHvlGjrl+Vg41us= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.27.3 h1:Ua8NLsRNDm/HSotawG9MjeUEdo88uuTsEJ+EQB99G7c= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.27.3/go.mod h1:DeGGGnrVVVNQlfMpAqmIiEndGTlDVbUIzNI4MbyyH68= +github.com/aws/aws-sdk-go-v2/service/dlm v1.26.3 h1:LAZoBLsYn4eSTzJlfIu+v/+EHzqLqkPlIIc+y36HgEA= +github.com/aws/aws-sdk-go-v2/service/dlm v1.26.3/go.mod 
h1:Sy6z2qbpj3pxXtwi0H5nR8WG1AMj2M2Gv6qPw2ChFYM= +github.com/aws/aws-sdk-go-v2/service/docdb v1.36.3 h1:6LabOycU59L+JfgCavDzfK1lheqj0wt/Fbta5OpeiUI= +github.com/aws/aws-sdk-go-v2/service/docdb v1.36.3/go.mod h1:cA+GYSfYfLSczv09u72Ger5kQ6JR5UHW3YmHD8c66tA= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.11.3 h1:1DLJ+BTpBLXMuWJPHPoemYYcBJS4GBpXg2VYZx29I4A= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.11.3/go.mod h1:wZl6Satx8GY99iRC/wA5nsPOCPOnnaizt/kb1t6hSRk= +github.com/aws/aws-sdk-go-v2/service/drs v1.28.3 h1:ss4Ib/kWbYA4pveQtSOluDE/Kf0e0jQ9SPwltAmRxKY= +github.com/aws/aws-sdk-go-v2/service/drs v1.28.3/go.mod h1:tjzPl3EOCkojHm9Q4y+Kuq7GGSJJw/P0UIqc4eHvtFI= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 h1:nEhZKd1JQ4EB1tekcqW1oIVpDC1ZFrjrp/cLC5MXjFQ= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3/go.mod h1:q9vzW3Xr1KEXa8n4waHiFt1PrppNDlMymlYP+xpsFbY= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0 h1:zPwhEYn3Y83mnnr9QG+i6NTiAbVbcJe6RpCSJKHIQNE= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0/go.mod h1:9KdiRVKTZyPRTlbX3i41FxTV+5OatZ7xOJCN4lleX7g= +github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3 h1:+v2hv29pWaVDASIScHuUhDC93nqJGVlGf6cujrJMHZE= +github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3/go.mod h1:RhaP7Wil0+uuuhiE4FzOOEFZwkmFAk1ZflXzK+O3ptU= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3 h1:n2eqzO9VabUkd77b88Hos6OEtbGohB/TRrtXLTZi38Y= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3/go.mod h1:Oy3yHBGkKtTmsn6iJGEZxytzZQrEvoFRWldB4XmzlO4= +github.com/aws/aws-sdk-go-v2/service/ecs v1.44.3 h1:JkVDQ9mfUSwMOGWIEmyB74mIznjKnHykJSq3uwusBBs= +github.com/aws/aws-sdk-go-v2/service/ecs v1.44.3/go.mod h1:MsQWy/90Xwn3cy5u+eiiXqC521xIm21wOODIweLo4hs= +github.com/aws/aws-sdk-go-v2/service/efs v1.31.3 h1:vHNTbv0pFB/E19MokZcWAxZIggWgcLlcixNePBe6iZc= +github.com/aws/aws-sdk-go-v2/service/efs v1.31.3/go.mod h1:P1X7sDHKpqZCLac7bRsFF/EN2REOgmeKStQTa14FpEA= +github.com/aws/aws-sdk-go-v2/service/eks v1.46.2 
h1:byyz/tBy/uGyucr/QLE1UmTuGaJx9ge19aWUZCiOMCc= +github.com/aws/aws-sdk-go-v2/service/eks v1.46.2/go.mod h1:awleuSoavuUt32hemzWdSrI47zq7slFtIj8St07EXpE= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.40.3 h1:nmEN5lGIAShc0nNFjvUk2/YYlsTSwX2n1XF37Av93Yw= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.40.3/go.mod h1:OcUtpbcNsyMdA/Wv5XenKl8aG3yrqA6HVIOF7ms+Ikc= +github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.26.2 h1:OA2kqnEcSqpnznO4hb4MKDXxeCRuEkADGgnihLwvn4E= +github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.26.2/go.mod h1:N/YWNrjILpIoai7cZ4Uq2KCNvBPf25Y+vIhbm9QpwDc= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.26.3 h1:5B2Dq2zy/hgtEO3wITnOZiyh6e+GyuHTGw6bK/8+L3w= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.26.3/go.mod h1:mgU2kG+D5ybtfGhEuZRW8usYOGrNSgsimRt/hOSI65s= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3 h1:yiBmRRlVwehTN2TF0wbUkM7BluYFOLZU/U2SeQHE+q8= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3/go.mod h1:L5bVuO4PeXuDuMYZfL3IW69E6mz6PDCYpp6IKDlcLMA= +github.com/aws/aws-sdk-go-v2/service/emr v1.42.2 h1:j3aHjEsxFGCNGOCJjJM6AtPhdvn1pw2i2hGqxLU0qeI= +github.com/aws/aws-sdk-go-v2/service/emr v1.42.2/go.mod h1:rN91rXF7gucnSnArDWbv9xDdZjBEetO4LFoJgGK/Wqw= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3 h1:zxpxkpY1h+kPWquiUSG8u2CJ3AtEJPqqBqiMKxLwPjI= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3/go.mod h1:9+NjcAre2lLrpGvCrb9V+TUDii5D+Z8xER/vCPZdZFg= +github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 h1:pjZzcXU25gsD2WmlmlayEsyXIWMVOK3//x4BXvK9c0U= +github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3/go.mod h1:4ew4HelByABYyBE+8iU8Rzrp5PdBic5yd9nFMhbnwE8= +github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 h1:bAuNjv1PmyZvjojnXlozw68T2X2eq1xhjteyU6qGDQU= +github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3/go.mod h1:EtC1+tObvVB/l/c9Dh6IILA/r/cu9Pc17S870zRihq4= +github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3 
h1:Y8VS/XHyeJ1cxSCtmvUOFLqfNIl9rASWOE/gsrydGFw= +github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3/go.mod h1:xbE7o+ADq+h0DeKA/05618ox75wY/jtoZTF9XuvSvnI= +github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3 h1:BMYs3DZYSIaIDhkPSsAUeobQ7Z0ipNRJSiFTP2C4RWE= +github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3/go.mod h1:8rN4JsVXcCHl/f4hwOWVuy+iQ5iolXOdSX+QFYZyubw= +github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 h1:NwddG0xUTBM2zoq4D8rotQmT2Z/S8IGM+D2wYzKFSQs= +github.com/aws/aws-sdk-go-v2/service/fis v1.26.3/go.mod h1:QmdVf0N/vrhckZLHK4x+f+u9EUuMhetsRgu1rjU1eL0= +github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 h1:QeYAz3JhpkTxkS+fifDBfmgWFdSRBI21MQzN2bCO1xo= +github.com/aws/aws-sdk-go-v2/service/fms v1.35.3/go.mod h1:GXASgVouW5X/bmEgOoV/tkzJkp5ib7ZeA+YxMc5piqs= +github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3 h1:de8RU808VMx8km6t2wY3WDWigB6GqbNEcyVQRJFaIYs= +github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3/go.mod h1:F/qjepwnxPHHUTK9ikZp14jLyrvB18kZ/22MmaPxtHE= +github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.27.0 h1:nlm6tZX8gwsVktDKTQe3IOagNVK1+6CGf9IpdWM6x+E= +github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.27.0/go.mod h1:ehkx8lBkJkZbdKYX2GyMFmdOAfcGs0mjcbaNXNiHAEE= +github.com/aws/aws-sdk-go-v2/service/grafana v1.24.3 h1:riHLAJSqo5zczCyMSo8XDA46X2aDpQvB46F0seKuNEM= +github.com/aws/aws-sdk-go-v2/service/grafana v1.24.3/go.mod h1:2ipW9QX9MlePs99Dy8ohwfdW847hMJG6BU9jvixIpxE= +github.com/aws/aws-sdk-go-v2/service/greengrass v1.25.3 h1:5KauP/IHPWGoHni4mt2Sjp0EtHMkdWtPP3v81qaHHyg= +github.com/aws/aws-sdk-go-v2/service/greengrass v1.25.3/go.mod h1:Cw18f8jWmb5IQlxd48bIDSXOPfKf5am3Zr9GnOyCcTw= +github.com/aws/aws-sdk-go-v2/service/groundstation v1.29.3 h1:qo3UtqkypEXmUSOGepFqFt1bbEi1EAsJcHm6I3WQtOk= +github.com/aws/aws-sdk-go-v2/service/groundstation v1.29.3/go.mod h1:upTLlgFk3Yw83uo6jNxlFD2EdU/iwZc+FM1OG+Zhikw= +github.com/aws/aws-sdk-go-v2/service/guardduty v1.45.3 h1:V7+xcerreGBsoLqraRPAJRCaFiN/04kP85mMeQjgRO4= 
+github.com/aws/aws-sdk-go-v2/service/guardduty v1.45.3/go.mod h1:zjxzcOjdQYMgh90Xm5XRVbeQD7bSeD7XaPB77CNq1C8= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.26.3 h1:hIlZp+8MV4c5dWOelj4ygDv8w/uyuKURga1FHT8MI44= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.26.3/go.mod h1:n7B4cOb7+4pzcO0F7KVnUgnS9Z5dKQHxQrCR7D/bZyE= +github.com/aws/aws-sdk-go-v2/service/iam v1.34.3 h1:p4L/tixJ3JUIxCteMGT6oMlqCbEv/EzSZoVwdiib8sU= +github.com/aws/aws-sdk-go-v2/service/iam v1.34.3/go.mod h1:rfOWxxwdecWvSC9C2/8K/foW3Blf+aKnIIPP9kQ2DPE= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.25.3 h1:eiL4q6pEzvazErz3gBOoP9hDm3Ul8pV69Qn7BrPARrU= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.25.3/go.mod h1:oNDSqrUg2dofbodrdr9fBzJ6dX8Lkh/2xN7LXXdvr5A= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.28.3 h1:dscyhNwL1v6pYPCflnp8/jBMeCC5y5Vn8npXmM/EE78= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.28.3/go.mod h1:EI8IxOq2F4KHZQQEB4rmQPXmYILE2avtX6wOiR8A5XQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 h1:lhAX5f7KpgwyieXjbDnRTjPEUI0l3emSRyxXj1PXP8w= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16/go.mod h1:AblAlCwvi7Q/SFowvckgN+8M3uFPlopSYeLlbNDArhA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared 
v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.16.3 h1:3dIg2t4akBnpmzXJO20z/JxqS7AQfuR7+WZKQRpdpmM= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.16.3/go.mod h1:kGhxggatnXh1Kog+ppPQwEHVdaJiuGuEYg1DbdSXPwU= +github.com/aws/aws-sdk-go-v2/service/iot v1.55.3 h1:di+va5f5fLC32K+0eDQa2AWQujjLgdeTXakUQXtsS68= +github.com/aws/aws-sdk-go-v2/service/iot v1.55.3/go.mod h1:2blUX4qcMUQIyWY6nfu8R0kMORCNH0oLRZU1EOj2+mk= +github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 h1:SEt8SRvlGvnOkqDV5PJ9eFvwz03H9A67Co/QPPdic5Y= +github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3/go.mod h1:XDi19IK0UluaSVnm1mu2AakZKHtWjg6gksitvH7+LQw= +github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 h1:9Lao6kmD9P+yywuIn9I8hrraJ2jHIztU/GJspIxn6lA= +github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3/go.mod h1:V2BDVrnP+Tn+MM1xxFI7Qcb+YPhiGgY5PUoKzrKHaCQ= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 h1:d7y5Gs9BfO+1Jhj8y1/lZhegiJXXy/DlanzwRgYrkXM= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3/go.mod h1:rtw6VOH+4X/TWoOKQlOC+oq/WBDJD4BqaPi930II6Mk= +github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 h1:MUx27PrqicGxgsiDWo7xv/Zsl4b0X8kHCRvMpX7XrQs= +github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3/go.mod h1:mBWO7tOHjEvfZ88cUBhCfViO9vclCumFcTeiR1cB4IA= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 h1:jJyh5SN/b78UZjIsVqM8/N5GQsD12sEvM2g5bVsFVhg= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3/go.mod h1:XuvDeFgRl8LZ0tPHImZYbq/71qXlXEh4a3UBvTOmKZw= +github.com/aws/aws-sdk-go-v2/service/kendra v1.52.3 h1:SgSKyym+vQfUvEOyuLR9uPJ8o63pBIMI06xWLGZ75s0= +github.com/aws/aws-sdk-go-v2/service/kendra v1.52.3/go.mod h1:I7nz57YLvHw0sd5TjLRyAc8Ea7Qic6Emk+V+TwleBYY= +github.com/aws/aws-sdk-go-v2/service/keyspaces v1.12.3 h1:25HN/tJRRf0rwPzDpNyTALuk3Yrd9wBEXR+WMZIMA38= 
+github.com/aws/aws-sdk-go-v2/service/keyspaces v1.12.3/go.mod h1:/sTpi3FG4DsTSTabyXfKXypVEjCuNU/8jxTCQLWYRZQ= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 h1:ktR7RUdUQ8m9rkgCPRsS7iTJgFp9MXEX0nltrT8bxY4= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3/go.mod h1:hufTMUGSlcBLGgs6leSPbDfY1sM3mrO2qjtVkPMTDhE= +github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 h1:UPTdlTOwWUX49fVi7cymEN6hDqCwe3LNv1vi7TXUutk= +github.com/aws/aws-sdk-go-v2/service/kms v1.35.3/go.mod h1:gjDP16zn+WWalyaUqwCCioQ8gU8lzttCCc9jYsiQI/8= +github.com/aws/aws-sdk-go-v2/service/lakeformation v1.35.3 h1:Rfl7JjXVdriUprd8TTlbgcTyPU/Pl+v/O/nMD9HYpgA= +github.com/aws/aws-sdk-go-v2/service/lakeformation v1.35.3/go.mod h1:cyogDr92z2UF8fBoRN/+/gKuVTrxBD10bo6PVn3tDeQ= +github.com/aws/aws-sdk-go-v2/service/lambda v1.56.3 h1:r/y4nQOln25cbjrD8Wmzhhvnvr2ObPjgcPvPdoU9yHs= +github.com/aws/aws-sdk-go-v2/service/lambda v1.56.3/go.mod h1:/4Vaddp+wJc1AA8ViAqwWKAcYykPV+ZplhmLQuq3RbQ= +github.com/aws/aws-sdk-go-v2/service/launchwizard v1.6.3 h1:HlZn+zJoCEFuUvKLGbGXVIwXp3XA1xvLf/udp7ABDvk= +github.com/aws/aws-sdk-go-v2/service/launchwizard v1.6.3/go.mod h1:IJIHGsE1X4tRCw3s+SMG0NlIQM4yM7rlj5CfUDqT/+M= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.45.3 h1:sZwjTNfuXXk3Fyor/bEpjcznTD1+f6OEYxONrAU2sAc= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.45.3/go.mod h1:GE1lDQwM3Dm7Fysaet+yeNanYwwTvfLIUlK3P/owUw8= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.40.3 h1:dy4sbyGy7BS4c0KaPZwg1P5ZP+lW+auTVcPiwrmbn8M= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.40.3/go.mod h1:EMgqMhof+RuaYvQavxKC0ZWvP7yB4B4NJhP+dbm13u0= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.29.3 h1:UJEd/gP0jzWDfr4f/3TPKSls8MuomApfPap1CS/PxMY= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.29.3/go.mod h1:o21usaj4iSiu94CTwmLKS94wMM4+AJY/HNR5vWmcEC0= +github.com/aws/aws-sdk-go-v2/service/m2 v1.15.3 h1:2rO4AxOqwtWar9xx051FKeDDXu8njV0DZt+tdlfy8y4= +github.com/aws/aws-sdk-go-v2/service/m2 v1.15.3/go.mod 
h1:OKkohde5gLaVJ2MWJkBxU0DXBggmMDdEQ6dSxeKdDcU= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0 h1:Y79CoATONI7M7deTCC5RX/84rK5n/oK1s8HWk7LMV+4= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0/go.mod h1:6cpEF3W3oCNX9shBj9N3lrehYdxLuzDbYZdhOiaoN94= +github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3 h1:1ls4o+377rEfTuZ4YaqDrSo75qpC1ySv8m2FfVk23tw= +github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3/go.mod h1:JAiHALb6LfTclPNBdUUTL8xmDZcwBCTbSVgJEkgiIv4= +github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3 h1:9aDpYGrfgFjfvzOdAfMcEdGbWa3l/1RjGtOr4On9Kd4= +github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3/go.mod h1:49kVyWdlOWpusFyzDrmxCG9PqXlKtpKmHYoTv5h1O5k= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3 h1:fBtklFkqk6QhJBzSBgNJiwWySt1RvspmvCvY+giXgdI= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3/go.mod h1:BejXbLdRRWr6uMl4wZrz3iAcJDVgJu3EEstqDq8wxEE= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3 h1:ytQ77lC/wrYatbiLSZlYSpgjzvtgXBey0xxRsBA4swY= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3/go.mod h1:+R07/s3U8lJzEZDiwFxv/jmlSNbQjnoSqKaZEoqWt5Y= +github.com/aws/aws-sdk-go-v2/service/mediastore v1.22.3 h1:WBVRvc0iIJdbdCkBjWRMVtUOMmAvOyN70x1KrBTOFm0= +github.com/aws/aws-sdk-go-v2/service/mediastore v1.22.3/go.mod h1:plJWP1InGjEZiJvXfTlBqTBeMW8ddEZeIdYYFTYZMyE= +github.com/aws/aws-sdk-go-v2/service/mq v1.25.3 h1:SyRcb9GRPcoNKCuLnpj1qGIr/8stnVIf4DsuRhXIzEA= +github.com/aws/aws-sdk-go-v2/service/mq v1.25.3/go.mod h1:Xu8nT/Yj64z5Gj1ebVB3drPEIBsPNDoFhx2xZDrdGlc= +github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4 h1:lptYTP7Br5zll9USf2aKY1ZlN69vYAlZOSCv1Q+k1S4= +github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4/go.mod h1:mtgvj3nNI+LiRNT07JaHbTh6E/y8QRrClvd+/GMhMS4= +github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3 h1:XEbvRhi+ELazJaqh8k0KgTZrAgXM3rmR0hsGPTIpUIo= +github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3/go.mod h1:tfCOS8E/SwIkqHHGgpwRZTly3ZQxcsORZPEVBKMkbx4= 
+github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3 h1:hb3i/o9ouQj6RZjykyGI1koOfp22/ZMuWpuPfeu+zNE= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3/go.mod h1:Y9mINPJv+o9q8Ztr5/PRh2C1Iynik64IhPzwe2ERGqQ= +github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3 h1:I+m+rITTdVA9BNJeuCzYgMQjqbUE10xcY0OqgBvFEFE= +github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3/go.mod h1:R+4X5haYg3eRWYb99y+m1UhlVjFrHNlcfl3WES5e1oQ= +github.com/aws/aws-sdk-go-v2/service/oam v1.13.3 h1:KCbGN36Q/qQ27mv+/4BSax0q6/KSAxh3K3R+gRhNHwg= +github.com/aws/aws-sdk-go-v2/service/oam v1.13.3/go.mod h1:T/GYfs9EvCp1ke+82YQJZTTP0FlRETQnny3uPl1YTlY= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.13.3 h1:xRRPnilDJCDohQ+J1dUH4UvzL6P+KPQ0NwO7cs0odfc= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.13.3/go.mod h1:J9Ybe5zLnJG/PsLrdI80ihIW1MYSHMlQyVtdc1X9irQ= +github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2 h1:+tGF0JH2u4HwneqNFAKFHqENwfpBweKj67+LbwTKpqE= +github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2/go.mod h1:6wxO8s5wMumyNRsOgOgcIvqvF8rIf8Cj7Khhn/bFI0c= +github.com/aws/aws-sdk-go-v2/service/osis v1.12.3 h1:T9+bvsT2me+zQx7rUUTgalP7u5lOruoZoH8Xnp1gSPI= +github.com/aws/aws-sdk-go-v2/service/osis v1.12.3/go.mod h1:582tNTtG2bLnDxD5ceguyDlc7hAqtHYY29xHcux37Lo= +github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.12.3 h1:9oQMCF4oLvWSCDTiiAVEwPs4Sl/iBsC/17qvIa2sYjU= +github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.12.3/go.mod h1:NNyvgUO7XweCVxGTSnllS6XdsD/9Il6Kc63D/stKgiM= +github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.7.3 h1:xKVSPlN0K1r9VBe6MaKHgUi3EvJotLE9s4etstJq0jw= +github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.7.3/go.mod h1:4Lk91jzPQQKOzml7LHOR/zAE5FF4+mL0CPrArI8vnCY= +github.com/aws/aws-sdk-go-v2/service/pipes v1.14.3 h1:fYZlFa1OvrgaFODrdf0KVDp4qCRHMZNr8S/F3aGNuno= +github.com/aws/aws-sdk-go-v2/service/pipes v1.14.3/go.mod h1:S0g2KF8IpU6Ptn46eSywrS+w1PMUwrf/xWF8szcTZ2Q= 
+github.com/aws/aws-sdk-go-v2/service/polly v1.42.3 h1:MuoVKFJr/TUimLdT6nvio+OehAPM7kILgNLF3rYcaP0= +github.com/aws/aws-sdk-go-v2/service/polly v1.42.3/go.mod h1:PQlzSg4fsvxUgyXl0VIORU06zIQV2Y1Jd5YkDrP46FI= +github.com/aws/aws-sdk-go-v2/service/pricing v1.30.3 h1:CO5rn/wveWDphdllj+E6fdfX26XhmBj6zbntQbwajzE= +github.com/aws/aws-sdk-go-v2/service/pricing v1.30.3/go.mod h1:JnnBNRgok4OQBoHCzpS37BgWNQkbY73q97HZMCDgvho= +github.com/aws/aws-sdk-go-v2/service/qbusiness v1.10.2 h1:ZEVUuXUj5FERUTzzACAFJ8p/0q3AWTkvnbOOp9nVIXA= +github.com/aws/aws-sdk-go-v2/service/qbusiness v1.10.2/go.mod h1:+O5t/RLHL/ureGkytxCumU3VQjAaKOQ4PU89+aZC9ow= +github.com/aws/aws-sdk-go-v2/service/qldb v1.23.3 h1:qrU3Xiv20E8yPTJq7ZDTjVOBuYVbEE9NsucXKP57YiE= +github.com/aws/aws-sdk-go-v2/service/qldb v1.23.3/go.mod h1:ZePPGflmFHyvUediLcKpc4I9ZaIARm/OgAvtayU7sD0= +github.com/aws/aws-sdk-go-v2/service/ram v1.27.3 h1:MoQ0up3IiE2fl0+qySx3Lb0swK6G6ESQ4S3w3WfJZ48= +github.com/aws/aws-sdk-go-v2/service/ram v1.27.3/go.mod h1:XymSCzlSx2QjdvU/KdV/+niPQBZRC1A8luPDFz3pjyg= +github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3 h1:5Izo7ZI9zrvG9VLpJdnDl97gNyCFr310RtriuKIJgFk= +github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3/go.mod h1:GlAG1tgrchQnNlO/fxXLmmF6t+v+9fQMNHNdW7Zc8Zc= +github.com/aws/aws-sdk-go-v2/service/rds v1.81.4 h1:tBtjOMKyEWLvsO6HaX6A+0A0V1gKcU2aSZKQXw6MSCM= +github.com/aws/aws-sdk-go-v2/service/rds v1.81.4/go.mod h1:j27FNXhbbHXC3ExFsJkoxq2Y+4dQypf8KFX1IkgwVvM= +github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4 h1:wNBruTRRDfBv2Pz3Mvw6JIJS7ujfTd1ztCG5pIlrfRk= +github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4/go.mod h1:AhuwOvTE4nMwWfJQNZ2khZGV9yXexB2MjNYtCuLQA4s= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3 h1:rtX1ZHGPpqbQGZlPuN1u7nA+0zjq0DB7QTVNlYY/gfw= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3/go.mod h1:8Ah7aUFE9G0dppkn6ZXn1iExeHUV4369IJ2GRi7++Y0= +github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.20.3 h1:dZTe+TGD6B15Qhhugp4MUOCLPzaODOxc5qc6K5/yZDA= 
+github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.20.3/go.mod h1:oJRMDbpdkGsrRiSmJUumhj4KuXdP4QN9A5AK1rE0xps= +github.com/aws/aws-sdk-go-v2/service/rekognition v1.43.2 h1:nrR1xZ6QoW7lUvFmLHOwTK2n25nnuPhP2f++C3DlPRc= +github.com/aws/aws-sdk-go-v2/service/rekognition v1.43.2/go.mod h1:UkvOY/p1SKtJgzvwmlPnrFWOP2kj6efrbcbQHFy9qvM= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.12.3 h1:GEkqXpMrNF6UpC8edjE66HZgVpqppvxxMRhHcBbyQiU= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.12.3/go.mod h1:PQCEcRWQIPD+uqrqSaLJDfveDYqHTPaimym1+5WtvMU= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.24.3 h1:lxYeMxHTz8TculPM7bxM4uZxJpAH394xY215ub595H8= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.24.3/go.mod h1:wyzvCa9oNmh3Ejs0kM63IR7lq9Vie9bcg2YIg+p9alY= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.23.3 h1:ByynKMsGZGmpUpnQ99y+lS7VxZrNt3mdagCnHd011Kk= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.23.3/go.mod h1:ZR4h87npHPuVQ2SEeoWMe+CO/HcS9g2iYMLnT5HawW8= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.13.3 h1:3lqKckUrVhC86nI5d/7suyv4sBhUJgACHfbs8qTj6+g= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.13.3/go.mod h1:7IIMPfX6TzfxRIJIp1NLYWFkApDOMnlb5XrynzpxMkA= +github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3 h1:MmLCRqP4U4Cw9gJ4bNrCG0mWqEtBlmAVleyelcHARMU= +github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3/go.mod h1:AMPjK2YnRh0YgOID3PqhJA1BRNfXDfGOnSsKHtAe8yA= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.25.3 h1:VGLIgiClxmwxBpGzHERgNgwJMukHZpLcQZqJuQYjAiM= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.25.3/go.mod h1:Kgq5O7ZaDk0mTZmX6YCL+ZtZ1YcJHtGsVubp0OT77MA= +github.com/aws/aws-sdk-go-v2/service/route53profiles v1.2.3 h1:N4f5sliNiWcp3abC+8YpcaVjXuaNJIlz/dBd+saimm0= +github.com/aws/aws-sdk-go-v2/service/route53profiles v1.2.3/go.mod h1:r2B4BvTn3zSMK+BFHGl0q63B/nJMOk9/NukLZzqO8sY= +github.com/aws/aws-sdk-go-v2/service/rum v1.19.3 
h1:DR+GYJRPL7eEZknnGdwm+lH686LmUBB/X2YVQDHLNY4= +github.com/aws/aws-sdk-go-v2/service/rum v1.19.3/go.mod h1:5jFxbuc05P/+BbJvVbBspMbzDR2IFU0LegQG3iUvj8g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= +github.com/aws/aws-sdk-go-v2/service/s3control v1.46.3 h1:3De8/YQpup0mLNKh0G9JHWJLEkWNdghd5z84vw4v+yw= +github.com/aws/aws-sdk-go-v2/service/s3control v1.46.3/go.mod h1:sUA7DOI2fdRHQQUpvRVfYKTo9P0+UAsWYBHvyqFHcC0= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3 h1:gmpU7E0ntMzXr+yQQIXbiiueOewf/1BQ9WgeaXo6BcQ= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3/go.mod h1:jnQp5kPPvEgPmVPm0h/XZPmlx7DQ0pqUiISRO4s6U3s= +github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3 h1:ZJW2OQNpkR8P7URtISmF8twpvz2V0tUN/OgMenlxkao= +github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3/go.mod h1:QcRvTKZ9cBv6TlZECUStXI1z1qlCMWKpPi/ZefknVpQ= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3 h1:ilavrucVBQHYnMjD2KmZQDCU1fuluQb0l9zRigGNVEc= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3/go.mod h1:TKKN7IQoM7uTnyuFm9bm9cw5P//ZYTl4m3htBWQ1G/c= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3 h1:tFzkGJZKDWgwGDSQXwxZK7Bm3NzlKOW6KwNr14xXZqc= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3/go.mod h1:MfWlz2hEZ2O0XdyBBJNtF6qUZwpHtvc892BU7gludBw= +github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3 h1:7isk2tSNmVbm2f8epPfokkHjjWfwS46IpNNmI+rarUo= +github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3/go.mod h1:X5rHkguK4jCvFOM74tkme3oLUOaR++APKgwhNcIdOW0= +github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.22.3 h1:E4NzUkgPrKmlbC9OxVUEQnTdPRg3MTTiDwmq5dJfH9U= +github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.22.3/go.mod h1:/nzQOH+tOGrQVv5QbVN+88HoNYc15s8aKsJmOT9MPJI= +github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.28.3 
h1:l19QC3al5lqQydnJRz1cpduAoL0YoEeSxI5Wb5NUEis= +github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.28.3/go.mod h1:0Em81iN4ZnER1M0XDirgcbsZK3jNghA0YlY2Xw2BDOQ= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.3 h1:EthA93BNgTnk36FoI9DCKtv4S0m63WzdGDYlBp/CvHQ= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.3/go.mod h1:4xh/h0pevPhBkA4b2iYosZaqrThccxFREQxiGuZpJlc= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.23.3 h1:J6R7Mo3nDY9BmmG4V9EpQa70A0XOoCuWPYTpsmouM48= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.23.3/go.mod h1:be52Ycqv581QoIOZzHfZFWlJLcGAI2M/ItUSlx7lLp0= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.32.3 h1:DLJCsgYZoNIIIFnWd3MXyg9ehgnlihOKDEvOAkzGRMc= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.32.3/go.mod h1:klyMXN+cNAndrESWMyT7LA8Ll0I6Nc03jxfSkeuU/Xg= +github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3 h1:7BK+k08c5r1oqqHeb6ye0affEQQJ/fimBTGZSjmpjwk= +github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3/go.mod h1:+mtHHxsylrf+kjxcbvfnu6jtyTT8Fa9BlqjQk5XJZ80= +github.com/aws/aws-sdk-go-v2/service/shield v1.27.3 h1:SfjI6FuphzspGPvcRD8hjMD6wLUAE6vtJLGrui19j2s= +github.com/aws/aws-sdk-go-v2/service/shield v1.27.3/go.mod h1:JpxjPa91y1hRb3G8xxzhOQFcK/r90it41jA/hD0q+Gg= +github.com/aws/aws-sdk-go-v2/service/signer v1.24.3 h1:vN91JPGjBc5imkkpIqVWolvFxZygpDlRUovx221Wid8= +github.com/aws/aws-sdk-go-v2/service/signer v1.24.3/go.mod h1:1/6iDWLI/6V+I8n9ZnUd5m7zkPWQVituijVZs0jRdGU= +github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 h1:eSTEdxkfle2G98FE+Xl3db/XAXXVTJPNQo9K/Ar8oAI= +github.com/aws/aws-sdk-go-v2/service/sns v1.31.3/go.mod h1:1dn0delSO3J69THuty5iwP0US2Glt0mx2qBBlI13pvw= +github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 h1:Vjqy5BZCOIsn4Pj8xzyqgGmsSqzz7y/WXbN3RgOoVrc= +github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3/go.mod h1:L0enV3GCRd5iG9B64W35C4/hwsCB00Ib+DKVGTadKHI= +github.com/aws/aws-sdk-go-v2/service/ssm v1.52.3 h1:iu53lwRKbZOGCVUH09g3J0xU8A+bAGVo09VR9K4d0Yg= 
+github.com/aws/aws-sdk-go-v2/service/ssm v1.52.3/go.mod h1:v7NIzEFIHBiicOMaMTuEmbnzGnqW0d+6ulNALul6fYE= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.24.3 h1:j2hdqn1dz8FPePLCQNXtDMd/6URmRya2Ys3Um78a1Es= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.24.3/go.mod h1:jTZ3loeBr6JRNIhq7C24OwjtzEaV9tAJUtWjLIKoin8= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3 h1:IXODiFsgKoyW7QVWWHoIjdBB2dWPRFPT5KREfBxHoQ8= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3/go.mod h1:JvtI6itHlTxyGew0oT7xYNbF7OA767givRMsCuBFK5k= +github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3 h1:vBcoorWl+c4r5un837H8fhLoS0Kc8SKlGBHpyq7KM9w= +github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3/go.mod h1:Mq0FruBai8A9f7fpzjcfD+S+y0I4DkZTygb3HxuqDB4= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3 h1:pBE7FzR3AUpauidRUITPlDWTQ4hHktI649xZt3e/wKM= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3/go.mod h1:EyoPT+dUT5zqspxSub9KHDWOZyIP30bPgIavBvGGVz0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/aws-sdk-go-v2/service/swf v1.25.3 h1:7zYsHA9ORjiCHYzTJf0g+gwo3mPpn2XbMlWQreiXWdM= +github.com/aws/aws-sdk-go-v2/service/swf v1.25.3/go.mod h1:FIwuqwcEguy+ToyQzMwpMAXc9Kxh5QwH3nlXMeHdHnA= +github.com/aws/aws-sdk-go-v2/service/synthetics v1.26.3 h1:JPgfM6lEqJ3O3kYLYWxYaZEL4pE4binxBWYzXxFADBE= +github.com/aws/aws-sdk-go-v2/service/synthetics v1.26.3/go.mod 
h1:iVEoUBC/J06ZwJujK/pa57Gm+G9OOfYxynf2O2hWtWc= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3 h1:Qbimk+9ZyMxjyunIkdvaDeA/LLbeSV0NqurwC2D/gKg= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3/go.mod h1:2AEQ9klGEJdMIg+bC1gnGGiJqKebIkhfwJyNYBYh9dg= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3 h1:GbbpHIz5tBazjVOunsf6xcgruWFvj1DT+jUNyKDwK2s= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3/go.mod h1:sXSJhu0vub083lif2S+g7fPocwVuqu9D9Bp1FEIYqOE= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.39.3 h1:vgXMSzoRvWgptv2xmpsF7kWUiwr/e+RrBxLVIAH3pfY= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.39.3/go.mod h1:xtCxGy771E4UOUqmxqLa/EoA73U/06wA/wvEexj9JSE= +github.com/aws/aws-sdk-go-v2/service/transfer v1.50.3 h1:CpeH+cboQS9A0ar387V6dxVxs6UYUXO1N4rtRU2244c= +github.com/aws/aws-sdk-go-v2/service/transfer v1.50.3/go.mod h1:plbUFzNIVQ/qYehjK2qKzZNP3Qu5vob2Jeezeeb8pMc= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.17.3 h1:RvKL61+VcqZIL9dS3BE0bQTyN1lCrDCv3cz9kdkNm6k= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.17.3/go.mod h1:AmO4nIKOKHzJCbVn467c4keHpzmZwy7s98zEsLjcJos= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.10.3 h1:sg673tzRhiA0N0iyc8EojgNnenuUQFFJmzxa/ni3VGI= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.10.3/go.mod h1:vo6OSH+QLP6X9gyZiccj6SV1eiPWgtT7V5GongrGA2s= +github.com/aws/aws-sdk-go-v2/service/waf v1.23.3 h1:D0EDHlqaPWwm05+/3SaJye4HwXs6TWPJe2wINrbc+Dw= +github.com/aws/aws-sdk-go-v2/service/waf v1.23.3/go.mod h1:M0olbEl0NTVF9337MxfjJz4iUl6za1Zka5ZFSZvJ+AU= +github.com/aws/aws-sdk-go-v2/service/wafregional v1.23.3 h1:7dr6En0/6KRFoz8VmnYks9dVvL+tkL5RjRrxqGzr1zI= +github.com/aws/aws-sdk-go-v2/service/wafregional v1.23.3/go.mod h1:24TtlRsv4LKAE3VnRJQhpatr8cpX0yj8NSzg8/lxOCw= +github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4 h1:1khBA5uryBRJoCb4G2iR5RT06BkfPEjjDCHAiRb8P3Q= +github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4/go.mod 
h1:QpFImaPGKNwa+MiZ+oo6LbV1PVQBapc0CnrAMRScoxM= +github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.32.3 h1:BjPTq4qiR/Ywu3yf3DeGepCj5RB1c4rtEUmE62bmkus= +github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.32.3/go.mod h1:jeL9apgA3x3fwH3ZkaDPIfYcXZUlmCXNrU4o+6oY4oM= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.44.2 h1:xqlHduaOQOIstwjydeUA3MyQOsX78Xz+0xbkc/Lwi18= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.44.2/go.mod h1:YRGgDr23EJC+32pPpWnoVB2p4JP3u5xASobpmoOlhEo= +github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.21.3 h1:fZVJVU+fgDbHDZpHv447C43ZM9E9QHbj7reT6tB19FA= +github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.21.3/go.mod h1:CWln0RlRf0Cc4Csr4HkyXI6BkkIujyTeWuwTo3hijP0= +github.com/aws/aws-sdk-go-v2/service/xray v1.27.3 h1:0jSgvovW7R95P8XJiGxYfrnxdryQyClvebJeYbUlecw= +github.com/aws/aws-sdk-go-v2/service/xray v1.27.3/go.mod h1:yKewwhgsy9idJZ7oJLrFleYmy2oq/JSLQWdHNgLUYMM= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beevik/etree v1.4.0 h1:oz1UedHRepuY3p4N5OjE0nK1WLCqtzHf25bxplKOHLs= github.com/beevik/etree v1.4.0/go.mod h1:cyWiXwGoasx60gHvtnEh5x8+uIjUVnjWqBvEnhnqKDA= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= @@ -435,15 +479,15 @@ github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxG github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= -github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dlclark/regexp2 v1.11.2 h1:/u628IuisSTwri5/UKloiIsH8+qF2Pu7xEQX+yIKg68= +github.com/dlclark/regexp2 
v1.11.2/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/gdavison/terraform-plugin-log v0.0.0-20230928191232-6c653d8ef8fb h1:HM67IMNxlkqGxAM5ymxMg2ANCcbL4oEr5cy+tGZ6fNo= @@ -480,10 +524,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0 h1:l16/Vrl0+x+HjHJWEjcKPwHYoxN9EC78gAFXKlH6m84= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0/go.mod h1:HAmscHyzSOfB1Dr16KLc177KNbn83wscnZC+N7WyaM8= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.53 h1:jgOMbQlypMpUMaqYJotjT7ERSMvQP00Mppgjgh8lNt8= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.53/go.mod h1:nvpXIeF0ANfZ7sMssXKSSR3pyXfksajxoC2tl4jjN08= -github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.54 h1:raRbM2Wynqv0Nyhe7AwVnFgb2roGSvpSUeQKxEg8Lts= -github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.54/go.mod 
h1:Q5SSO00VVkkbiPtT6ssI9twHV7yfh4gPLOtoLQJMbzw= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.54 h1:O37FpbmkDSmSPgukMJLAzJzo5WBSFQx0iwn4PlY6BKI= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.54/go.mod h1:TJ+Mz49cn0zKURLX5haphWDbmGWz15OsEiLp1CcXDwY= +github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.55 h1:7izXD15MCmPcWbKJ5qAwcSlnWvTwkioIJkq0+OJIJG0= +github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.55/go.mod h1:I4WCc/OKwraiUqRXb4p+/sVMyMld2EXTSacu5RShbqI= github.com/hashicorp/awspolicyequivalence v1.6.0 h1:7aadmkalbc5ewStC6g3rljx1iNvP4QyAhg2KsHx8bU8= github.com/hashicorp/awspolicyequivalence v1.6.0/go.mod h1:9IOaIHx+a7C0NfUNk1A93M7kHd5rJ19aoUx37LZGC14= github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= @@ -510,34 +554,34 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= -github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= -github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= -github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= +github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils 
v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-framework v1.9.0 h1:caLcDoxiRucNi2hk8+j3kJwkKfvHznubyFsJMWfZqKU= -github.com/hashicorp/terraform-plugin-framework v1.9.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-framework v1.10.0 h1:xXhICE2Fns1RYZxEQebwkB2+kXouLC932Li9qelozrc= +github.com/hashicorp/terraform-plugin-framework v1.10.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0 h1:b8vZYB/SkXJT4YPbT3trzE6oJ7dPyMy68+9dEDKsJjE= github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0/go.mod h1:tP9BC3icoXBz72evMS5UTFvi98CiKhPdXF6yLs1wS8A= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-timetypes v0.4.0 h1:XLI93Oqw2/KTzYjgCXrUnm8LBkGAiHC/mDQg5g5Vob4= github.com/hashicorp/terraform-plugin-framework-timetypes v0.4.0/go.mod h1:mGuieb3bqKFYwEYB4lCMt302Z3siyv4PFYk/41wAUps= -github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= -github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= +github.com/hashicorp/terraform-plugin-framework-validators 
v0.13.0/go.mod h1:wGeI02gEhj9nPANU62F2jCaHjXulejm/X+af4PdZaNo= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-mux v0.16.0 h1:RCzXHGDYwUwwqfYYWJKBFaS3fQsWn/ZECEiW7p2023I= github.com/hashicorp/terraform-plugin-mux v0.16.0/go.mod h1:PF79mAsPc8CpusXPfEVa4X8PtkB+ngWoiUClMrNZlYo= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= -github.com/hashicorp/terraform-plugin-testing v1.8.0 h1:wdYIgwDk4iO933gC4S8KbKdnMQShu6BXuZQPScmHvpk= -github.com/hashicorp/terraform-plugin-testing v1.8.0/go.mod h1:o2kOgf18ADUaZGhtOl0YCkfIxg01MAiMATT2EtIHlZk= +github.com/hashicorp/terraform-plugin-testing v1.9.0 h1:xOsQRqqlHKXpFq6etTxih3ubdK3HVDtfE1IY7Rpd37o= +github.com/hashicorp/terraform-plugin-testing v1.9.0/go.mod h1:fhhVx/8+XNJZTD5o3b4stfZ6+q7z9+lIWigIYdT6/44= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -647,31 +691,31 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod 
h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0 h1:FGMfzzxfkNkw+gvKJOeT8dSmBjgrSFh+ClLl+OMKPno= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0/go.mod h1:hmHUXiKhyxbIhuNfG5ZTySq9HqqxJFNxaFOfXXvoMmQ= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0 h1:kAytSRJYoIy4eJtDOfSGf9LOCD4QdXFN37YJs0+bYrw= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0/go.mod h1:l6VnFEqDdeMSMfwULTDDY9ewlnlVLhmvBainVT+h/Zs= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -690,13 +734,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -708,8 +752,8 @@ golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/infrastructure/repository/labels-service.tf b/infrastructure/repository/labels-service.tf index 33e1ad0f109..4b9bcb2b474 100644 --- a/infrastructure/repository/labels-service.tf +++ b/infrastructure/repository/labels-service.tf @@ -21,6 +21,7 @@ variable "service_labels" { "appintegrations", "applicationcostprofiler", "applicationinsights", + "applicationsignals", "appmesh", "apprunner", "appstream", @@ -229,6 +230,7 @@ variable "service_labels" { "neptunegraph", "networkfirewall", "networkmanager", + "networkmonitor", "nimble", "oam", "opensearch", diff --git a/infrastructure/repository/main.tf b/infrastructure/repository/main.tf index c3688ca3423..0886602faf3 100644 --- a/infrastructure/repository/main.tf +++ b/infrastructure/repository/main.tf @@ -13,7 +13,7 @@ terraform { required_providers { github = { source = "integrations/github" - version = "6.2.1" + version = "6.2.3" } } diff --git a/internal/acctest/acctest.go b/internal/acctest/acctest.go index 13ad621d5c3..41bfa6fb825 100644 --- a/internal/acctest/acctest.go +++ b/internal/acctest/acctest.go @@ -24,6 +24,9 @@ import ( accounttypes "github.com/aws/aws-sdk-go-v2/service/account/types" 
"github.com/aws/aws-sdk-go-v2/service/acmpca" acmpcatypes "github.com/aws/aws-sdk-go-v2/service/acmpca/types" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + dstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/aws-sdk-go-v2/service/iam" awstypes "github.com/aws/aws-sdk-go-v2/service/iam/types" @@ -37,9 +40,9 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/arn" "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/outposts" + "github.com/aws/aws-sdk-go/service/pinpoint" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-go/tfprotov5" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -652,23 +655,13 @@ func CheckResourceAttrRFC3339(resourceName, attributeName string) resource.TestC return resource.TestMatchResourceAttr(resourceName, attributeName, regexache.MustCompile(RFC3339RegexPattern)) } -// CheckResourceAttrEquivalentJSON is a TestCheckFunc that compares a JSON value with an expected value. Both JSON -// values are normalized before being compared. -func CheckResourceAttrEquivalentJSON(resourceName, attributeName, expectedJSON string) resource.TestCheckFunc { - return func(s *terraform.State) error { - is, err := PrimaryInstanceState(s, resourceName) - if err != nil { - return err - } - - v, ok := is.Attributes[attributeName] - if !ok { - return fmt.Errorf("%s: No attribute %q found", resourceName, attributeName) - } - - vNormal, err := structure.NormalizeJsonString(v) +// CheckResourceAttrEquivalentJSON is a TestCheckFunc that compares a JSON value with an expected value. +// Both JSON values are normalized before being compared. 
+func CheckResourceAttrEquivalentJSON(n, key, expectedJSON string) resource.TestCheckFunc { + return resource.TestCheckResourceAttrWith(n, key, func(value string) error { + vNormal, err := structure.NormalizeJsonString(value) if err != nil { - return fmt.Errorf("%s: Error normalizing JSON in %q: %w", resourceName, attributeName, err) + return fmt.Errorf("%s: Error normalizing JSON in %q: %w", n, key, err) } expectedNormal, err := structure.NormalizeJsonString(expectedJSON) @@ -677,10 +670,10 @@ func CheckResourceAttrEquivalentJSON(resourceName, attributeName, expectedJSON s } if vNormal != expectedNormal { - return fmt.Errorf("%s: Attribute %q expected\n%s\ngot\n%s", resourceName, attributeName, expectedJSON, v) + return fmt.Errorf("%s: Attribute %q expected\n%s\ngot\n%s", n, key, expectedJSON, value) } return nil - } + }) } func CheckResourceAttrJMES(name, key, jmesPath, value string) resource.TestCheckFunc { @@ -989,6 +982,26 @@ func PreCheckPartitionNot(t *testing.T, partitions ...string) { } } +func PreCheckCognitoIdentityProvider(ctx context.Context, t *testing.T) { + t.Helper() + + conn := Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) + + input := &cognitoidentityprovider.ListUserPoolsInput{ + MaxResults: aws.Int32(1), + } + + _, err := conn.ListUserPools(ctx, input) + + if PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} + func PreCheckInspector2(ctx context.Context, t *testing.T) { t.Helper() @@ -1087,6 +1100,22 @@ func PreCheckOrganizationMemberAccountWithProvider(ctx context.Context, t *testi } } +func PreCheckPinpointApp(ctx context.Context, t *testing.T) { + conn := Provider.Meta().(*conns.AWSClient).PinpointConn(ctx) + + input := &pinpoint.GetAppsInput{} + + _, err := conn.GetAppsWithContext(ctx, input) + + if PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected 
PreCheck error: %s", err) + } +} + func PreCheckRegionOptIn(ctx context.Context, t *testing.T, region string) { t.Helper() @@ -1185,10 +1214,10 @@ func PreCheckIAMServiceLinkedRoleWithProvider(ctx context.Context, t *testing.T, func PreCheckDirectoryService(ctx context.Context, t *testing.T) { t.Helper() - conn := Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := Provider.Meta().(*conns.AWSClient).DSClient(ctx) input := &directoryservice.DescribeDirectoriesInput{} - _, err := conn.DescribeDirectoriesWithContext(ctx, input) + _, err := conn.DescribeDirectories(ctx, input) if PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) @@ -1205,20 +1234,20 @@ func PreCheckDirectoryService(ctx context.Context, t *testing.T) { func PreCheckDirectoryServiceSimpleDirectory(ctx context.Context, t *testing.T) { t.Helper() - conn := Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := Provider.Meta().(*conns.AWSClient).DSClient(ctx) input := &directoryservice.CreateDirectoryInput{ Name: aws.String("corp.example.com"), Password: aws.String("PreCheck123"), - Size: aws.String(directoryservice.DirectorySizeSmall), + Size: dstypes.DirectorySizeSmall, } - _, err := conn.CreateDirectoryWithContext(ctx, input) + _, err := conn.CreateDirectory(ctx, input) - if tfawserr.ErrMessageContains(err, directoryservice.ErrCodeClientException, "Simple AD directory creation is currently not supported in this region") { + if errs.IsAErrorMessageContains[*dstypes.ClientException](err, "Simple AD directory creation is currently not supported in this region") { t.Skipf("skipping acceptance testing: %s", err) } - if err != nil && !tfawserr.ErrMessageContains(err, directoryservice.ErrCodeInvalidParameterException, "VpcSettings must be specified") { + if err != nil && !errs.IsAErrorMessageContains[*dstypes.InvalidParameterException](err, "VpcSettings must be specified") { t.Fatalf("unexpected PreCheck error: %s", err) } } diff --git a/internal/conns/awsclient.go 
b/internal/conns/awsclient.go index 47fd6e02edf..976b936ef7f 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -18,8 +18,6 @@ import ( s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - directoryservice_sdkv1 "github.com/aws/aws-sdk-go/service/directoryservice" - efs_sdkv1 "github.com/aws/aws-sdk-go/service/efs" opsworks_sdkv1 "github.com/aws/aws-sdk-go/service/opsworks" rds_sdkv1 "github.com/aws/aws-sdk-go/service/rds" baselogging "github.com/hashicorp/aws-sdk-go-base/v2/logging" @@ -64,26 +62,6 @@ func (c *AWSClient) AwsConfig(context.Context) aws_sdkv2.Config { // nosemgrep:c return c.awsConfig.Copy() } -// DSConnForRegion returns an AWS SDK For Go v1 DS API client for the specified AWS Region. -// If the specified region is not the default a new "simple" client is created. -// This new client does not use any configured endpoint override. -func (c *AWSClient) DSConnForRegion(ctx context.Context, region string) *directoryservice_sdkv1.DirectoryService { - if region == c.Region { - return c.DSConn(ctx) - } - return directoryservice_sdkv1.New(c.session, aws_sdkv1.NewConfig().WithRegion(region)) -} - -// EFSConnForRegion returns an AWS SDK For Go v1 EFS API client for the specified AWS Region. -// If the specified region is not the default a new "simple" client is created. -// This new client does not use any configured endpoint override. -func (c *AWSClient) EFSConnForRegion(ctx context.Context, region string) *efs_sdkv1.EFS { - if region == c.Region { - return c.EFSConn(ctx) - } - return efs_sdkv1.New(c.session, aws_sdkv1.NewConfig().WithRegion(region)) -} - // OpsWorksConnForRegion returns an AWS SDK For Go v1 OpsWorks API client for the specified AWS Region. // If the specified region is not the default a new "simple" client is created. // This new client does not use any configured endpoint override. 
diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index fa56803e051..fb94dc454c0 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -18,12 +18,15 @@ import ( appintegrations_sdkv2 "github.com/aws/aws-sdk-go-v2/service/appintegrations" applicationautoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling" applicationinsights_sdkv2 "github.com/aws/aws-sdk-go-v2/service/applicationinsights" + applicationsignals_sdkv2 "github.com/aws/aws-sdk-go-v2/service/applicationsignals" apprunner_sdkv2 "github.com/aws/aws-sdk-go-v2/service/apprunner" appstream_sdkv2 "github.com/aws/aws-sdk-go-v2/service/appstream" + appsync_sdkv2 "github.com/aws/aws-sdk-go-v2/service/appsync" athena_sdkv2 "github.com/aws/aws-sdk-go-v2/service/athena" auditmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/auditmanager" autoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscaling" autoscalingplans_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscalingplans" + backup_sdkv2 "github.com/aws/aws-sdk-go-v2/service/backup" batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" bcmdataexports_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bcmdataexports" bedrock_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrock" @@ -54,6 +57,7 @@ import ( codestarconnections_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codestarconnections" codestarnotifications_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codestarnotifications" cognitoidentity_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cognitoidentity" + cognitoidentityprovider_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" comprehend_sdkv2 "github.com/aws/aws-sdk-go-v2/service/comprehend" computeoptimizer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/computeoptimizer" configservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/configservice" @@ -63,9 +67,14 @@ import ( costexplorer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costexplorer" 
costoptimizationhub_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costoptimizationhub" customerprofiles_sdkv2 "github.com/aws/aws-sdk-go-v2/service/customerprofiles" + databasemigrationservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + databrew_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databrew" + dataexchange_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dataexchange" + datapipeline_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datapipeline" datasync_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datasync" datazone_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datazone" dax_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dax" + detective_sdkv2 "github.com/aws/aws-sdk-go-v2/service/detective" devicefarm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/devicefarm" devopsguru_sdkv2 "github.com/aws/aws-sdk-go-v2/service/devopsguru" directoryservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/directoryservice" @@ -78,9 +87,11 @@ import ( ecr_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecr" ecrpublic_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecrpublic" ecs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecs" + efs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/efs" eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" elasticache_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticache" elasticbeanstalk_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk" + elasticloadbalancing_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" elasticloadbalancingv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" emr_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emr" emrserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrserverless" @@ -92,6 +103,8 @@ import ( fms_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fms" glacier_sdkv2 "github.com/aws/aws-sdk-go-v2/service/glacier" globalaccelerator_sdkv2 "github.com/aws/aws-sdk-go-v2/service/globalaccelerator" + grafana_sdkv2 "github.com/aws/aws-sdk-go-v2/service/grafana" + greengrass_sdkv2 
"github.com/aws/aws-sdk-go-v2/service/greengrass" groundstation_sdkv2 "github.com/aws/aws-sdk-go-v2/service/groundstation" guardduty_sdkv2 "github.com/aws/aws-sdk-go-v2/service/guardduty" healthlake_sdkv2 "github.com/aws/aws-sdk-go-v2/service/healthlake" @@ -99,8 +112,12 @@ import ( identitystore_sdkv2 "github.com/aws/aws-sdk-go-v2/service/identitystore" inspector2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/inspector2" internetmonitor_sdkv2 "github.com/aws/aws-sdk-go-v2/service/internetmonitor" + iot_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iot" + iotanalytics_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotanalytics" + iotevents_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotevents" ivschat_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivschat" kafka_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafka" + kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" kendra_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kendra" keyspaces_sdkv2 "github.com/aws/aws-sdk-go-v2/service/keyspaces" kinesis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kinesis" @@ -121,6 +138,8 @@ import ( mq_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mq" mwaa_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mwaa" neptunegraph_sdkv2 "github.com/aws/aws-sdk-go-v2/service/neptunegraph" + networkfirewall_sdkv2 "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + networkmonitor_sdkv2 "github.com/aws/aws-sdk-go-v2/service/networkmonitor" oam_sdkv2 "github.com/aws/aws-sdk-go-v2/service/oam" opensearchserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" organizations_sdkv2 "github.com/aws/aws-sdk-go-v2/service/organizations" @@ -146,6 +165,7 @@ import ( route53_sdkv2 "github.com/aws/aws-sdk-go-v2/service/route53" route53domains_sdkv2 "github.com/aws/aws-sdk-go-v2/service/route53domains" route53profiles_sdkv2 "github.com/aws/aws-sdk-go-v2/service/route53profiles" + rum_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rum" s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" 
s3control_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3control" scheduler_sdkv2 "github.com/aws/aws-sdk-go-v2/service/scheduler" @@ -153,10 +173,12 @@ import ( secretsmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/secretsmanager" securityhub_sdkv2 "github.com/aws/aws-sdk-go-v2/service/securityhub" securitylake_sdkv2 "github.com/aws/aws-sdk-go-v2/service/securitylake" + serverlessapplicationrepository_sdkv2 "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository" servicecatalogappregistry_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry" servicediscovery_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicediscovery" servicequotas_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicequotas" sesv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sesv2" + sfn_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sfn" shield_sdkv2 "github.com/aws/aws-sdk-go-v2/service/shield" signer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/signer" sns_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sns" @@ -184,41 +206,23 @@ import ( workspacesweb_sdkv2 "github.com/aws/aws-sdk-go-v2/service/workspacesweb" xray_sdkv2 "github.com/aws/aws-sdk-go-v2/service/xray" appmesh_sdkv1 "github.com/aws/aws-sdk-go/service/appmesh" - appsync_sdkv1 "github.com/aws/aws-sdk-go/service/appsync" - backup_sdkv1 "github.com/aws/aws-sdk-go/service/backup" batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch" chime_sdkv1 "github.com/aws/aws-sdk-go/service/chime" - cloudwatchrum_sdkv1 "github.com/aws/aws-sdk-go/service/cloudwatchrum" - cognitoidentityprovider_sdkv1 "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" connect_sdkv1 "github.com/aws/aws-sdk-go/service/connect" - databasemigrationservice_sdkv1 "github.com/aws/aws-sdk-go/service/databasemigrationservice" - dataexchange_sdkv1 "github.com/aws/aws-sdk-go/service/dataexchange" - datapipeline_sdkv1 "github.com/aws/aws-sdk-go/service/datapipeline" - detective_sdkv1 "github.com/aws/aws-sdk-go/service/detective" 
directconnect_sdkv1 "github.com/aws/aws-sdk-go/service/directconnect" - directoryservice_sdkv1 "github.com/aws/aws-sdk-go/service/directoryservice" ec2_sdkv1 "github.com/aws/aws-sdk-go/service/ec2" ecs_sdkv1 "github.com/aws/aws-sdk-go/service/ecs" - efs_sdkv1 "github.com/aws/aws-sdk-go/service/efs" - elasticache_sdkv1 "github.com/aws/aws-sdk-go/service/elasticache" elasticsearchservice_sdkv1 "github.com/aws/aws-sdk-go/service/elasticsearchservice" elastictranscoder_sdkv1 "github.com/aws/aws-sdk-go/service/elastictranscoder" - elb_sdkv1 "github.com/aws/aws-sdk-go/service/elb" - elbv2_sdkv1 "github.com/aws/aws-sdk-go/service/elbv2" emr_sdkv1 "github.com/aws/aws-sdk-go/service/emr" emrcontainers_sdkv1 "github.com/aws/aws-sdk-go/service/emrcontainers" fsx_sdkv1 "github.com/aws/aws-sdk-go/service/fsx" gamelift_sdkv1 "github.com/aws/aws-sdk-go/service/gamelift" glue_sdkv1 "github.com/aws/aws-sdk-go/service/glue" - greengrass_sdkv1 "github.com/aws/aws-sdk-go/service/greengrass" guardduty_sdkv1 "github.com/aws/aws-sdk-go/service/guardduty" imagebuilder_sdkv1 "github.com/aws/aws-sdk-go/service/imagebuilder" inspector_sdkv1 "github.com/aws/aws-sdk-go/service/inspector" - iot_sdkv1 "github.com/aws/aws-sdk-go/service/iot" - iotanalytics_sdkv1 "github.com/aws/aws-sdk-go/service/iotanalytics" - iotevents_sdkv1 "github.com/aws/aws-sdk-go/service/iotevents" ivs_sdkv1 "github.com/aws/aws-sdk-go/service/ivs" - kafkaconnect_sdkv1 "github.com/aws/aws-sdk-go/service/kafkaconnect" kinesisanalytics_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisanalytics" kinesisanalyticsv2_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisanalyticsv2" kinesisvideo_sdkv1 "github.com/aws/aws-sdk-go/service/kinesisvideo" @@ -226,10 +230,8 @@ import ( licensemanager_sdkv1 "github.com/aws/aws-sdk-go/service/licensemanager" locationservice_sdkv1 "github.com/aws/aws-sdk-go/service/locationservice" macie2_sdkv1 "github.com/aws/aws-sdk-go/service/macie2" - managedgrafana_sdkv1 
"github.com/aws/aws-sdk-go/service/managedgrafana" memorydb_sdkv1 "github.com/aws/aws-sdk-go/service/memorydb" neptune_sdkv1 "github.com/aws/aws-sdk-go/service/neptune" - networkfirewall_sdkv1 "github.com/aws/aws-sdk-go/service/networkfirewall" networkmanager_sdkv1 "github.com/aws/aws-sdk-go/service/networkmanager" opensearchservice_sdkv1 "github.com/aws/aws-sdk-go/service/opensearchservice" opsworks_sdkv1 "github.com/aws/aws-sdk-go/service/opsworks" @@ -244,10 +246,8 @@ import ( route53resolver_sdkv1 "github.com/aws/aws-sdk-go/service/route53resolver" s3outposts_sdkv1 "github.com/aws/aws-sdk-go/service/s3outposts" sagemaker_sdkv1 "github.com/aws/aws-sdk-go/service/sagemaker" - serverlessapplicationrepository_sdkv1 "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" servicecatalog_sdkv1 "github.com/aws/aws-sdk-go/service/servicecatalog" ses_sdkv1 "github.com/aws/aws-sdk-go/service/ses" - sfn_sdkv1 "github.com/aws/aws-sdk-go/service/sfn" simpledb_sdkv1 "github.com/aws/aws-sdk-go/service/simpledb" storagegateway_sdkv1 "github.com/aws/aws-sdk-go/service/storagegateway" worklink_sdkv1 "github.com/aws/aws-sdk-go/service/worklink" @@ -319,14 +319,18 @@ func (c *AWSClient) AppStreamClient(ctx context.Context) *appstream_sdkv2.Client return errs.Must(client[*appstream_sdkv2.Client](ctx, c, names.AppStream, make(map[string]any))) } -func (c *AWSClient) AppSyncConn(ctx context.Context) *appsync_sdkv1.AppSync { - return errs.Must(conn[*appsync_sdkv1.AppSync](ctx, c, names.AppSync, make(map[string]any))) +func (c *AWSClient) AppSyncClient(ctx context.Context) *appsync_sdkv2.Client { + return errs.Must(client[*appsync_sdkv2.Client](ctx, c, names.AppSync, make(map[string]any))) } func (c *AWSClient) ApplicationInsightsClient(ctx context.Context) *applicationinsights_sdkv2.Client { return errs.Must(client[*applicationinsights_sdkv2.Client](ctx, c, names.ApplicationInsights, make(map[string]any))) } +func (c *AWSClient) ApplicationSignalsClient(ctx context.Context) 
*applicationsignals_sdkv2.Client { + return errs.Must(client[*applicationsignals_sdkv2.Client](ctx, c, names.ApplicationSignals, make(map[string]any))) +} + func (c *AWSClient) AthenaClient(ctx context.Context) *athena_sdkv2.Client { return errs.Must(client[*athena_sdkv2.Client](ctx, c, names.Athena, make(map[string]any))) } @@ -347,8 +351,8 @@ func (c *AWSClient) BCMDataExportsClient(ctx context.Context) *bcmdataexports_sd return errs.Must(client[*bcmdataexports_sdkv2.Client](ctx, c, names.BCMDataExports, make(map[string]any))) } -func (c *AWSClient) BackupConn(ctx context.Context) *backup_sdkv1.Backup { - return errs.Must(conn[*backup_sdkv1.Backup](ctx, c, names.Backup, make(map[string]any))) +func (c *AWSClient) BackupClient(ctx context.Context) *backup_sdkv2.Client { + return errs.Must(client[*backup_sdkv2.Client](ctx, c, names.Backup, make(map[string]any))) } func (c *AWSClient) BatchConn(ctx context.Context) *batch_sdkv1.Batch { @@ -471,8 +475,8 @@ func (c *AWSClient) CodeStarNotificationsClient(ctx context.Context) *codestarno return errs.Must(client[*codestarnotifications_sdkv2.Client](ctx, c, names.CodeStarNotifications, make(map[string]any))) } -func (c *AWSClient) CognitoIDPConn(ctx context.Context) *cognitoidentityprovider_sdkv1.CognitoIdentityProvider { - return errs.Must(conn[*cognitoidentityprovider_sdkv1.CognitoIdentityProvider](ctx, c, names.CognitoIDP, make(map[string]any))) +func (c *AWSClient) CognitoIDPClient(ctx context.Context) *cognitoidentityprovider_sdkv2.Client { + return errs.Must(client[*cognitoidentityprovider_sdkv2.Client](ctx, c, names.CognitoIDP, make(map[string]any))) } func (c *AWSClient) CognitoIdentityClient(ctx context.Context) *cognitoidentity_sdkv2.Client { @@ -519,28 +523,28 @@ func (c *AWSClient) DLMClient(ctx context.Context) *dlm_sdkv2.Client { return errs.Must(client[*dlm_sdkv2.Client](ctx, c, names.DLM, make(map[string]any))) } -func (c *AWSClient) DMSConn(ctx context.Context) 
*databasemigrationservice_sdkv1.DatabaseMigrationService { - return errs.Must(conn[*databasemigrationservice_sdkv1.DatabaseMigrationService](ctx, c, names.DMS, make(map[string]any))) +func (c *AWSClient) DMSClient(ctx context.Context) *databasemigrationservice_sdkv2.Client { + return errs.Must(client[*databasemigrationservice_sdkv2.Client](ctx, c, names.DMS, make(map[string]any))) } func (c *AWSClient) DRSClient(ctx context.Context) *drs_sdkv2.Client { return errs.Must(client[*drs_sdkv2.Client](ctx, c, names.DRS, make(map[string]any))) } -func (c *AWSClient) DSConn(ctx context.Context) *directoryservice_sdkv1.DirectoryService { - return errs.Must(conn[*directoryservice_sdkv1.DirectoryService](ctx, c, names.DS, make(map[string]any))) -} - func (c *AWSClient) DSClient(ctx context.Context) *directoryservice_sdkv2.Client { return errs.Must(client[*directoryservice_sdkv2.Client](ctx, c, names.DS, make(map[string]any))) } -func (c *AWSClient) DataExchangeConn(ctx context.Context) *dataexchange_sdkv1.DataExchange { - return errs.Must(conn[*dataexchange_sdkv1.DataExchange](ctx, c, names.DataExchange, make(map[string]any))) +func (c *AWSClient) DataBrewClient(ctx context.Context) *databrew_sdkv2.Client { + return errs.Must(client[*databrew_sdkv2.Client](ctx, c, names.DataBrew, make(map[string]any))) +} + +func (c *AWSClient) DataExchangeClient(ctx context.Context) *dataexchange_sdkv2.Client { + return errs.Must(client[*dataexchange_sdkv2.Client](ctx, c, names.DataExchange, make(map[string]any))) } -func (c *AWSClient) DataPipelineConn(ctx context.Context) *datapipeline_sdkv1.DataPipeline { - return errs.Must(conn[*datapipeline_sdkv1.DataPipeline](ctx, c, names.DataPipeline, make(map[string]any))) +func (c *AWSClient) DataPipelineClient(ctx context.Context) *datapipeline_sdkv2.Client { + return errs.Must(client[*datapipeline_sdkv2.Client](ctx, c, names.DataPipeline, make(map[string]any))) } func (c *AWSClient) DataSyncClient(ctx context.Context) *datasync_sdkv2.Client { @@ 
-555,8 +559,8 @@ func (c *AWSClient) DeployClient(ctx context.Context) *codedeploy_sdkv2.Client { return errs.Must(client[*codedeploy_sdkv2.Client](ctx, c, names.Deploy, make(map[string]any))) } -func (c *AWSClient) DetectiveConn(ctx context.Context) *detective_sdkv1.Detective { - return errs.Must(conn[*detective_sdkv1.Detective](ctx, c, names.Detective, make(map[string]any))) +func (c *AWSClient) DetectiveClient(ctx context.Context) *detective_sdkv2.Client { + return errs.Must(client[*detective_sdkv2.Client](ctx, c, names.Detective, make(map[string]any))) } func (c *AWSClient) DevOpsGuruClient(ctx context.Context) *devopsguru_sdkv2.Client { @@ -607,20 +611,16 @@ func (c *AWSClient) ECSClient(ctx context.Context) *ecs_sdkv2.Client { return errs.Must(client[*ecs_sdkv2.Client](ctx, c, names.ECS, make(map[string]any))) } -func (c *AWSClient) EFSConn(ctx context.Context) *efs_sdkv1.EFS { - return errs.Must(conn[*efs_sdkv1.EFS](ctx, c, names.EFS, make(map[string]any))) +func (c *AWSClient) EFSClient(ctx context.Context) *efs_sdkv2.Client { + return errs.Must(client[*efs_sdkv2.Client](ctx, c, names.EFS, make(map[string]any))) } func (c *AWSClient) EKSClient(ctx context.Context) *eks_sdkv2.Client { return errs.Must(client[*eks_sdkv2.Client](ctx, c, names.EKS, make(map[string]any))) } -func (c *AWSClient) ELBConn(ctx context.Context) *elb_sdkv1.ELB { - return errs.Must(conn[*elb_sdkv1.ELB](ctx, c, names.ELB, make(map[string]any))) -} - -func (c *AWSClient) ELBV2Conn(ctx context.Context) *elbv2_sdkv1.ELBV2 { - return errs.Must(conn[*elbv2_sdkv1.ELBV2](ctx, c, names.ELBV2, make(map[string]any))) +func (c *AWSClient) ELBClient(ctx context.Context) *elasticloadbalancing_sdkv2.Client { + return errs.Must(client[*elasticloadbalancing_sdkv2.Client](ctx, c, names.ELB, make(map[string]any))) } func (c *AWSClient) ELBV2Client(ctx context.Context) *elasticloadbalancingv2_sdkv2.Client { @@ -643,10 +643,6 @@ func (c *AWSClient) EMRServerlessClient(ctx context.Context) 
*emrserverless_sdkv return errs.Must(client[*emrserverless_sdkv2.Client](ctx, c, names.EMRServerless, make(map[string]any))) } -func (c *AWSClient) ElastiCacheConn(ctx context.Context) *elasticache_sdkv1.ElastiCache { - return errs.Must(conn[*elasticache_sdkv1.ElastiCache](ctx, c, names.ElastiCache, make(map[string]any))) -} - func (c *AWSClient) ElastiCacheClient(ctx context.Context) *elasticache_sdkv2.Client { return errs.Must(client[*elasticache_sdkv2.Client](ctx, c, names.ElastiCache, make(map[string]any))) } @@ -707,12 +703,12 @@ func (c *AWSClient) GlueConn(ctx context.Context) *glue_sdkv1.Glue { return errs.Must(conn[*glue_sdkv1.Glue](ctx, c, names.Glue, make(map[string]any))) } -func (c *AWSClient) GrafanaConn(ctx context.Context) *managedgrafana_sdkv1.ManagedGrafana { - return errs.Must(conn[*managedgrafana_sdkv1.ManagedGrafana](ctx, c, names.Grafana, make(map[string]any))) +func (c *AWSClient) GrafanaClient(ctx context.Context) *grafana_sdkv2.Client { + return errs.Must(client[*grafana_sdkv2.Client](ctx, c, names.Grafana, make(map[string]any))) } -func (c *AWSClient) GreengrassConn(ctx context.Context) *greengrass_sdkv1.Greengrass { - return errs.Must(conn[*greengrass_sdkv1.Greengrass](ctx, c, names.Greengrass, make(map[string]any))) +func (c *AWSClient) GreengrassClient(ctx context.Context) *greengrass_sdkv2.Client { + return errs.Must(client[*greengrass_sdkv2.Client](ctx, c, names.Greengrass, make(map[string]any))) } func (c *AWSClient) GroundStationClient(ctx context.Context) *groundstation_sdkv2.Client { @@ -763,16 +759,16 @@ func (c *AWSClient) InternetMonitorClient(ctx context.Context) *internetmonitor_ return errs.Must(client[*internetmonitor_sdkv2.Client](ctx, c, names.InternetMonitor, make(map[string]any))) } -func (c *AWSClient) IoTConn(ctx context.Context) *iot_sdkv1.IoT { - return errs.Must(conn[*iot_sdkv1.IoT](ctx, c, names.IoT, make(map[string]any))) +func (c *AWSClient) IoTClient(ctx context.Context) *iot_sdkv2.Client { + return 
errs.Must(client[*iot_sdkv2.Client](ctx, c, names.IoT, make(map[string]any))) } -func (c *AWSClient) IoTAnalyticsConn(ctx context.Context) *iotanalytics_sdkv1.IoTAnalytics { - return errs.Must(conn[*iotanalytics_sdkv1.IoTAnalytics](ctx, c, names.IoTAnalytics, make(map[string]any))) +func (c *AWSClient) IoTAnalyticsClient(ctx context.Context) *iotanalytics_sdkv2.Client { + return errs.Must(client[*iotanalytics_sdkv2.Client](ctx, c, names.IoTAnalytics, make(map[string]any))) } -func (c *AWSClient) IoTEventsConn(ctx context.Context) *iotevents_sdkv1.IoTEvents { - return errs.Must(conn[*iotevents_sdkv1.IoTEvents](ctx, c, names.IoTEvents, make(map[string]any))) +func (c *AWSClient) IoTEventsClient(ctx context.Context) *iotevents_sdkv2.Client { + return errs.Must(client[*iotevents_sdkv2.Client](ctx, c, names.IoTEvents, make(map[string]any))) } func (c *AWSClient) KMSClient(ctx context.Context) *kms_sdkv2.Client { @@ -783,8 +779,8 @@ func (c *AWSClient) KafkaClient(ctx context.Context) *kafka_sdkv2.Client { return errs.Must(client[*kafka_sdkv2.Client](ctx, c, names.Kafka, make(map[string]any))) } -func (c *AWSClient) KafkaConnectConn(ctx context.Context) *kafkaconnect_sdkv1.KafkaConnect { - return errs.Must(conn[*kafkaconnect_sdkv1.KafkaConnect](ctx, c, names.KafkaConnect, make(map[string]any))) +func (c *AWSClient) KafkaConnectClient(ctx context.Context) *kafkaconnect_sdkv2.Client { + return errs.Must(client[*kafkaconnect_sdkv2.Client](ctx, c, names.KafkaConnect, make(map[string]any))) } func (c *AWSClient) KendraClient(ctx context.Context) *kendra_sdkv2.Client { @@ -903,14 +899,18 @@ func (c *AWSClient) NeptuneGraphClient(ctx context.Context) *neptunegraph_sdkv2. 
return errs.Must(client[*neptunegraph_sdkv2.Client](ctx, c, names.NeptuneGraph, make(map[string]any))) } -func (c *AWSClient) NetworkFirewallConn(ctx context.Context) *networkfirewall_sdkv1.NetworkFirewall { - return errs.Must(conn[*networkfirewall_sdkv1.NetworkFirewall](ctx, c, names.NetworkFirewall, make(map[string]any))) +func (c *AWSClient) NetworkFirewallClient(ctx context.Context) *networkfirewall_sdkv2.Client { + return errs.Must(client[*networkfirewall_sdkv2.Client](ctx, c, names.NetworkFirewall, make(map[string]any))) } func (c *AWSClient) NetworkManagerConn(ctx context.Context) *networkmanager_sdkv1.NetworkManager { return errs.Must(conn[*networkmanager_sdkv1.NetworkManager](ctx, c, names.NetworkManager, make(map[string]any))) } +func (c *AWSClient) NetworkMonitorClient(ctx context.Context) *networkmonitor_sdkv2.Client { + return errs.Must(client[*networkmonitor_sdkv2.Client](ctx, c, names.NetworkMonitor, make(map[string]any))) +} + func (c *AWSClient) ObservabilityAccessManagerClient(ctx context.Context) *oam_sdkv2.Client { return errs.Must(client[*oam_sdkv2.Client](ctx, c, names.ObservabilityAccessManager, make(map[string]any))) } @@ -991,8 +991,8 @@ func (c *AWSClient) RDSClient(ctx context.Context) *rds_sdkv2.Client { return errs.Must(client[*rds_sdkv2.Client](ctx, c, names.RDS, make(map[string]any))) } -func (c *AWSClient) RUMConn(ctx context.Context) *cloudwatchrum_sdkv1.CloudWatchRUM { - return errs.Must(conn[*cloudwatchrum_sdkv1.CloudWatchRUM](ctx, c, names.RUM, make(map[string]any))) +func (c *AWSClient) RUMClient(ctx context.Context) *rum_sdkv2.Client { + return errs.Must(client[*rum_sdkv2.Client](ctx, c, names.RUM, make(map[string]any))) } func (c *AWSClient) RedshiftConn(ctx context.Context) *redshift_sdkv1.Redshift { @@ -1079,8 +1079,8 @@ func (c *AWSClient) SESV2Client(ctx context.Context) *sesv2_sdkv2.Client { return errs.Must(client[*sesv2_sdkv2.Client](ctx, c, names.SESV2, make(map[string]any))) } -func (c *AWSClient) SFNConn(ctx 
context.Context) *sfn_sdkv1.SFN { - return errs.Must(conn[*sfn_sdkv1.SFN](ctx, c, names.SFN, make(map[string]any))) +func (c *AWSClient) SFNClient(ctx context.Context) *sfn_sdkv2.Client { + return errs.Must(client[*sfn_sdkv2.Client](ctx, c, names.SFN, make(map[string]any))) } func (c *AWSClient) SNSClient(ctx context.Context) *sns_sdkv2.Client { @@ -1147,8 +1147,8 @@ func (c *AWSClient) SecurityLakeClient(ctx context.Context) *securitylake_sdkv2. return errs.Must(client[*securitylake_sdkv2.Client](ctx, c, names.SecurityLake, make(map[string]any))) } -func (c *AWSClient) ServerlessRepoConn(ctx context.Context) *serverlessapplicationrepository_sdkv1.ServerlessApplicationRepository { - return errs.Must(conn[*serverlessapplicationrepository_sdkv1.ServerlessApplicationRepository](ctx, c, names.ServerlessRepo, make(map[string]any))) +func (c *AWSClient) ServerlessRepoClient(ctx context.Context) *serverlessapplicationrepository_sdkv2.Client { + return errs.Must(client[*serverlessapplicationrepository_sdkv2.Client](ctx, c, names.ServerlessRepo, make(map[string]any))) } func (c *AWSClient) ServiceCatalogConn(ctx context.Context) *servicecatalog_sdkv1.ServiceCatalog { diff --git a/internal/create/errors.go b/internal/create/errors.go index 41dfd1961d3..d739ba9154c 100644 --- a/internal/create/errors.go +++ b/internal/create/errors.go @@ -53,6 +53,13 @@ func ProblemStandardMessage(service, action, resource, id string, gotError error return fmt.Sprintf("%s %s %s (%s): %s", action, hf, resource, id, gotError) } +func AddError(d *fwdiag.Diagnostics, service, action, resource, id string, gotError error) { + d.AddError( + ProblemStandardMessage(service, action, resource, id, nil), + gotError.Error(), + ) +} + // Error returns an errors.Error with a standardized error message func Error(service, action, resource, id string, gotError error) error { return errors.New(ProblemStandardMessage(service, action, resource, id, gotError)) diff --git a/internal/experimental/sync/sync.go 
b/internal/experimental/sync/sync.go index 97e6bb33d1b..88d875ec9df 100644 --- a/internal/experimental/sync/sync.go +++ b/internal/experimental/sync/sync.go @@ -4,7 +4,6 @@ package sync import ( - "log" "os" "strconv" "sync" @@ -59,7 +58,7 @@ func (s Semaphore) Notify() { select { case <-s: default: - log.Println("[WARN] Notifying semaphore without Wait") + // log.Println("[WARN] Notifying semaphore without Wait") } } diff --git a/internal/flex/flex.go b/internal/flex/flex.go index 510eea82422..cabd68647d7 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -425,11 +425,16 @@ func IntValueToString(v int) *string { return aws.String(strconv.Itoa(v)) } -// Int64ToStringValue converts an int64 pointer to a Go string value. +// Int32ToStringValue converts an int32 pointer to a Go string value. func Int32ToStringValue(v *int32) string { return strconv.FormatInt(int64(aws.Int32Value(v)), 10) } +// Int32ValueToStringValue converts an int32 value to a Go string value. +func Int32ValueToStringValue(v int32) string { + return strconv.FormatInt(int64(v), 10) +} + // Int64ToStringValue converts an int64 pointer to a Go string value. func Int64ToStringValue(v *int64) string { return strconv.FormatInt(aws.Int64Value(v), 10) @@ -450,8 +455,7 @@ func StringToIntValue(v *string) int { // StringToInt32Value converts a string pointer to a Go int32 value. // Invalid integer strings are converted to 0. func StringToInt32Value(v *string) int32 { - i, _ := strconv.ParseInt(aws.StringValue(v), 0, 32) - return int32(i) + return StringValueToInt32Value(aws.StringValue(v)) } // StringValueToBase64String converts a string to a Go base64 string pointer. @@ -459,6 +463,19 @@ func StringValueToBase64String(v string) *string { return aws.String(itypes.Base64EncodeOnce([]byte(v))) } +// StringValueToInt32 converts a string to a Go int32 pointer. +// Invalid integer strings are converted to 0.
+func StringValueToInt32(v string) *int32 { + return aws.Int32(StringValueToInt32Value(v)) +} + +// StringValueToInt32Value converts a string to a Go int32 value. +// Invalid integer strings are converted to 0. +func StringValueToInt32Value(v string) int32 { + i, _ := strconv.ParseInt(v, 0, 32) + return int32(i) +} + // StringValueToInt64 converts a string to a Go int64 pointer. // Invalid integer strings are converted to 0. func StringValueToInt64(v string) *int64 { diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/auto_expand.go index 78a1213e3aa..30a24b254d0 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/auto_expand.go @@ -19,6 +19,11 @@ import ( // Expand = TF --> AWS +// Expander is implemented by types that customize their expansion +type Expander interface { + Expand(ctx context.Context) (any, diag.Diagnostics) +} + // Expand "expands" a resource's "business logic" data structure, // implemented using Terraform Plugin Framework data types, into // an AWS SDK for Go v2 API data structure. @@ -66,6 +71,11 @@ func (expander autoExpander) getOptions() AutoFlexOptions { func (expander autoExpander) convert(ctx context.Context, valFrom, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics + if fromExpander, ok := valFrom.Interface().(Expander); ok { + diags.Append(expandExpander(ctx, fromExpander, vTo)...) + return diags + } + vFrom, ok := valFrom.Interface().(attr.Value) if !ok { diags.AddError("AutoFlEx", fmt.Sprintf("does not implement attr.Value: %s", valFrom.Kind())) @@ -358,6 +368,15 @@ func (expander autoExpander) object(ctx context.Context, vFrom basetypes.ObjectV return diags } } + + case reflect.Interface: + // + // types.Object -> interface. + // + if vFrom, ok := vFrom.(fwtypes.NestedObjectValue); ok { + diags.Append(expander.nestedObjectToStruct(ctx, vFrom, tTo, vTo)...) 
+ return diags + } } tflog.Info(ctx, "AutoFlex Expand; incompatible types", map[string]interface{}{ @@ -379,8 +398,12 @@ func (expander autoExpander) list(ctx context.Context, vFrom basetypes.ListValua } switch v.ElementType(ctx).(type) { + case basetypes.Int64Typable: + diags.Append(expander.listOrSetOfInt64(ctx, v, vTo)...) + return diags + case basetypes.StringTypable: - diags.Append(expander.listOfString(ctx, v, vTo)...) + diags.Append(expander.listOrSetOfString(ctx, v, vTo)...) return diags case basetypes.ObjectTypable: @@ -398,8 +421,71 @@ func (expander autoExpander) list(ctx context.Context, vFrom basetypes.ListValua return diags } -// listOfString copies a Plugin Framework ListOfString(ish) value to a compatible AWS API value. -func (expander autoExpander) listOfString(ctx context.Context, vFrom basetypes.ListValue, vTo reflect.Value) diag.Diagnostics { +// listOrSetOfInt64 copies a Plugin Framework ListOfInt64(ish) or SetOfInt64(ish) value to a compatible AWS API value. +func (expander autoExpander) listOrSetOfInt64(ctx context.Context, vFrom valueWithElementsAs, vTo reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + switch vTo.Kind() { + case reflect.Slice: + switch tSliceElem := vTo.Type().Elem(); tSliceElem.Kind() { + case reflect.Int32, reflect.Int64: + // + // types.List(OfInt64) -> []int64 or []int32 + // + var to []int64 + diags.Append(vFrom.ElementsAs(ctx, &to, false)...) + if diags.HasError() { + return diags + } + + vals := reflect.MakeSlice(vTo.Type(), len(to), len(to)) + for i := 0; i < len(to); i++ { + vals.Index(i).SetInt(to[i]) + } + vTo.Set(vals) + return diags + + case reflect.Ptr: + switch tSliceElem.Elem().Kind() { + case reflect.Int32: + // + // types.List(OfInt64) -> []*int32. + // + var to []*int32 + diags.Append(vFrom.ElementsAs(ctx, &to, false)...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) + return diags + + case reflect.Int64: + // + // types.List(OfInt64) -> []*int64. 
+ // + var to []*int64 + diags.Append(vFrom.ElementsAs(ctx, &to, false)...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) + return diags + } + } + } + + tflog.Info(ctx, "AutoFlex Expand; incompatible types", map[string]interface{}{ + "from": vFrom.Type(ctx), + "to": vTo.Kind(), + }) + + return diags +} + +// listOrSetOfString copies a Plugin Framework ListOfString(ish) or SetOfString(ish) value to a compatible AWS API value. +func (expander autoExpander) listOrSetOfString(ctx context.Context, vFrom valueWithElementsAs, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics switch vTo.Kind() { @@ -443,8 +529,8 @@ func (expander autoExpander) listOfString(ctx context.Context, vFrom basetypes.L } tflog.Info(ctx, "AutoFlex Expand; incompatible types", map[string]interface{}{ - "from list[%s]": vFrom.ElementType(ctx), - "to": vTo.Kind(), + "from": vFrom.Type(ctx), + "to": vTo.Kind(), }) return diags @@ -576,8 +662,12 @@ func (expander autoExpander) set(ctx context.Context, vFrom basetypes.SetValuabl } switch v.ElementType(ctx).(type) { + case basetypes.Int64Typable: + diags.Append(expander.listOrSetOfInt64(ctx, v, vTo)...) + return diags + case basetypes.StringTypable: - diags.Append(expander.setOfString(ctx, v, vTo)...) + diags.Append(expander.listOrSetOfString(ctx, v, vTo)...) return diags case basetypes.ObjectTypable: @@ -595,58 +685,6 @@ func (expander autoExpander) set(ctx context.Context, vFrom basetypes.SetValuabl return diags } -// setOfString copies a Plugin Framework SetOfString(ish) value to a compatible AWS API value. -func (expander autoExpander) setOfString(ctx context.Context, vFrom basetypes.SetValue, vTo reflect.Value) diag.Diagnostics { - var diags diag.Diagnostics - - switch vTo.Kind() { - case reflect.Slice: - switch tSliceElem := vTo.Type().Elem(); tSliceElem.Kind() { - case reflect.String: - // - // types.Set(OfString) -> []string. - // - var to []string - diags.Append(vFrom.ElementsAs(ctx, &to, false)...) 
- if diags.HasError() { - return diags - } - - // Copy elements individually to enable expansion of lists of - // custom string types (AWS enums) - vals := reflect.MakeSlice(vTo.Type(), len(to), len(to)) - for i := 0; i < len(to); i++ { - vals.Index(i).SetString(to[i]) - } - vTo.Set(vals) - return diags - - case reflect.Ptr: - switch tSliceElem.Elem().Kind() { - case reflect.String: - // - // types.Set(OfString) -> []*string. - // - var to []*string - diags.Append(vFrom.ElementsAs(ctx, &to, false)...) - if diags.HasError() { - return diags - } - - vTo.Set(reflect.ValueOf(to)) - return diags - } - } - } - - tflog.Info(ctx, "AutoFlex Expand; incompatible types", map[string]interface{}{ - "from set[%s]": vFrom.ElementType(ctx), - "to": vTo.Kind(), - }) - - return diags -} - // nestedObjectCollection copies a Plugin Framework NestedObjectCollectionValue value to a compatible AWS API value. func (expander autoExpander) nestedObjectCollection(ctx context.Context, vFrom fwtypes.NestedObjectCollectionValue, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics @@ -666,6 +704,13 @@ func (expander autoExpander) nestedObjectCollection(ctx context.Context, vFrom f return diags } + case reflect.Interface: + // + // types.List(OfObject) -> interface. + // + diags.Append(expander.nestedObjectToStruct(ctx, vFrom, tTo, vTo)...) + return diags + case reflect.Map: switch tElem := tTo.Elem(); tElem.Kind() { case reflect.Struct: @@ -706,16 +751,9 @@ func (expander autoExpander) nestedObjectCollection(ctx context.Context, vFrom f // // types.List(OfObject) -> []interface. // - // Smithy union type handling not yet implemented. Silently skip. + diags.Append(expander.nestedObjectToSlice(ctx, vFrom, tTo, tElem, vTo)...) return diags } - - case reflect.Interface: - // - // types.List(OfObject) -> interface. - // - // Smithy union type handling not yet implemented. Silently skip. 
- return diags } diags.AddError("Incompatible types", fmt.Sprintf("nestedObjectCollection[%s] cannot be expanded to %s", vFrom.Type(ctx).(attr.TypeWithElementType).ElementType(), vTo.Kind())) @@ -740,10 +778,12 @@ func (expander autoExpander) nestedObjectToStruct(ctx context.Context, vFrom fwt return diags } - // Set value (or pointer). - if vTo.Type().Kind() == reflect.Struct { + // Set value. + switch vTo.Type().Kind() { + case reflect.Struct, reflect.Interface: vTo.Set(to.Elem()) - } else { + + default: vTo.Set(to) } @@ -773,10 +813,12 @@ func (expander autoExpander) nestedObjectToSlice(ctx context.Context, vFrom fwty return diags } - // Set value (or pointer) in the target slice. - if vTo.Type().Elem().Kind() == reflect.Struct { + // Set value in the target slice. + switch vTo.Type().Elem().Kind() { + case reflect.Struct, reflect.Interface: t.Index(i).Set(target.Elem()) - } else { + + default: t.Index(i).Set(target) } } @@ -875,3 +917,77 @@ func blockKeyMap(from any) (reflect.Value, diag.Diagnostics) { return reflect.Zero(reflect.TypeOf("")), diags } + +func expandExpander(ctx context.Context, fromExpander Expander, toVal reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + expanded, d := fromExpander.Expand(ctx) + diags.Append(d...) 
+ if diags.HasError() { + return diags + } + + if expanded == nil { + diags.Append(diagExpandsToNil(reflect.TypeOf(fromExpander))) + return diags + } + + expandedVal := reflect.ValueOf(expanded) + + targetType := toVal.Type() + if targetType.Kind() == reflect.Interface { + expandedType := reflect.TypeOf(expanded) + if !expandedType.Implements(targetType) { + diags.Append(diagExpandedTypeDoesNotImplement(expandedType, targetType)) + return diags + } + + toVal.Set(expandedVal) + + return diags + } + + if targetType.Kind() == reflect.Struct { + expandedVal = expandedVal.Elem() + } + expandedType := expandedVal.Type() + + if !expandedType.AssignableTo(targetType) { + diags.Append(diagCannotBeAssigned(expandedType, targetType)) + return diags + } + + toVal.Set(expandedVal) + + return diags +} + +func diagExpandsToNil(expanderType reflect.Type) diag.ErrorDiagnostic { + return diag.NewErrorDiagnostic( + "Incompatible Types", + "An unexpected error occurred while expanding configuration. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Expanding %q returned nil.", fullTypeName(expanderType)), + ) +} + +func diagExpandedTypeDoesNotImplement(expandedType, targetType reflect.Type) diag.ErrorDiagnostic { + return diag.NewErrorDiagnostic( + "Incompatible Types", + "An unexpected error occurred while expanding configuration. "+ + "This is always an error in the provider. "+ + "Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Type %q does not implement %q.", fullTypeName(expandedType), fullTypeName(targetType)), + ) +} + +func diagCannotBeAssigned(expandedType, targetType reflect.Type) diag.ErrorDiagnostic { + return diag.NewErrorDiagnostic( + "Incompatible Types", + "An unexpected error occurred while expanding configuration. "+ + "This is always an error in the provider. 
"+ + "Please report the following to the provider developer:\n\n"+ + fmt.Sprintf("Type %q cannot be assigned to %q.", fullTypeName(expandedType), fullTypeName(targetType)), + ) +} diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go index a2762166407..d7c7f142127 100644 --- a/internal/framework/flex/auto_expand_test.go +++ b/internal/framework/flex/auto_expand_test.go @@ -4,7 +4,9 @@ package flex import ( + "bytes" "context" + "reflect" "testing" "time" @@ -12,8 +14,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflogtest" "github.com/hashicorp/terraform-provider-aws/internal/errs" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" ) @@ -21,8 +25,6 @@ import ( func TestExpand(t *testing.T) { t.Parallel() - ctx := context.Background() - testString := "test" testStringResult := "a" @@ -34,25 +36,37 @@ func TestExpand(t *testing.T) { testCases := autoFlexTestCases{ { TestName: "nil Source and Target", - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "target (): invalid, want pointer"), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[, ]"), + }, }, { TestName: "non-pointer Target", Source: TestFlex00{}, Target: 0, - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "target (int): int, want pointer"), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[flex.TestFlex00, int]"), + }, }, { TestName: "non-struct Source", Source: testString, Target: &TestFlex00{}, - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "does not implement attr.Value: 
string"), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[string, *flex.TestFlex00]"), + }, }, { TestName: "non-struct Target", Source: TestFlex00{}, Target: &testString, - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "does not implement attr.Value: struct"), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[flex.TestFlex00, *string]"), + }, }, { TestName: "types.String to string", @@ -82,7 +96,11 @@ func TestExpand(t *testing.T) { TestName: "does not implement attr.Value Source", Source: &TestFlexAWS01{Field1: "a"}, Target: &TestFlexAWS01{}, - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "does not implement attr.Value: string"), + diag.NewErrorDiagnostic("AutoFlEx", "convert (Field1)"), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[*flex.TestFlexAWS01, *flex.TestFlexAWS01]"), + }, }, { TestName: "single string Source and single string Target", @@ -101,6 +119,15 @@ func TestExpand(t *testing.T) { Source: &TestFlexTF01{Field1: types.StringValue("a")}, Target: &TestFlexAWS03{}, WantTarget: &TestFlexAWS03{}, + expectedLogLines: []map[string]any{ + { + "@level": "info", + "@module": "provider", + "@message": "AutoFlex Expand; incompatible types", + "from": map[string]any{}, + "to": float64(reflect.Int64), + }, + }, }, { TestName: "primtive types Source and primtive types Target", @@ -247,8 +274,8 @@ func TestExpand(t *testing.T) { }, }, { - Context: context.WithValue(ctx, ResourcePrefix, "Intent"), - TestName: "resource name prefix", + ContextFn: func(ctx context.Context) context.Context { return context.WithValue(ctx, ResourcePrefix, "Intent") }, + TestName: "resource name prefix", Source: &TestFlexTF16{ Name: types.StringValue("Ovodoghen"), }, @@ -303,7 +330,7 @@ func TestExpand(t *testing.T) { }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) } func TestExpandGeneric(t *testing.T) { @@ -655,7 +682,7 @@ func TestExpandGeneric(t *testing.T) { }, } - 
runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) } func TestExpandSimpleSingleNestedBlock(t *testing.T) { @@ -701,7 +728,7 @@ func TestExpandSimpleSingleNestedBlock(t *testing.T) { WantTarget: &aws03{Field1: aws01{Field1: aws.String("a"), Field2: 1}}, }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) } func TestExpandComplexSingleNestedBlock(t *testing.T) { @@ -752,7 +779,7 @@ func TestExpandComplexSingleNestedBlock(t *testing.T) { WantTarget: &aws03{Field1: &aws02{Field1: &aws01{Field1: true, Field2: []string{"a", "b"}}}}, }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) } func TestExpandStringEnum(t *testing.T) { @@ -761,7 +788,6 @@ func TestExpandStringEnum(t *testing.T) { var testEnum TestEnum testEnumList := TestEnumList - ctx := context.Background() testCases := autoFlexTestCases{ { TestName: "valid value", @@ -776,7 +802,191 @@ func TestExpandStringEnum(t *testing.T) { WantTarget: &testEnum, }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) +} + +func TestExpandListOfInt64(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + { + TestName: "valid value []int64", + Source: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]int64{}, + WantTarget: &[]int64{1, -1}, + }, + { + TestName: "empty value []int64", + Source: types.ListValueMust(types.Int64Type, []attr.Value{}), + Target: &[]int64{}, + WantTarget: &[]int64{}, + }, + { + TestName: "null value []int64", + Source: types.ListNull(types.Int64Type), + Target: &[]int64{}, + WantTarget: &[]int64{}, + }, + { + TestName: "valid value []*int64", + Source: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]*int64{}, + WantTarget: &[]*int64{aws.Int64(1), aws.Int64(-1)}, + }, + { + TestName: "empty value []*int64", + Source: 
types.ListValueMust(types.Int64Type, []attr.Value{}), + Target: &[]*int64{}, + WantTarget: &[]*int64{}, + }, + { + TestName: "null value []*int64", + Source: types.ListNull(types.Int64Type), + Target: &[]*int64{}, + WantTarget: &[]*int64{}, + }, + { + TestName: "valid value []int32", + Source: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]int32{}, + WantTarget: &[]int32{1, -1}, + }, + { + TestName: "empty value []int32", + Source: types.ListValueMust(types.Int64Type, []attr.Value{}), + Target: &[]int32{}, + WantTarget: &[]int32{}, + }, + { + TestName: "null value []int32", + Source: types.ListNull(types.Int64Type), + Target: &[]int32{}, + WantTarget: &[]int32{}, + }, + { + TestName: "valid value []*int32", + Source: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]*int32{}, + WantTarget: &[]*int32{aws.Int32(1), aws.Int32(-1)}, + }, + { + TestName: "empty value []*int32", + Source: types.ListValueMust(types.Int64Type, []attr.Value{}), + Target: &[]*int32{}, + WantTarget: &[]*int32{}, + }, + { + TestName: "null value []*int32", + Source: types.ListNull(types.Int64Type), + Target: &[]*int32{}, + WantTarget: &[]*int32{}, + }, + } + runAutoExpandTestCases(t, testCases) +} + +func TestExpandSetOfInt64(t *testing.T) { + t.Parallel() + + testCases := autoFlexTestCases{ + { + TestName: "valid value []int64", + Source: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]int64{}, + WantTarget: &[]int64{1, -1}, + }, + { + TestName: "empty value []int64", + Source: types.SetValueMust(types.Int64Type, []attr.Value{}), + Target: &[]int64{}, + WantTarget: &[]int64{}, + }, + { + TestName: "null value []int64", + Source: types.SetNull(types.Int64Type), + Target: &[]int64{}, + WantTarget: &[]int64{}, + }, + { + TestName: "valid value []*int64", + Source: 
types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]*int64{}, + WantTarget: &[]*int64{aws.Int64(1), aws.Int64(-1)}, + }, + { + TestName: "empty value []*int64", + Source: types.SetValueMust(types.Int64Type, []attr.Value{}), + Target: &[]*int64{}, + WantTarget: &[]*int64{}, + }, + { + TestName: "null value []*int64", + Source: types.SetNull(types.Int64Type), + Target: &[]*int64{}, + WantTarget: &[]*int64{}, + }, + { + TestName: "valid value []int32", + Source: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]int32{}, + WantTarget: &[]int32{1, -1}, + }, + { + TestName: "empty value []int32", + Source: types.SetValueMust(types.Int64Type, []attr.Value{}), + Target: &[]int32{}, + WantTarget: &[]int32{}, + }, + { + TestName: "null value []int32", + Source: types.SetNull(types.Int64Type), + Target: &[]int32{}, + WantTarget: &[]int32{}, + }, + { + TestName: "valid value []*int32", + Source: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + Target: &[]*int32{}, + WantTarget: &[]*int32{aws.Int32(1), aws.Int32(-1)}, + }, + { + TestName: "empty value []*int32", + Source: types.SetValueMust(types.Int64Type, []attr.Value{}), + Target: &[]*int32{}, + WantTarget: &[]*int32{}, + }, + { + TestName: "null value []*int32", + Source: types.SetNull(types.Int64Type), + Target: &[]*int32{}, + WantTarget: &[]*int32{}, + }, + } + runAutoExpandTestCases(t, testCases) } func TestExpandListOfStringEnum(t *testing.T) { @@ -786,7 +996,6 @@ func TestExpandListOfStringEnum(t *testing.T) { var testEnumFoo testEnum = "foo" var testEnumBar testEnum = "bar" - ctx := context.Background() testCases := autoFlexTestCases{ { TestName: "valid value", @@ -810,7 +1019,7 @@ func TestExpandListOfStringEnum(t *testing.T) { WantTarget: &[]testEnum{}, }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, 
testCases) } func TestExpandSetOfStringEnum(t *testing.T) { @@ -820,7 +1029,6 @@ func TestExpandSetOfStringEnum(t *testing.T) { var testEnumFoo testEnum = "foo" var testEnumBar testEnum = "bar" - ctx := context.Background() testCases := autoFlexTestCases{ { TestName: "valid value", @@ -844,7 +1052,7 @@ func TestExpandSetOfStringEnum(t *testing.T) { WantTarget: &[]testEnum{}, }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) } func TestExpandSimpleNestedBlockWithStringEnum(t *testing.T) { @@ -859,7 +1067,6 @@ func TestExpandSimpleNestedBlockWithStringEnum(t *testing.T) { Field2 TestEnum } - ctx := context.Background() testCases := autoFlexTestCases{ { TestName: "single nested valid value", @@ -874,7 +1081,7 @@ func TestExpandSimpleNestedBlockWithStringEnum(t *testing.T) { WantTarget: &aws01{Field1: 1, Field2: ""}, }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) } func TestExpandComplexNestedBlockWithStringEnum(t *testing.T) { @@ -910,7 +1117,7 @@ func TestExpandComplexNestedBlockWithStringEnum(t *testing.T) { WantTarget: &aws01{Field1: 1, Field2: &aws02{Field2: ""}}, }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) } func TestExpandOptions(t *testing.T) { @@ -991,23 +1198,487 @@ func TestExpandOptions(t *testing.T) { }, }, } - runAutoExpandTestCases(ctx, t, testCases) + runAutoExpandTestCases(t, testCases) +} + +func TestExpandInterface(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + var targetInterface testFlexAWSInterfaceInterface + + testCases := autoFlexTestCases{ + { + TestName: "top level", + Source: testFlexTFInterfaceExpander{ + Field1: types.StringValue("value1"), + }, + Target: &targetInterface, + WantTarget: testFlexAWSInterfaceInterfacePtr(&testFlexAWSInterfaceInterfaceImpl{ + AWSField: "value1", + }), + }, + { + TestName: "top level return value does not implement target interface", + Source: 
testFlexTFInterfaceIncompatibleExpander{ + Field1: types.StringValue("value1"), + }, + Target: &targetInterface, + expectedDiags: diag.Diagnostics{ + diagExpandedTypeDoesNotImplement(reflect.TypeFor[*testFlexAWSInterfaceIncompatibleImpl](), reflect.TypeFor[testFlexAWSInterfaceInterface]()), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[flex.testFlexTFInterfaceIncompatibleExpander, *flex.testFlexAWSInterfaceInterface]"), + }, + }, + { + TestName: "single list Source and single interface Target", + Source: testFlexTFInterfaceListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFInterfaceExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &testFlexAWSInterfaceSingle{}, + WantTarget: &testFlexAWSInterfaceSingle{ + Field1: &testFlexAWSInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + { + TestName: "single list non-Expander Source and single interface Target", + Source: testFlexTFInterfaceListNestedObjectNonExpander{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []TestFlexTF01{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &testFlexAWSInterfaceSingle{}, + WantTarget: &testFlexAWSInterfaceSingle{ + Field1: nil, + }, + expectedLogLines: []map[string]any{ + { + "@level": "info", + "@module": "provider", + "@message": "AutoFlex Expand; incompatible types", + "from": map[string]any{}, + "to": float64(reflect.Interface), + }, + }, + }, + { + TestName: "single set Source and single interface Target", + Source: testFlexTFInterfaceSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []testFlexTFInterfaceExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &testFlexAWSInterfaceSingle{}, + WantTarget: &testFlexAWSInterfaceSingle{ + Field1: &testFlexAWSInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + { + TestName: "empty list Source and empty interface Target", + Source: 
testFlexTFInterfaceListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFInterfaceExpander{}), + }, + Target: &testFlexAWSInterfaceSlice{}, + WantTarget: &testFlexAWSInterfaceSlice{ + Field1: []testFlexAWSInterfaceInterface{}, + }, + }, + { + TestName: "non-empty list Source and non-empty interface Target", + Source: testFlexTFInterfaceListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFInterfaceExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &testFlexAWSInterfaceSlice{}, + WantTarget: &testFlexAWSInterfaceSlice{ + Field1: []testFlexAWSInterfaceInterface{ + &testFlexAWSInterfaceInterfaceImpl{ + AWSField: "value1", + }, + &testFlexAWSInterfaceInterfaceImpl{ + AWSField: "value2", + }, + }, + }, + }, + { + TestName: "empty set Source and empty interface Target", + Source: testFlexTFInterfaceSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []testFlexTFInterfaceExpander{}), + }, + Target: &testFlexAWSInterfaceSlice{}, + WantTarget: &testFlexAWSInterfaceSlice{ + Field1: []testFlexAWSInterfaceInterface{}, + }, + }, + { + TestName: "non-empty set Source and non-empty interface Target", + Source: testFlexTFInterfaceListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFInterfaceExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &testFlexAWSInterfaceSlice{}, + WantTarget: &testFlexAWSInterfaceSlice{ + Field1: []testFlexAWSInterfaceInterface{ + &testFlexAWSInterfaceInterfaceImpl{ + AWSField: "value1", + }, + &testFlexAWSInterfaceInterfaceImpl{ + AWSField: "value2", + }, + }, + }, + }, + { + TestName: "object value Source and struct Target", + Source: testFlexTFInterfaceObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &testFlexTFInterfaceExpander{ + Field1: 
types.StringValue("value1"), + }), + }, + Target: &testFlexAWSInterfaceSingle{}, + WantTarget: &testFlexAWSInterfaceSingle{ + Field1: &testFlexAWSInterfaceInterfaceImpl{ + AWSField: "value1", + }, + }, + }, + } + runAutoExpandTestCases(t, testCases) +} + +func testFlexAWSInterfaceInterfacePtr(v testFlexAWSInterfaceInterface) *testFlexAWSInterfaceInterface { // nosemgrep:ci.aws-in-func-name + return &v +} + +func TestExpandExpander(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + testCases := autoFlexTestCases{ + { + TestName: "top level struct Target", + Source: testFlexTFExpander{ + Field1: types.StringValue("value1"), + }, + Target: &testFlexAWSExpander{}, + WantTarget: &testFlexAWSExpander{ + AWSField: "value1", + }, + }, + { + TestName: "top level string Target", + Source: testFlexTFExpanderToString{ + Field1: types.StringValue("value1"), + }, + Target: aws.String(""), + WantTarget: aws.String("value1"), + }, + { + TestName: "top level incompatible struct Target", + Source: testFlexTFExpander{ + Field1: types.StringValue("value1"), + }, + Target: &testFlexAWSExpanderIncompatible{}, + expectedDiags: diag.Diagnostics{ + diagCannotBeAssigned(reflect.TypeFor[testFlexAWSExpander](), reflect.TypeFor[testFlexAWSExpanderIncompatible]()), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[flex.testFlexTFExpander, *flex.testFlexAWSExpanderIncompatible]"), + }, + }, + { + TestName: "top level expands to nil", + Source: testFlexTFExpanderToNil{ + Field1: types.StringValue("value1"), + }, + Target: &testFlexAWSExpander{}, + expectedDiags: diag.Diagnostics{ + diagExpandsToNil(reflect.TypeFor[testFlexTFExpanderToNil]()), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[flex.testFlexTFExpanderToNil, *flex.testFlexAWSExpander]"), + }, + }, + { + TestName: "top level incompatible non-struct Target", + Source: testFlexTFExpanderToString{ + Field1: types.StringValue("value1"), + }, + Target: aws.Int64(0), + expectedDiags: diag.Diagnostics{ + 
diagCannotBeAssigned(reflect.TypeFor[string](), reflect.TypeFor[int64]()), + diag.NewErrorDiagnostic("AutoFlEx", "Expand[flex.testFlexTFExpanderToString, *int64]"), + }, + }, + { + TestName: "single list Source and single struct Target", + Source: testFlexTFExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &testFlexAWSExpanderSingleStruct{}, + WantTarget: &testFlexAWSExpanderSingleStruct{ + Field1: testFlexAWSExpander{ + AWSField: "value1", + }, + }, + }, + { + TestName: "single set Source and single struct Target", + Source: testFlexTFExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &testFlexAWSExpanderSingleStruct{}, + WantTarget: &testFlexAWSExpanderSingleStruct{ + Field1: testFlexAWSExpander{ + AWSField: "value1", + }, + }, + }, + { + TestName: "single list Source and single *struct Target", + Source: testFlexTFExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &testFlexAWSExpanderSinglePtr{}, + WantTarget: &testFlexAWSExpanderSinglePtr{ + Field1: &testFlexAWSExpander{ + AWSField: "value1", + }, + }, + }, + { + TestName: "single set Source and single *struct Target", + Source: testFlexTFExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{ + { + Field1: types.StringValue("value1"), + }, + }), + }, + Target: &testFlexAWSExpanderSinglePtr{}, + WantTarget: &testFlexAWSExpanderSinglePtr{ + Field1: &testFlexAWSExpander{ + AWSField: "value1", + }, + }, + }, + { + TestName: "empty list Source and empty struct Target", + Source: testFlexTFExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, 
[]testFlexTFExpander{}), + }, + Target: &testFlexAWSExpanderStructSlice{}, + WantTarget: &testFlexAWSExpanderStructSlice{ + Field1: []testFlexAWSExpander{}, + }, + }, + { + TestName: "non-empty list Source and non-empty struct Target", + Source: testFlexTFExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &testFlexAWSExpanderStructSlice{}, + WantTarget: &testFlexAWSExpanderStructSlice{ + Field1: []testFlexAWSExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + { + TestName: "empty list Source and empty *struct Target", + Source: testFlexTFExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{}), + }, + Target: &testFlexAWSExpanderPtrSlice{}, + WantTarget: &testFlexAWSExpanderPtrSlice{ + Field1: []*testFlexAWSExpander{}, + }, + }, + { + TestName: "non-empty list Source and non-empty *struct Target", + Source: testFlexTFExpanderListNestedObject{ + Field1: fwtypes.NewListNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &testFlexAWSExpanderPtrSlice{}, + WantTarget: &testFlexAWSExpanderPtrSlice{ + Field1: []*testFlexAWSExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + { + TestName: "empty set Source and empty struct Target", + Source: testFlexTFExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{}), + }, + Target: &testFlexAWSExpanderStructSlice{}, + WantTarget: &testFlexAWSExpanderStructSlice{ + Field1: []testFlexAWSExpander{}, + }, + }, + { + TestName: "non-empty set Source and non-empty struct Target", + Source: testFlexTFExpanderSetNestedObject{ + Field1: 
fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &testFlexAWSExpanderStructSlice{}, + WantTarget: &testFlexAWSExpanderStructSlice{ + Field1: []testFlexAWSExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + { + TestName: "empty set Source and empty *struct Target", + Source: testFlexTFExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{}), + }, + Target: &testFlexAWSExpanderPtrSlice{}, + WantTarget: &testFlexAWSExpanderPtrSlice{ + Field1: []*testFlexAWSExpander{}, + }, + }, + { + TestName: "non-empty set Source and non-empty *struct Target", + Source: testFlexTFExpanderSetNestedObject{ + Field1: fwtypes.NewSetNestedObjectValueOfValueSliceMust(ctx, []testFlexTFExpander{ + { + Field1: types.StringValue("value1"), + }, + { + Field1: types.StringValue("value2"), + }, + }), + }, + Target: &testFlexAWSExpanderPtrSlice{}, + WantTarget: &testFlexAWSExpanderPtrSlice{ + Field1: []*testFlexAWSExpander{ + { + AWSField: "value1", + }, + { + AWSField: "value2", + }, + }, + }, + }, + { + TestName: "object value Source and struct Target", + Source: testFlexTFExpanderObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &testFlexTFExpander{ + Field1: types.StringValue("value1"), + }), + }, + Target: &testFlexAWSExpanderSingleStruct{}, + WantTarget: &testFlexAWSExpanderSingleStruct{ + Field1: testFlexAWSExpander{ + AWSField: "value1", + }, + }, + }, + { + TestName: "object value Source and *struct Target", + Source: testFlexTFExpanderObjectValue{ + Field1: fwtypes.NewObjectValueOfMust(ctx, &testFlexTFExpander{ + Field1: types.StringValue("value1"), + }), + }, + Target: &testFlexAWSExpanderSinglePtr{}, + WantTarget: &testFlexAWSExpanderSinglePtr{ + Field1: &testFlexAWSExpander{ + AWSField: "value1", + }, + }, + }, + } + runAutoExpandTestCases(t, 
testCases) } type autoFlexTestCase struct { - Context context.Context //nolint:containedctx // testing context use - Options []AutoFlexOptionsFunc - TestName string - Source any - Target any - WantErr bool - WantTarget any - WantDiff bool + ContextFn func(context.Context) context.Context + Options []AutoFlexOptionsFunc + TestName string + Source any + Target any + expectedDiags diag.Diagnostics + expectedLogLines []map[string]any + WantTarget any + WantDiff bool } type autoFlexTestCases []autoFlexTestCase -func runAutoExpandTestCases(ctx context.Context, t *testing.T, testCases autoFlexTestCases) { +func runAutoExpandTestCases(t *testing.T, testCases autoFlexTestCases) { t.Helper() for _, testCase := range testCases { @@ -1015,24 +1686,32 @@ func runAutoExpandTestCases(ctx context.Context, t *testing.T, testCases autoFle t.Run(testCase.TestName, func(t *testing.T) { t.Parallel() - testCtx := ctx //nolint:contextcheck // simplify use of testing context - if testCase.Context != nil { - testCtx = testCase.Context + ctx := context.Background() + if testCase.ContextFn != nil { + ctx = testCase.ContextFn(ctx) } - err := Expand(testCtx, testCase.Source, testCase.Target, testCase.Options...) - gotErr := err != nil + var buf bytes.Buffer + ctx = tflogtest.RootLogger(ctx, &buf) - if gotErr != testCase.WantErr { - t.Errorf("gotErr = %v, wantErr = %v", gotErr, testCase.WantErr) + diags := Expand(ctx, testCase.Source, testCase.Target, testCase.Options...) 
+ + if diff := cmp.Diff(diags, testCase.expectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + lines, err := tflogtest.MultilineJSONDecode(&buf) + if err != nil { + t.Fatalf("Expand: decoding log lines: %s", err) + } + if diff := cmp.Diff(lines, testCase.expectedLogLines); diff != "" { + t.Errorf("unexpected log lines diff (+wanted, -got): %s", diff) } - if gotErr { - if !testCase.WantErr { - t.Errorf("err = %q", err) + if !diags.HasError() { + if diff := cmp.Diff(testCase.Target, testCase.WantTarget); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) } - } else if diff := cmp.Diff(testCase.Target, testCase.WantTarget); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) } diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index a4816e19963..85c81aabda6 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -398,149 +398,90 @@ func (flattener autoFlattener) slice(ctx context.Context, vFrom reflect.Value, t var diags diag.Diagnostics switch tSliceElem := vFrom.Type().Elem(); tSliceElem.Kind() { - case reflect.String: + case reflect.Int32, reflect.Int64: switch tTo := tTo.(type) { case basetypes.ListTypable: // - // []string -> types.List(OfString). + // []int32 or []int64 -> types.List(OfInt64). // - if vFrom.IsNil() { - to, d := tTo.ValueFromList(ctx, types.ListNull(types.StringType)) - diags.Append(d...) - if diags.HasError() { - return diags - } - - vTo.Set(reflect.ValueOf(to)) - return diags - } - - elements := make([]attr.Value, vFrom.Len()) - for i := 0; i < vFrom.Len(); i++ { - elements[i] = types.StringValue(vFrom.Index(i).String()) - } - list, d := types.ListValue(types.StringType, elements) - diags.Append(d...) - if diags.HasError() { - return diags - } + diags.Append(flattener.sliceOfPrimtiveToList(ctx, vFrom, tTo, vTo, types.Int64Type, newInt64ValueFromReflectValue)...) 
+ return diags - to, d := tTo.ValueFromList(ctx, list) - diags.Append(d...) - if diags.HasError() { - return diags - } + case basetypes.SetTypable: + // + // []int32 or []int64 -> types.Set(OfInt64). + // + diags.Append(flattener.sliceOfPrimitiveToSet(ctx, vFrom, tTo, vTo, types.Int64Type, newInt64ValueFromReflectValue)...) + return diags + } - vTo.Set(reflect.ValueOf(to)) + case reflect.String: + switch tTo := tTo.(type) { + case basetypes.ListTypable: + // + // []string -> types.List(OfString). + // + diags.Append(flattener.sliceOfPrimtiveToList(ctx, vFrom, tTo, vTo, types.StringType, newStringValueFromReflectValue)...) return diags case basetypes.SetTypable: // // []string -> types.Set(OfString). // - if vFrom.IsNil() { - to, d := tTo.ValueFromSet(ctx, types.SetNull(types.StringType)) - diags.Append(d...) - if diags.HasError() { - return diags - } + diags.Append(flattener.sliceOfPrimitiveToSet(ctx, vFrom, tTo, vTo, types.StringType, newStringValueFromReflectValue)...) + return diags + } - vTo.Set(reflect.ValueOf(to)) + case reflect.Ptr: + switch tSliceElem.Elem().Kind() { + case reflect.Int32: + switch tTo := tTo.(type) { + case basetypes.ListTypable: + // + // []*int32 -> types.List(OfInt64). + // + diags.Append(flattener.sliceOfPrimtiveToList(ctx, vFrom, tTo, vTo, types.Int64Type, newInt64ValueFromReflectPointerValue)...) return diags - } - elements := make([]attr.Value, vFrom.Len()) - for i := 0; i < vFrom.Len(); i++ { - elements[i] = types.StringValue(vFrom.Index(i).String()) - } - set, d := types.SetValue(types.StringType, elements) - diags.Append(d...) - if diags.HasError() { + case basetypes.SetTypable: + // + // []*int32 -> types.Set(OfInt64). + // + diags.Append(flattener.sliceOfPrimitiveToSet(ctx, vFrom, tTo, vTo, types.Int64Type, newInt64ValueFromReflectPointerValue)...) return diags } - to, d := tTo.ValueFromSet(ctx, set) - diags.Append(d...) 
- if diags.HasError() { + case reflect.Int64: + switch tTo := tTo.(type) { + case basetypes.ListTypable: + // + // []*int64 -> types.List(OfInt64). + // + diags.Append(flattener.sliceOfPrimtiveToList(ctx, vFrom, tTo, vTo, types.Int64Type, newInt64ValueFromReflectPointerValue)...) return diags - } - vTo.Set(reflect.ValueOf(to)) - return diags - } + case basetypes.SetTypable: + // + // []*int64 -> types.Set(OfInt64). + // + diags.Append(flattener.sliceOfPrimitiveToSet(ctx, vFrom, tTo, vTo, types.Int64Type, newInt64ValueFromReflectPointerValue)...) + return diags + } - case reflect.Ptr: - switch tSliceElem.Elem().Kind() { case reflect.String: switch tTo := tTo.(type) { case basetypes.ListTypable: // // []*string -> types.List(OfString). // - if vFrom.IsNil() { - to, d := tTo.ValueFromList(ctx, types.ListNull(types.StringType)) - diags.Append(d...) - if diags.HasError() { - return diags - } - - vTo.Set(reflect.ValueOf(to)) - return diags - } - - from := vFrom.Interface().([]*string) - elements := make([]attr.Value, len(from)) - for i, v := range from { - elements[i] = types.StringPointerValue(v) - } - list, d := types.ListValue(types.StringType, elements) - diags.Append(d...) - if diags.HasError() { - return diags - } - - to, d := tTo.ValueFromList(ctx, list) - diags.Append(d...) - if diags.HasError() { - return diags - } - - vTo.Set(reflect.ValueOf(to)) + diags.Append(flattener.sliceOfPrimtiveToList(ctx, vFrom, tTo, vTo, types.StringType, newStringValueFromReflectPointerValue)...) return diags case basetypes.SetTypable: // - // []string -> types.Set(OfString). + // []*string -> types.Set(OfString). // - if vFrom.IsNil() { - to, d := tTo.ValueFromSet(ctx, types.SetNull(types.StringType)) - diags.Append(d...) 
- if diags.HasError() { - return diags - } - - vTo.Set(reflect.ValueOf(to)) - return diags - } - - from := vFrom.Interface().([]*string) - elements := make([]attr.Value, len(from)) - for i, v := range from { - elements[i] = types.StringPointerValue(v) - } - set, d := types.SetValue(types.StringType, elements) - diags.Append(d...) - if diags.HasError() { - return diags - } - - to, d := tTo.ValueFromSet(ctx, set) - diags.Append(d...) - if diags.HasError() { - return diags - } - - vTo.Set(reflect.ValueOf(to)) + diags.Append(flattener.sliceOfPrimitiveToSet(ctx, vFrom, tTo, vTo, types.StringType, newStringValueFromReflectPointerValue)...) return diags } @@ -549,7 +490,7 @@ func (flattener autoFlattener) slice(ctx context.Context, vFrom reflect.Value, t // // []*struct -> types.List(OfObject). // - diags.Append(flattener.sliceOfStructNestedObjectCollection(ctx, vFrom, tTo, vTo)...) + diags.Append(flattener.sliceOfStructToNestedObjectCollection(ctx, vFrom, tTo, vTo)...) return diags } } @@ -559,7 +500,7 @@ func (flattener autoFlattener) slice(ctx context.Context, vFrom reflect.Value, t // // []struct -> types.List(OfObject). // - diags.Append(flattener.sliceOfStructNestedObjectCollection(ctx, vFrom, tTo, vTo)...) + diags.Append(flattener.sliceOfStructToNestedObjectCollection(ctx, vFrom, tTo, vTo)...) return diags } @@ -887,8 +828,78 @@ func (flattener autoFlattener) structToNestedObject(ctx context.Context, vFrom r return diags } -// sliceOfStructNestedObjectCollection copies an AWS API []struct value to a compatible Plugin Framework NestedObjectCollectionValue value. -func (flattener autoFlattener) sliceOfStructNestedObjectCollection(ctx context.Context, vFrom reflect.Value, tTo fwtypes.NestedObjectCollectionType, vTo reflect.Value) diag.Diagnostics { +// sliceOfPrimtiveToList copies an AWS API slice of primitive (or pointer to primitive) value to a compatible Plugin Framework List value. 
+func (flattener autoFlattener) sliceOfPrimtiveToList(ctx context.Context, vFrom reflect.Value, tTo basetypes.ListTypable, vTo reflect.Value, elementType attr.Type, f attrValueFromReflectValueFunc) diag.Diagnostics { + var diags diag.Diagnostics + + if vFrom.IsNil() { + to, d := tTo.ValueFromList(ctx, types.ListNull(elementType)) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) + return diags + } + + elements := make([]attr.Value, vFrom.Len()) + for i := 0; i < vFrom.Len(); i++ { + elements[i] = f(vFrom.Index(i)) + } + list, d := types.ListValue(elementType, elements) + diags.Append(d...) + if diags.HasError() { + return diags + } + + to, d := tTo.ValueFromList(ctx, list) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) + return diags +} + +// sliceOfPrimitiveToSet copies an AWS API slice of primitive (or pointer to primitive) value to a compatible Plugin Framework Set value. +func (flattener autoFlattener) sliceOfPrimitiveToSet(ctx context.Context, vFrom reflect.Value, tTo basetypes.SetTypable, vTo reflect.Value, elementType attr.Type, f attrValueFromReflectValueFunc) diag.Diagnostics { + var diags diag.Diagnostics + + if vFrom.IsNil() { + to, d := tTo.ValueFromSet(ctx, types.SetNull(elementType)) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) + return diags + } + + elements := make([]attr.Value, vFrom.Len()) + for i := 0; i < vFrom.Len(); i++ { + elements[i] = f(vFrom.Index(i)) + } + set, d := types.SetValue(elementType, elements) + diags.Append(d...) + if diags.HasError() { + return diags + } + + to, d := tTo.ValueFromSet(ctx, set) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) + return diags +} + +// sliceOfStructToNestedObjectCollection copies an AWS API []struct value to a compatible Plugin Framework NestedObjectCollectionValue value. 
+func (flattener autoFlattener) sliceOfStructToNestedObjectCollection(ctx context.Context, vFrom reflect.Value, tTo fwtypes.NestedObjectCollectionType, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics if vFrom.IsNil() { @@ -983,3 +994,29 @@ func blockKeyMapSet(to any, key reflect.Value) diag.Diagnostics { return diags } + +type attrValueFromReflectValueFunc func(reflect.Value) attr.Value + +func newInt64ValueFromReflectValue(v reflect.Value) attr.Value { + return types.Int64Value(v.Int()) +} + +func newInt64ValueFromReflectPointerValue(v reflect.Value) attr.Value { + if v.IsNil() { + return types.Int64Null() + } + + return newInt64ValueFromReflectValue(v.Elem()) +} + +func newStringValueFromReflectValue(v reflect.Value) attr.Value { + return types.StringValue(v.String()) +} + +func newStringValueFromReflectPointerValue(v reflect.Value) attr.Value { + if v.IsNil() { + return types.StringNull() + } + + return newStringValueFromReflectValue(v.Elem()) +} diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index 3ad9e6700d3..7042e7bfe90 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -4,8 +4,10 @@ package flex import ( + "bytes" "context" "fmt" + "reflect" "testing" "time" @@ -14,8 +16,10 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflogtest" "github.com/hashicorp/terraform-provider-aws/internal/errs" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" ) @@ -36,25 +40,37 @@ func TestFlatten(t *testing.T) { testCases := autoFlexTestCases{ { TestName: "nil Source 
and Target", - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "target (): invalid, want pointer"), + diag.NewErrorDiagnostic("AutoFlEx", "Flatten[, ]"), + }, }, { TestName: "non-pointer Target", Source: TestFlex00{}, Target: 0, - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "target (int): int, want pointer"), + diag.NewErrorDiagnostic("AutoFlEx", "Flatten[flex.TestFlex00, int]"), + }, }, { TestName: "non-struct Source", Source: testString, Target: &TestFlex00{}, - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "does not implement attr.Value: struct"), + diag.NewErrorDiagnostic("AutoFlEx", "Flatten[string, *flex.TestFlex00]"), + }, }, { TestName: "non-struct Target", Source: TestFlex00{}, Target: &testString, - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "does not implement attr.Value: string"), + diag.NewErrorDiagnostic("AutoFlEx", "Flatten[flex.TestFlex00, *string]"), + }, }, { TestName: "json interface Source string Target", @@ -92,7 +108,11 @@ func TestFlatten(t *testing.T) { TestName: "does not implement attr.Value Target", Source: &TestFlexAWS01{Field1: "a"}, Target: &TestFlexAWS01{}, - WantErr: true, + expectedDiags: diag.Diagnostics{ + diag.NewErrorDiagnostic("AutoFlEx", "does not implement attr.Value: string"), + diag.NewErrorDiagnostic("AutoFlEx", "convert (Field1)"), + diag.NewErrorDiagnostic("AutoFlEx", "Flatten[*flex.TestFlexAWS01, *flex.TestFlexAWS01]"), + }, }, { TestName: "single empty string Source and single string Target", @@ -123,6 +143,15 @@ func TestFlatten(t *testing.T) { Source: &TestFlexAWS01{Field1: "a"}, Target: &TestFlexTF02{}, WantTarget: &TestFlexTF02{}, + expectedLogLines: []map[string]any{ + { + "@level": "info", + "@module": "provider", + "@message": "AutoFlex Flatten; incompatible types", + "from": float64(reflect.String), + "to": map[string]any{}, + }, + }, }, { 
TestName: "zero value primtive types Source and primtive types Target", @@ -375,8 +404,8 @@ func TestFlatten(t *testing.T) { }, }, { - Context: context.WithValue(ctx, ResourcePrefix, "Intent"), - TestName: "resource name prefix", + ContextFn: func(ctx context.Context) context.Context { return context.WithValue(ctx, ResourcePrefix, "Intent") }, + TestName: "resource name prefix", Source: &TestFlexAWS18{ IntentName: aws.String("Ovodoghen"), }, @@ -441,7 +470,7 @@ func TestFlatten(t *testing.T) { }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenGeneric(t *testing.T) { @@ -766,7 +795,7 @@ func TestFlattenGeneric(t *testing.T) { }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenSimpleNestedBlockWithStringEnum(t *testing.T) { @@ -781,7 +810,6 @@ func TestFlattenSimpleNestedBlockWithStringEnum(t *testing.T) { Field2 TestEnum } - ctx := context.Background() testCases := autoFlexTestCases{ { TestName: "single nested valid value", @@ -796,7 +824,7 @@ func TestFlattenSimpleNestedBlockWithStringEnum(t *testing.T) { WantTarget: &tf01{Field1: types.Int64Value(1), Field2: fwtypes.StringEnumNull[TestEnum]()}, }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenComplexNestedBlockWithStringEnum(t *testing.T) { @@ -839,7 +867,7 @@ func TestFlattenComplexNestedBlockWithStringEnum(t *testing.T) { WantTarget: &tf02{Field1: types.Int64Value(1), Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{Field2: zero})}, }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenSimpleSingleNestedBlock(t *testing.T) { @@ -885,7 +913,7 @@ func TestFlattenSimpleSingleNestedBlock(t *testing.T) { WantTarget: &tf02{Field1: fwtypes.NewObjectValueOfMust[tf01](ctx, &tf01{Field1: types.StringValue("a"), Field2: types.Int64Value(1)})}, }, } - runAutoFlattenTestCases(ctx, t, 
testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenComplexSingleNestedBlock(t *testing.T) { @@ -937,7 +965,7 @@ func TestFlattenComplexSingleNestedBlock(t *testing.T) { }, }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenSimpleNestedBlockWithFloat32(t *testing.T) { @@ -952,7 +980,6 @@ func TestFlattenSimpleNestedBlockWithFloat32(t *testing.T) { Field2 *float32 } - ctx := context.Background() testCases := autoFlexTestCases{ { TestName: "single nested valid value", @@ -961,7 +988,7 @@ func TestFlattenSimpleNestedBlockWithFloat32(t *testing.T) { WantTarget: &tf01{Field1: types.Int64Value(1), Field2: types.Float64Value(0.01)}, }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenComplexNestedBlockWithFloat32(t *testing.T) { @@ -993,7 +1020,7 @@ func TestFlattenComplexNestedBlockWithFloat32(t *testing.T) { WantTarget: &tf02{Field1: types.Int64Value(1), Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tf01{Field1: types.Float64Value(1.11), Field2: types.Float64Value(-2.22)})}, }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenSimpleNestedBlockWithFloat64(t *testing.T) { @@ -1008,7 +1035,6 @@ func TestFlattenSimpleNestedBlockWithFloat64(t *testing.T) { Field2 *float64 } - ctx := context.Background() testCases := autoFlexTestCases{ { TestName: "single nested valid value", @@ -1017,7 +1043,7 @@ func TestFlattenSimpleNestedBlockWithFloat64(t *testing.T) { WantTarget: &tf01{Field1: types.Int64Value(1), Field2: types.Float64Value(0.01)}, }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenComplexNestedBlockWithFloat64(t *testing.T) { @@ -1049,7 +1075,7 @@ func TestFlattenComplexNestedBlockWithFloat64(t *testing.T) { WantTarget: &tf02{Field1: types.Int64Value(1), Field2: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, 
&tf01{Field1: types.Float64Value(1.11), Field2: types.Float64Value(-2.22)})}, }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } func TestFlattenOptions(t *testing.T) { @@ -1150,10 +1176,10 @@ func TestFlattenOptions(t *testing.T) { }, }, } - runAutoFlattenTestCases(ctx, t, testCases) + runAutoFlattenTestCases(t, testCases) } -func runAutoFlattenTestCases(ctx context.Context, t *testing.T, testCases autoFlexTestCases) { +func runAutoFlattenTestCases(t *testing.T, testCases autoFlexTestCases) { t.Helper() for _, testCase := range testCases { @@ -1161,27 +1187,34 @@ func runAutoFlattenTestCases(ctx context.Context, t *testing.T, testCases autoFl t.Run(testCase.TestName, func(t *testing.T) { t.Parallel() - testCtx := ctx //nolint:contextcheck // simplify use of testing context - if testCase.Context != nil { - testCtx = testCase.Context + ctx := context.Background() + if testCase.ContextFn != nil { + ctx = testCase.ContextFn(ctx) } - err := Flatten(testCtx, testCase.Source, testCase.Target, testCase.Options...) - gotErr := err != nil + var buf bytes.Buffer + ctx = tflogtest.RootLogger(ctx, &buf) + + diags := Flatten(ctx, testCase.Source, testCase.Target, testCase.Options...) 
- if gotErr != testCase.WantErr { - t.Errorf("gotErr = %v, wantErr = %v", gotErr, testCase.WantErr) + if diff := cmp.Diff(diags, testCase.expectedDiags); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) } - less := func(a, b any) bool { return fmt.Sprintf("%+v", a) < fmt.Sprintf("%+v", b) } + lines, err := tflogtest.MultilineJSONDecode(&buf) + if err != nil { + t.Fatalf("Flatten: decoding log lines: %s", err) + } + if diff := cmp.Diff(lines, testCase.expectedLogLines); diff != "" { + t.Errorf("unexpected log lines diff (+wanted, -got): %s", diff) + } - if gotErr { - if !testCase.WantErr { - t.Errorf("err = %q", err) - } - } else if diff := cmp.Diff(testCase.Target, testCase.WantTarget, cmpopts.SortSlices(less)); diff != "" { - if !testCase.WantDiff { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) + if !diags.HasError() { + less := func(a, b any) bool { return fmt.Sprintf("%+v", a) < fmt.Sprintf("%+v", b) } + if diff := cmp.Diff(testCase.Target, testCase.WantTarget, cmpopts.SortSlices(less)); diff != "" { + if !testCase.WantDiff { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } } } }) diff --git a/internal/framework/flex/autoflex.go b/internal/framework/flex/autoflex.go index 136dfc48b8d..6fccf832d16 100644 --- a/internal/framework/flex/autoflex.go +++ b/internal/framework/flex/autoflex.go @@ -10,7 +10,9 @@ import ( "strings" pluralize "github.com/gertd/go-pluralize" + "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" ) type ResourcePrefixCtxKey string @@ -123,6 +125,19 @@ func autoFlexConvertStruct(ctx context.Context, from any, to any, flexer autoFle return diags + if fromExpander, ok := valFrom.Interface().(Expander); ok { + diags.Append(expandExpander(ctx, fromExpander, valTo)...)
+ return diags + } + + if valTo.Kind() == reflect.Interface { + tflog.Info(ctx, "AutoFlex Expand; incompatible types", map[string]any{ + "from": valFrom.Type(), + "to": valTo.Kind(), + }) + return diags + } + opts := flexer.getOptions() for i, typFrom := 0, valFrom.Type(); i < typFrom.NumField(); i++ { field := typFrom.Field(i) @@ -155,6 +170,16 @@ func autoFlexConvertStruct(ctx context.Context, from any, to any, flexer autoFle return diags } +func fullTypeName(t reflect.Type) string { + if t.Kind() == reflect.Pointer { + return "*" + fullTypeName(t.Elem()) + } + if path := t.PkgPath(); path != "" { + return fmt.Sprintf("%s.%s", path, t.Name()) + } + return t.Name() +} + func findFieldFuzzy(ctx context.Context, fieldNameFrom string, valTo, valFrom reflect.Value, flexer autoFlexer) reflect.Value { // first precedence is exact match (case sensitive) if v := valTo.FieldByName(fieldNameFrom); v.IsValid() { @@ -221,3 +246,10 @@ func fieldExistsInStruct(field string, str reflect.Value) bool { return false } + +// valueWithElementsAs extends the Value interface for values that have an ElementsAs method. 
+type valueWithElementsAs interface { + attr.Value + + ElementsAs(context.Context, any, bool) diag.Diagnostics +} diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 63ff9259479..62e7fdc0757 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -4,11 +4,13 @@ package flex import ( + "context" "encoding/json" "time" smithydocument "github.com/aws/smithy-go/document" "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" @@ -361,3 +363,134 @@ type TestFlexAWS21 struct { type TestFlexAWS22 struct { Field1 map[string]map[string]*string } + +type testFlexTFInterfaceListNestedObject struct { + Field1 fwtypes.ListNestedObjectValueOf[testFlexTFInterfaceExpander] `tfsdk:"field1"` +} + +type testFlexTFInterfaceListNestedObjectNonExpander struct { + Field1 fwtypes.ListNestedObjectValueOf[TestFlexTF01] `tfsdk:"field1"` +} + +type testFlexTFInterfaceSetNestedObject struct { + Field1 fwtypes.SetNestedObjectValueOf[testFlexTFInterfaceExpander] `tfsdk:"field1"` +} + +type testFlexTFInterfaceObjectValue struct { + Field1 fwtypes.ObjectValueOf[testFlexTFInterfaceExpander] `tfsdk:"field1"` +} + +type testFlexTFInterfaceExpander struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ Expander = testFlexTFInterfaceExpander{} + +func (t testFlexTFInterfaceExpander) Expand(ctx context.Context) (any, diag.Diagnostics) { + return &testFlexAWSInterfaceInterfaceImpl{ + AWSField: t.Field1.ValueString(), + }, nil +} + +type testFlexTFInterfaceIncompatibleExpander struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ Expander = testFlexTFInterfaceIncompatibleExpander{} + +func (t 
testFlexTFInterfaceIncompatibleExpander) Expand(ctx context.Context) (any, diag.Diagnostics) { + return &testFlexAWSInterfaceIncompatibleImpl{ + AWSField: t.Field1.ValueString(), + }, nil +} + +type testFlexAWSInterfaceIncompatibleImpl struct { + AWSField string +} + +type testFlexAWSInterfaceSingle struct { + Field1 testFlexAWSInterfaceInterface +} + +type testFlexAWSInterfaceSlice struct { + Field1 []testFlexAWSInterfaceInterface +} + +type testFlexAWSInterfaceInterface interface { + isTestFlexAWSInterfaceInterface() +} + +type testFlexAWSInterfaceInterfaceImpl struct { + AWSField string +} + +var _ testFlexAWSInterfaceInterface = &testFlexAWSInterfaceInterfaceImpl{} + +func (t *testFlexAWSInterfaceInterfaceImpl) isTestFlexAWSInterfaceInterface() {} // nosemgrep:ci.aws-in-func-name + +type testFlexTFExpander struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ Expander = testFlexTFExpander{} + +func (t testFlexTFExpander) Expand(ctx context.Context) (any, diag.Diagnostics) { + return &testFlexAWSExpander{ + AWSField: t.Field1.ValueString(), + }, nil +} + +type testFlexTFExpanderListNestedObject struct { + Field1 fwtypes.ListNestedObjectValueOf[testFlexTFExpander] `tfsdk:"field1"` +} + +type testFlexTFExpanderSetNestedObject struct { + Field1 fwtypes.SetNestedObjectValueOf[testFlexTFExpander] `tfsdk:"field1"` +} + +type testFlexTFExpanderObjectValue struct { + Field1 fwtypes.ObjectValueOf[testFlexTFExpander] `tfsdk:"field1"` +} + +type testFlexTFExpanderToString struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ Expander = testFlexTFExpanderToString{} + +func (t testFlexTFExpanderToString) Expand(ctx context.Context) (any, diag.Diagnostics) { + return t.Field1.ValueString(), nil +} + +type testFlexTFExpanderToNil struct { + Field1 types.String `tfsdk:"field1"` +} + +var _ Expander = testFlexTFExpanderToNil{} + +func (t testFlexTFExpanderToNil) Expand(ctx context.Context) (any, diag.Diagnostics) { + return nil, nil +} + +type testFlexAWSExpander 
struct { + AWSField string +} +type testFlexAWSExpanderIncompatible struct { + Incompatible int +} + +type testFlexAWSExpanderSingleStruct struct { + Field1 testFlexAWSExpander +} + +type testFlexAWSExpanderSinglePtr struct { + Field1 *testFlexAWSExpander +} + +type testFlexAWSExpanderStructSlice struct { + Field1 []testFlexAWSExpander +} + +type testFlexAWSExpanderPtrSlice struct { + Field1 []*testFlexAWSExpander +} diff --git a/internal/framework/flex/bool.go b/internal/framework/flex/bool.go index 46d34f385f2..33f46092029 100644 --- a/internal/framework/flex/bool.go +++ b/internal/framework/flex/bool.go @@ -21,6 +21,14 @@ func BoolFromFramework(ctx context.Context, v basetypes.BoolValuable) *bool { return output } +func BoolValueFromFramework(ctx context.Context, v basetypes.BoolValuable) bool { + var output bool + + must(Expand(ctx, v, &output)) + + return output +} + // BoolToFramework converts a bool pointer to a Framework Bool value. // A nil bool pointer is converted to a null Bool. 
func BoolToFramework(ctx context.Context, v *bool) types.Bool { diff --git a/internal/framework/flex/bool_test.go b/internal/framework/flex/bool_test.go index a0ca89faf7f..a2dd5a2d1c0 100644 --- a/internal/framework/flex/bool_test.go +++ b/internal/framework/flex/bool_test.go @@ -49,6 +49,42 @@ func TestBoolFromFramework(t *testing.T) { } } +func TestBoolValueFromFramework(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.Bool + expected bool + } + tests := map[string]testCase{ + "valid bool": { + input: types.BoolValue(true), + expected: true, + }, + "null bool": { + input: types.BoolNull(), + expected: false, + }, + "unknown bool": { + input: types.BoolUnknown(), + expected: false, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.BoolValueFromFramework(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + func TestBoolToFramework(t *testing.T) { t.Parallel() diff --git a/internal/framework/flex/int.go b/internal/framework/flex/int.go index 88ecff87855..df968d35803 100644 --- a/internal/framework/flex/int.go +++ b/internal/framework/flex/int.go @@ -66,6 +66,12 @@ func Int32ValueToFramework(ctx context.Context, v int32) types.Int64 { return output } +// Int32ToFrameworkLegacy converts an int32 pointer to a Framework Int64 value. +// A nil int32 pointer is converted to a zero Int64. +func Int32ToFrameworkLegacy(_ context.Context, v *int32) types.Int64 { + return types.Int64Value(int64(aws.ToInt32(v))) +} + // Int32FromFramework coverts a Framework Int64 value to an int32 pointer. // A null Int64 is converted to a nil int32 pointer. 
func Int32FromFramework(ctx context.Context, v types.Int64) *int32 { @@ -76,6 +82,19 @@ func Int32FromFramework(ctx context.Context, v types.Int64) *int32 { return output } +func Int32FromFrameworkLegacy(_ context.Context, v types.Int64) *int32 { + if v.IsNull() || v.IsUnknown() { + return nil + } + + i := v.ValueInt64() + if i == 0 { + return nil + } + + return aws.Int32(int32(i)) +} + // Int32ValueFromFramework coverts a Framework Int64 value to an int32 pointer. // A null Int64 is converted to a nil int32 pointer. func Int32ValueFromFramework(ctx context.Context, v types.Int64) int32 { diff --git a/internal/framework/flex/int_test.go b/internal/framework/flex/int_test.go index d91ab87541c..6d19a021d9a 100644 --- a/internal/framework/flex/int_test.go +++ b/internal/framework/flex/int_test.go @@ -161,6 +161,42 @@ func TestInt32ToFramework(t *testing.T) { } } +func TestInt32ToFrameworkLegacy(t *testing.T) { + t.Parallel() + + type testCase struct { + input *int32 + expected types.Int64 + } + tests := map[string]testCase{ + "valid int64": { + input: aws.Int32(42), + expected: types.Int64Value(42), + }, + "zero int64": { + input: aws.Int32(0), + expected: types.Int64Value(0), + }, + "nil int64": { + input: nil, + expected: types.Int64Value(0), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.Int32ToFrameworkLegacy(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + func TestInt32FromFramework(t *testing.T) { t.Parallel() @@ -200,3 +236,43 @@ func TestInt32FromFramework(t *testing.T) { }) } } + +func TestInt32FromFrameworkLegacy(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.Int64 + expected *int32 + } + tests := map[string]testCase{ + "valid int64": { + input: types.Int64Value(42), + expected: aws.Int32(42), + }, + "zero int64": { + input: 
types.Int64Value(0), + expected: nil, + }, + "null int64": { + input: types.Int64Null(), + expected: nil, + }, + "unknown int64": { + input: types.Int64Unknown(), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.Int32FromFrameworkLegacy(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} diff --git a/internal/framework/flex/list.go b/internal/framework/flex/list.go index 602cf311c00..0c018caed90 100644 --- a/internal/framework/flex/list.go +++ b/internal/framework/flex/list.go @@ -13,6 +13,38 @@ import ( fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" ) +func ExpandFrameworkInt32List(ctx context.Context, v basetypes.ListValuable) []*int32 { + var output []*int32 + + must(Expand(ctx, v, &output)) + + return output +} + +func ExpandFrameworkInt32ValueList(ctx context.Context, v basetypes.ListValuable) []int32 { + var output []int32 + + must(Expand(ctx, v, &output)) + + return output +} + +func ExpandFrameworkInt64List(ctx context.Context, v basetypes.ListValuable) []*int64 { + var output []*int64 + + must(Expand(ctx, v, &output)) + + return output +} + +func ExpandFrameworkInt64ValueList(ctx context.Context, v basetypes.ListValuable) []int64 { + var output []int64 + + must(Expand(ctx, v, &output)) + + return output +} + func ExpandFrameworkStringList(ctx context.Context, v basetypes.ListValuable) []*string { var output []*string @@ -29,6 +61,70 @@ func ExpandFrameworkStringValueList(ctx context.Context, v basetypes.ListValuabl return output } +// FlattenFrameworkInt32List converts a slice of int32 pointers to a framework List value. +// +// A nil slice is converted to a null List. +// An empty slice is converted to a null List.
+func FlattenFrameworkInt32List(ctx context.Context, v []*int32) types.List { + if len(v) == 0 { + return types.ListNull(types.Int64Type) + } + + var output types.List + + must(Flatten(ctx, v, &output)) + + return output +} + +// FlattenFrameworkInt32ValueList converts a slice of int32 values to a framework List value. + // + // A nil slice is converted to a null List. + // An empty slice is converted to a null List. +func FlattenFrameworkInt32ValueList[T ~int32](ctx context.Context, v []T) types.List { + if len(v) == 0 { + return types.ListNull(types.Int64Type) + } + + var output types.List + + must(Flatten(ctx, v, &output)) + + return output +} + +// FlattenFrameworkInt64List converts a slice of int64 pointers to a framework List value. + // + // A nil slice is converted to a null List. + // An empty slice is converted to a null List. +func FlattenFrameworkInt64List(ctx context.Context, v []*int64) types.List { + if len(v) == 0 { + return types.ListNull(types.Int64Type) + } + + var output types.List + + must(Flatten(ctx, v, &output)) + + return output +} + +// FlattenFrameworkInt64ValueList converts a slice of int64 values to a framework List value. + // + // A nil slice is converted to a null List. + // An empty slice is converted to a null List. +func FlattenFrameworkInt64ValueList[T ~int64](ctx context.Context, v []T) types.List { + if len(v) == 0 { + return types.ListNull(types.Int64Type) + } + + var output types.List + + must(Flatten(ctx, v, &output)) + + return output +} + // FlattenFrameworkStringList converts a slice of string pointers to a framework List value. // // A nil slice is converted to a null List.
diff --git a/internal/framework/flex/list_test.go b/internal/framework/flex/list_test.go index 80ccd430e4c..5239e07269f 100644 --- a/internal/framework/flex/list_test.go +++ b/internal/framework/flex/list_test.go @@ -14,6 +14,202 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" ) +func TestExpandFrameworkInt32List(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.List + expected []*int32 + } + tests := map[string]testCase{ + "null": { + input: types.ListNull(types.Int64Type), + expected: nil, + }, + "unknown": { + input: types.ListUnknown(types.Int64Type), + expected: nil, + }, + "two elements": { + input: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + expected: []*int32{aws.Int32(1), aws.Int32(-1)}, + }, + "zero elements": { + input: types.ListValueMust(types.Int64Type, []attr.Value{}), + expected: []*int32{}, + }, + "invalid element type": { + input: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkInt32List(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestExpandFrameworkInt32ValueList(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.List + expected []int32 + } + tests := map[string]testCase{ + "null": { + input: types.ListNull(types.Int64Type), + expected: nil, + }, + "unknown": { + input: types.ListUnknown(types.Int64Type), + expected: nil, + }, + "two elements": { + input: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + expected: []int32{1, -1}, + }, + "zero elements": { + input: types.ListValueMust(types.Int64Type, []attr.Value{}), + 
expected: []int32{}, + }, + "invalid element type": { + input: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkInt32ValueList(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestExpandFrameworkInt64List(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.List + expected []*int64 + } + tests := map[string]testCase{ + "null": { + input: types.ListNull(types.Int64Type), + expected: nil, + }, + "unknown": { + input: types.ListUnknown(types.Int64Type), + expected: nil, + }, + "two elements": { + input: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + expected: []*int64{aws.Int64(1), aws.Int64(-1)}, + }, + "zero elements": { + input: types.ListValueMust(types.Int64Type, []attr.Value{}), + expected: []*int64{}, + }, + "invalid element type": { + input: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkInt64List(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestExpandFrameworkInt64ValueList(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.List + expected []int64 + } + tests := map[string]testCase{ + "null": { + input: types.ListNull(types.Int64Type), + expected: nil, + }, + "unknown": { + input: types.ListUnknown(types.Int64Type), + expected: nil, + }, + "two elements": { + input: types.ListValueMust(types.Int64Type, 
[]attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + expected: []int64{1, -1}, + }, + "zero elements": { + input: types.ListValueMust(types.Int64Type, []attr.Value{}), + expected: []int64{}, + }, + "invalid element type": { + input: types.ListValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkInt64ValueList(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + func TestExpandFrameworkStringList(t *testing.T) { t.Parallel() @@ -112,6 +308,162 @@ func TestExpandFrameworkStringValueList(t *testing.T) { } } +func TestFlattenFrameworkInt32List(t *testing.T) { + t.Parallel() + + type testCase struct { + input []*int32 + expected types.List + } + tests := map[string]testCase{ + "two elements": { + input: []*int32{aws.Int32(1), aws.Int32(-1)}, + expected: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + }, + "zero elements": { + input: []*int32{}, + expected: types.ListNull(types.Int64Type), + }, + "nil array": { + input: nil, + expected: types.ListNull(types.Int64Type), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkInt32List(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestFlattenFrameworkInt32ValueList(t *testing.T) { + t.Parallel() + + type testCase struct { + input []int32 + expected types.List + } + tests := map[string]testCase{ + "two elements": { + input: []int32{1, -1}, + expected: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + 
types.Int64Value(-1), + }), + }, + "zero elements": { + input: []int32{}, + expected: types.ListNull(types.Int64Type), + }, + "nil array": { + input: nil, + expected: types.ListNull(types.Int64Type), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkInt32ValueList(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestFlattenFrameworkInt64List(t *testing.T) { + t.Parallel() + + type testCase struct { + input []*int64 + expected types.List + } + tests := map[string]testCase{ + "two elements": { + input: []*int64{aws.Int64(1), aws.Int64(-1)}, + expected: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + }, + "zero elements": { + input: []*int64{}, + expected: types.ListNull(types.Int64Type), + }, + "nil array": { + input: nil, + expected: types.ListNull(types.Int64Type), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkInt64List(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestFlattenFrameworkInt64ValueList(t *testing.T) { + t.Parallel() + + type testCase struct { + input []int64 + expected types.List + } + tests := map[string]testCase{ + "two elements": { + input: []int64{1, -1}, + expected: types.ListValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + }, + "zero elements": { + input: []int64{}, + expected: types.ListNull(types.Int64Type), + }, + "nil array": { + input: nil, + expected: types.ListNull(types.Int64Type), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t 
*testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkInt64ValueList(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + func TestFlattenFrameworkStringList(t *testing.T) { t.Parallel() diff --git a/internal/framework/flex/set.go b/internal/framework/flex/set.go index 08b55107ba2..ce48d7644c7 100644 --- a/internal/framework/flex/set.go +++ b/internal/framework/flex/set.go @@ -10,9 +10,42 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" itypes "github.com/hashicorp/terraform-provider-aws/internal/types" ) +func ExpandFrameworkInt32Set(ctx context.Context, v basetypes.SetValuable) []*int32 { + var output []*int32 + + must(Expand(ctx, v, &output)) + + return output +} + +func ExpandFrameworkInt32ValueSet(ctx context.Context, v basetypes.SetValuable) []int32 { + var output []int32 + + must(Expand(ctx, v, &output)) + + return output +} + +func ExpandFrameworkInt64Set(ctx context.Context, v basetypes.SetValuable) []*int64 { + var output []*int64 + + must(Expand(ctx, v, &output)) + + return output +} + +func ExpandFrameworkInt64ValueSet(ctx context.Context, v basetypes.SetValuable) []int64 { + var output []int64 + + must(Expand(ctx, v, &output)) + + return output +} + func ExpandFrameworkStringSet(ctx context.Context, v basetypes.SetValuable) []*string { var output []*string @@ -29,6 +62,78 @@ func ExpandFrameworkStringValueSet(ctx context.Context, v basetypes.SetValuable) return output } +func ExpandFrameworkStringyValueSet[T ~string](ctx context.Context, v basetypes.SetValuable) itypes.Set[T] { + vs := ExpandFrameworkStringValueSet(ctx, v) + if vs == nil { + return nil + } + return tfslices.ApplyToAll(vs, func(s string) T { return T(s) }) +} 
+ +// FlattenFrameworkInt32Set converts a slice of int32 pointers to a framework Set value. +// +// A nil slice is converted to a null Set. +// An empty slice is converted to a null Set. +func FlattenFrameworkInt32Set(ctx context.Context, v []*int32) types.Set { + if len(v) == 0 { + return types.SetNull(types.Int64Type) + } + + var output types.Set + + must(Flatten(ctx, v, &output)) + + return output +} + +// FlattenFrameworkInt32ValueSet converts a slice of int32 values to a framework Set value. +// +// A nil slice is converted to a null Set. +// An empty slice is converted to a null Set. +func FlattenFrameworkInt32ValueSet[T ~int32](ctx context.Context, v []T) types.Set { + if len(v) == 0 { + return types.SetNull(types.Int64Type) + } + + var output types.Set + + must(Flatten(ctx, v, &output)) + + return output +} + +// FlattenFrameworkInt64Set converts a slice of int64 pointers to a framework Set value. +// +// A nil slice is converted to a null Set. +// An empty slice is converted to a null Set. +func FlattenFrameworkInt64Set(ctx context.Context, v []*int64) types.Set { + if len(v) == 0 { + return types.SetNull(types.Int64Type) + } + + var output types.Set + + must(Flatten(ctx, v, &output)) + + return output +} + +// FlattenFrameworkInt64ValueSet converts a slice of int64 values to a framework Set value. +// +// A nil slice is converted to a null Set. +// An empty slice is converted to a null Set. +func FlattenFrameworkInt64ValueSet[T ~int64](ctx context.Context, v []T) types.Set { + if len(v) == 0 { + return types.SetNull(types.Int64Type) + } + + var output types.Set + + must(Flatten(ctx, v, &output)) + + return output +} + // FlattenFrameworkStringSet converts a slice of string pointers to a framework Set value. // // A nil slice is converted to a null Set.
diff --git a/internal/framework/flex/set_test.go b/internal/framework/flex/set_test.go index 74afc03eb14..453ffc9060f 100644 --- a/internal/framework/flex/set_test.go +++ b/internal/framework/flex/set_test.go @@ -15,6 +15,202 @@ import ( itypes "github.com/hashicorp/terraform-provider-aws/internal/types" ) +func TestExpandFrameworkInt32Set(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.Set + expected []*int32 + } + tests := map[string]testCase{ + "null": { + input: types.SetNull(types.Int64Type), + expected: nil, + }, + "unknown": { + input: types.SetUnknown(types.Int64Type), + expected: nil, + }, + "two elements": { + input: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + expected: []*int32{aws.Int32(1), aws.Int32(-1)}, + }, + "zero elements": { + input: types.SetValueMust(types.Int64Type, []attr.Value{}), + expected: []*int32{}, + }, + "invalid element type": { + input: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkInt32Set(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestExpandFrameworkInt32ValueSet(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.Set + expected []int32 + } + tests := map[string]testCase{ + "null": { + input: types.SetNull(types.Int64Type), + expected: nil, + }, + "unknown": { + input: types.SetUnknown(types.Int64Type), + expected: nil, + }, + "two elements": { + input: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + expected: []int32{1, -1}, + }, + "zero elements": { + input: types.SetValueMust(types.Int64Type, []attr.Value{}), + expected: []int32{}, + }, 
+ "invalid element type": { + input: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkInt32ValueSet(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestExpandFrameworkInt64Set(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.Set + expected []*int64 + } + tests := map[string]testCase{ + "null": { + input: types.SetNull(types.Int64Type), + expected: nil, + }, + "unknown": { + input: types.SetUnknown(types.Int64Type), + expected: nil, + }, + "two elements": { + input: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + expected: []*int64{aws.Int64(1), aws.Int64(-1)}, + }, + "zero elements": { + input: types.SetValueMust(types.Int64Type, []attr.Value{}), + expected: []*int64{}, + }, + "invalid element type": { + input: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkInt64Set(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestExpandFrameworkInt64ValueSet(t *testing.T) { + t.Parallel() + + type testCase struct { + input types.Set + expected []int64 + } + tests := map[string]testCase{ + "null": { + input: types.SetNull(types.Int64Type), + expected: nil, + }, + "unknown": { + input: types.SetUnknown(types.Int64Type), + expected: nil, + }, + "two elements": { + input: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + 
types.Int64Value(-1), + }), + expected: []int64{1, -1}, + }, + "zero elements": { + input: types.SetValueMust(types.Int64Type, []attr.Value{}), + expected: []int64{}, + }, + "invalid element type": { + input: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkInt64ValueSet(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + func TestExpandFrameworkStringSet(t *testing.T) { t.Parallel() @@ -113,6 +309,254 @@ func TestExpandFrameworkStringValueSet(t *testing.T) { } } +func TestExpandFrameworkStringyValueSet(t *testing.T) { + t.Parallel() + + type testEnum string + var testVal1 testEnum = "testVal1" + var testVal2 testEnum = "testVal2" + + type testCase struct { + input types.Set + expected itypes.Set[testEnum] + } + tests := map[string]testCase{ + "null": { + input: types.SetNull(types.StringType), + expected: nil, + }, + "unknown": { + input: types.SetUnknown(types.StringType), + expected: nil, + }, + "two elements": { + input: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue(string(testVal1)), + types.StringValue(string(testVal2)), + }), + expected: []testEnum{testVal1, testVal2}, + }, + "zero elements": { + input: types.SetValueMust(types.StringType, []attr.Value{}), + expected: []testEnum{}, + }, + "invalid element type": { + input: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(42), + }), + expected: nil, + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.ExpandFrameworkStringyValueSet[testEnum](context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff 
(+wanted, -got): %s", diff) + } + }) + } +} + +func TestFlattenFrameworkInt32Set(t *testing.T) { + t.Parallel() + + type testCase struct { + input []*int32 + expected types.Set + } + tests := map[string]testCase{ + "two elements": { + input: []*int32{aws.Int32(1), aws.Int32(-1)}, + expected: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + }, + "zero elements": { + input: []*int32{}, + expected: types.SetNull(types.Int64Type), + }, + "nil array": { + input: nil, + expected: types.SetNull(types.Int64Type), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkInt32Set(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestFlattenFrameworkInt32ValueSet(t *testing.T) { + t.Parallel() + + type testCase struct { + input []int32 + expected types.Set + } + tests := map[string]testCase{ + "two elements": { + input: []int32{1, -1}, + expected: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + }, + "zero elements": { + input: []int32{}, + expected: types.SetNull(types.Int64Type), + }, + "nil array": { + input: nil, + expected: types.SetNull(types.Int64Type), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkInt32ValueSet(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestFlattenFrameworkInt64Set(t *testing.T) { + t.Parallel() + + type testCase struct { + input []*int64 + expected types.Set + } + tests := map[string]testCase{ + "two elements": { + input: []*int64{aws.Int64(1), aws.Int64(-1)}, + expected: 
types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + }, + "zero elements": { + input: []*int64{}, + expected: types.SetNull(types.Int64Type), + }, + "nil array": { + input: nil, + expected: types.SetNull(types.Int64Type), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkInt64Set(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestFlattenFrameworkInt64ValueSet(t *testing.T) { + t.Parallel() + + type testCase struct { + input []int64 + expected types.Set + } + tests := map[string]testCase{ + "two elements": { + input: []int64{1, -1}, + expected: types.SetValueMust(types.Int64Type, []attr.Value{ + types.Int64Value(1), + types.Int64Value(-1), + }), + }, + "zero elements": { + input: []int64{}, + expected: types.SetNull(types.Int64Type), + }, + "nil array": { + input: nil, + expected: types.SetNull(types.Int64Type), + }, + } + + for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkInt64ValueSet(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + +func TestFlattenFrameworkStringSet(t *testing.T) { + t.Parallel() + + type testCase struct { + input []*string + expected types.Set + } + tests := map[string]testCase{ + "two elements": { + input: []*string{aws.String("GET"), aws.String("HEAD")}, + expected: types.SetValueMust(types.StringType, []attr.Value{ + types.StringValue("GET"), + types.StringValue("HEAD"), + }), + }, + "zero elements": { + input: []*string{}, + expected: types.SetNull(types.StringType), + }, + "nil array": { + input: nil, + expected: types.SetNull(types.StringType), + }, + } + + 
for name, test := range tests { + name, test := name, test + t.Run(name, func(t *testing.T) { + t.Parallel() + + got := flex.FlattenFrameworkStringSet(context.Background(), test.input) + + if diff := cmp.Diff(got, test.expected); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} + func TestFlattenFrameworkStringValueSet(t *testing.T) { t.Parallel() diff --git a/internal/framework/types/string_enum.go b/internal/framework/types/string_enum.go index 163de73ba19..4e87548e3de 100644 --- a/internal/framework/types/string_enum.go +++ b/internal/framework/types/string_enum.go @@ -6,6 +6,7 @@ package types import ( "context" "fmt" + "strings" "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/attr/xattr" @@ -122,6 +123,10 @@ func StringEnumValue[T enum.Valueser[T]](value T) StringEnum[T] { return StringEnum[T]{StringValue: basetypes.NewStringValue(string(value))} } +func StringEnumValueToUpper[T enum.Valueser[T]](value T) StringEnum[T] { + return StringEnumValue(T(strings.ToUpper(string(value)))) +} + func (v StringEnum[T]) Equal(o attr.Value) bool { other, ok := o.(StringEnum[T]) diff --git a/internal/generate/checknames/main.go b/internal/generate/checknames/main.go index 110ba6b3b75..071feb67b1f 100644 --- a/internal/generate/checknames/main.go +++ b/internal/generate/checknames/main.go @@ -72,24 +72,16 @@ func main() { log.Fatalf("in service data, line %d, for service %s, if Exclude is blank, either AWSCLIV2CommandNoDashes or GoV2Package must have values", i+lineOffset, l.HumanFriendly()) } - if l.ProviderPackageActual() != "" && l.ProviderPackageCorrect() == "" { - log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual can't be non-blank if ProviderPackageCorrect is blank", i+lineOffset, l.HumanFriendly()) - } - - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" && !l.Exclude() { - log.Fatalf("in service data, line %d, for service %s, 
ProviderPackageActual and ProviderPackageCorrect cannot both be blank unless Exclude is non-blank", i+lineOffset, l.HumanFriendly()) - } - - if l.ProviderPackageCorrect() != "" && l.ProviderPackageActual() == l.ProviderPackageCorrect() { - log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual should only be used if different from ProviderPackageCorrect", i+lineOffset, l.HumanFriendly()) - } - packageToUse := l.ProviderPackageCorrect() if l.ProviderPackageActual() != "" { packageToUse = l.ProviderPackageActual() } + if l.ResourcePrefixCorrect() != "" && l.ResourcePrefixCorrect() != fmt.Sprintf("aws_%s_", l.ProviderPackageCorrect()) { + log.Fatalf("in service data, line %d, for service %s, ResourcePrefixCorrect should be aws__, where is ProviderPackageCorrect", i+lineOffset, l.HumanFriendly()) + } + if p := l.Aliases(); len(p) > 0 && packageToUse != "" { for _, v := range p { if v == packageToUse { @@ -114,10 +106,6 @@ func main() { log.Fatalf("in service data, line %d, for service %s, ResourcePrefixCorrect must have a value if Exclude is blank", i+lineOffset, l.HumanFriendly()) } - if l.ResourcePrefixCorrect() != "" && l.ResourcePrefixCorrect() != fmt.Sprintf("aws_%s_", l.ProviderPackageCorrect()) { - log.Fatalf("in service data, line %d, for service %s, ResourcePrefixCorrect should be aws__, where is ProviderPackageCorrect", i+lineOffset, l.HumanFriendly()) - } - if l.ResourcePrefixCorrect() != "" && l.ResourcePrefixActual() == l.ResourcePrefixCorrect() { log.Fatalf("in service data, line %d, for service %s, ResourcePrefixActual should not be the same as ResourcePrefixCorrect, set ResourcePrefixActual to blank", i+lineOffset, l.HumanFriendly()) } diff --git a/internal/generate/listpages/main.go b/internal/generate/listpages/main.go index 30c8fce24e9..2801954a75d 100644 --- a/internal/generate/listpages/main.go +++ b/internal/generate/listpages/main.go @@ -19,7 +19,7 @@ import ( "sort" "strings" - 
"github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" "golang.org/x/tools/go/packages" ) @@ -52,6 +52,10 @@ func main() { flag.Usage = usage flag.Parse() + if *sdkVersion != sdkV1 && *sdkVersion != sdkV2 { + log.Fatalf("AWSSDKVersion must be either 1 or 2, got %d", *sdkVersion) + } + if (*inputPaginator != "" && *outputPaginator == "") || (*inputPaginator == "" && *outputPaginator != "") { log.Fatal("both InputPaginator and OutputPaginator must be specified if one is") } @@ -71,12 +75,16 @@ func main() { servicePackage := os.Getenv("GOPACKAGE") log.SetPrefix(fmt.Sprintf("generate/listpage: %s: ", servicePackage)) - awsService, err := names.AWSGoV1Package(servicePackage) - + service, err := data.LookupService(servicePackage) if err != nil { log.Fatalf("encountered: %s", err) } + awsService := service.GoV1Package() + if *sdkVersion == sdkV2 { + awsService = service.GoV2Package() + } + functions := strings.Split(*listOps, ",") sort.Strings(functions) @@ -92,10 +100,8 @@ func main() { } sourcePackage := fmt.Sprintf("github.com/aws/aws-sdk-go/service/%[1]s", awsService) - if *sdkVersion == sdkV2 { sourcePackage = fmt.Sprintf("github.com/aws/aws-sdk-go-v2/service/%[1]s", awsService) - } g.parsePackage(sourcePackage) @@ -107,14 +113,14 @@ func main() { SourceIntfPackage: fmt.Sprintf("github.com/aws/aws-sdk-go/service/%[1]s/%[1]siface", awsService), }, *sdkVersion) - awsUpper, err := names.AWSGoClientTypeName(servicePackage, *sdkVersion) + clientTypeName := service.ClientTypeName(*sdkVersion) if err != nil { log.Fatalf("encountered: %s", err) } for _, functionName := range functions { - g.generateFunction(functionName, awsService, awsUpper, *export, *sdkVersion, *v2Suffix) + g.generateFunction(functionName, awsService, clientTypeName, *export, *sdkVersion, *v2Suffix) } src := g.format() @@ -204,7 +210,7 @@ type FuncSpec struct { V2Suffix bool } -func (g *Generator) generateFunction(functionName, awsService, 
awsServiceUpper string, export bool, sdkVersion int, v2Suffix bool) { +func (g *Generator) generateFunction(functionName, awsService, clientTypeName string, export bool, sdkVersion int, v2Suffix bool) { var function *ast.FuncDecl for _, file := range g.pkg.files { @@ -233,14 +239,14 @@ func (g *Generator) generateFunction(functionName, awsService, awsServiceUpper s funcName = fmt.Sprintf("%s%s", strings.ToLower(funcName[0:1]), funcName[1:]) } - recvType := fmt.Sprintf("%[1]siface.%[2]sAPI", awsService, awsServiceUpper) + recvType := fmt.Sprintf("%[1]siface.%[2]sAPI", awsService, clientTypeName) if sdkVersion == sdkV2 { - recvType = fmt.Sprintf("*%[1]s.%[2]s", awsService, awsServiceUpper) + recvType = fmt.Sprintf("*%[1]s.%[2]s", awsService, clientTypeName) } funcSpec := FuncSpec{ - Name: fixUpFuncName(funcName, awsServiceUpper), + Name: fixUpFuncName(funcName, clientTypeName), AWSName: function.Name.Name, RecvType: recvType, ParamType: g.expandTypeField(function.Type.Params, sdkVersion, false), // Assumes there is a single input parameter diff --git a/internal/generate/namesconsts/main.go b/internal/generate/namesconsts/main.go index 8ba214f1b1b..69e84d81d1e 100644 --- a/internal/generate/namesconsts/main.go +++ b/internal/generate/namesconsts/main.go @@ -11,11 +11,17 @@ import ( "sort" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) type TemplateData struct { - Services []names.ServiceNameUpper + Services []ServiceDatum +} + +type ServiceDatum struct { + ProviderPackage string + ProviderNameUpper string + SDKID string } func main() { @@ -26,8 +32,30 @@ func main() { g.Infof("Generating names/%s", filename) - td := TemplateData{ - Services: names.ServiceNamesUpper(), + data, err := data.ReadAllServiceData() + + if err != nil { + g.Fatalf("error reading service data: %s", err) + } + + td := TemplateData{} + + for _, l := range data 
{ + if l.Exclude() { + continue + } + + if l.NotImplemented() && !l.EndpointOnly() { + continue + } + + sd := ServiceDatum{ + ProviderPackage: l.ProviderPackage(), + ProviderNameUpper: l.ProviderNameUpper(), + SDKID: l.SDKID(), + } + + td.Services = append(td.Services, sd) } sort.Slice(td.Services, func(i, j int) bool { diff --git a/internal/generate/serviceendpointtests/file.gtpl b/internal/generate/serviceendpointtests/file.gtpl index 90138808b3a..ac57f2580a9 100644 --- a/internal/generate/serviceendpointtests/file.gtpl +++ b/internal/generate/serviceendpointtests/file.gtpl @@ -10,9 +10,8 @@ import ( {{- end }} "fmt" "maps" - {{- if and (ne .GoV1Package "") (eq .GoV2Package "") }} + "net" "net/url" - {{- end }} "os" "path/filepath" "strings" @@ -133,7 +132,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -474,7 +473,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -517,7 +516,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S {{ end -}} } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { {{- if ne .GoV2Package "" }} r := {{ .GoV2Package }}_sdkv2.NewDefaultEndpointResolverV2() @@ -525,14 +524,14 @@ func defaultEndpoint(region string) string { Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil {{ 
else }} r := endpoints.DefaultResolver() @@ -543,7 +542,7 @@ func defaultEndpoint(region string) string { {{- end -}} ) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -552,11 +551,11 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil {{ end -}} } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { {{- if ne .GoV2Package "" }} r := {{ .GoV2Package }}_sdkv2.NewDefaultEndpointResolverV2() @@ -565,14 +564,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil {{ else }} r := endpoints.DefaultResolver() @@ -581,7 +580,7 @@ func defaultFIPSEndpoint(region string) string { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -590,7 +589,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil {{ end -}} } @@ -716,16 +715,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { - return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := 
defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + + return caseExpectations{ + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/generate/servicepackage/endpoint_resolver.go.gtpl b/internal/generate/servicepackage/endpoint_resolver.go.gtpl new file mode 100644 index 00000000000..bdc4c6e3294 --- /dev/null +++ b/internal/generate/servicepackage/endpoint_resolver.go.gtpl @@ -0,0 +1,156 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package {{ .ProviderPackage }} + +import ( + "context" + "fmt" + "net" +{{ if .ClientSDKV1 -}} + "net/url" +{{ end }} + +{{- if .ClientSDKV1 }} + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" +{{- end }} +{{- if .ClientSDKV2 }} + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + {{ .GoV2Package }}_sdkv2 "github.com/aws/aws-sdk-go-v2/service/{{ .GoV2Package }}" + smithyendpoints "github.com/aws/smithy-go/endpoints" +{{- end }} + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +{{ if .ClientSDKV1 }} +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) 
+ + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} +{{ end }} + +{{ if .ClientSDKV2 }} +var _ {{ .GoV2Package }}_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver {{ .GoV2Package }}_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: {{ .GoV2Package }}_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params {{ .GoV2Package }}_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up {{ .GoV2Package }} endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*{{ .GoV2Package }}_sdkv2.Options) { + return func(o *{{ .GoV2Package }}_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } 
+} +{{ end }} diff --git a/internal/generate/servicepackage/file.tmpl b/internal/generate/servicepackage/file.gtpl similarity index 79% rename from internal/generate/servicepackage/file.tmpl rename to internal/generate/servicepackage/file.gtpl index b23304204a8..403cf19244b 100644 --- a/internal/generate/servicepackage/file.tmpl +++ b/internal/generate/servicepackage/file.gtpl @@ -1,24 +1,21 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package {{ .ProviderPackage }} import ( "context" -{{if not .SkipClientGenerate }} - {{- if eq .SDKVersion "1" "1,2" }} +{{ if .GenerateClient }} + {{- if .ClientSDKV1 }} aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" {{ .GoV1Package }}_sdkv1 "github.com/aws/aws-sdk-go/service/{{ .GoV1Package }}" + "github.com/hashicorp/terraform-plugin-log/tflog" {{- end }} - {{- if eq .SDKVersion "2" "1,2" }} + {{- if .ClientSDKV2 }} aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" {{ .GoV2Package }}_sdkv2 "github.com/aws/aws-sdk-go-v2/service/{{ .GoV2Package }}" {{- end }} - {{- if .SDKVersion }} - "github.com/hashicorp/terraform-plugin-log/tflog" - {{- end }} {{- end }} "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" @@ -131,8 +128,8 @@ func (p *servicePackage) ServicePackageName() string { {{- end }} } -{{- if not .SkipClientGenerate }} - {{if eq .SDKVersion "1" "1,2" }} +{{- if .GenerateClient }} + {{ if .ClientSDKV1 }} // NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*{{ .GoV1Package }}_sdkv1.{{ .GoV1ClientTypeName }}, error) { sess := config[names.AttrSession].(*session_sdkv1.Session) @@ -144,35 +141,23 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*{ "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return {{ .GoV1Package }}_sdkv1.New(sess.Copy(&cfg)), nil } {{- end }} - {{if eq .SDKVersion "2" "1,2" }} + {{ if .ClientSDKV2 }} // NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*{{ .GoV2Package }}_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return {{ .GoV2Package }}_sdkv2.NewFromConfig(cfg, func(o *{{ .GoV2Package }}_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if (o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled) { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return {{ .GoV2Package }}_sdkv2.NewFromConfig(cfg, + {{ .GoV2Package }}_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } {{- end }} {{- end }} diff --git a/internal/generate/servicepackage/main.go b/internal/generate/servicepackage/main.go index 869c497dc7e..ac628cf3d67 100644 --- 
a/internal/generate/servicepackage/main.go +++ b/internal/generate/servicepackage/main.go @@ -26,7 +26,8 @@ import ( func main() { const ( - filename = `service_package_gen.go` + filename = `service_package_gen.go` + endpointResolverFilename = `service_endpoint_resolver_gen.go` ) g := common.NewGenerator() @@ -66,8 +67,10 @@ func main() { } s := ServiceDatum{ - SkipClientGenerate: l.SkipClientGenerate(), + GenerateClient: !l.SkipClientGenerate(), + ClientSDKV1: l.ClientSDKV1(), GoV1Package: l.GoV1Package(), + ClientSDKV2: l.ClientSDKV2(), GoV2Package: l.GoV2Package(), ProviderPackage: p, ProviderNameUpper: l.ProviderNameUpper(), @@ -77,7 +80,6 @@ func main() { SDKResources: v.sdkResources, } - s.SDKVersion = l.SDKVersion() if l.ClientSDKV1() { s.GoV1ClientTypeName = l.GoV1ClientTypeName() } @@ -99,6 +101,20 @@ func main() { g.Fatalf("generating file (%s): %s", filename, err) } + if p != "meta" { + g.Infof("Generating internal/service/%s/%s", servicePackage, endpointResolverFilename) + + d = g.NewGoFileDestination(endpointResolverFilename) + + if err := d.WriteTemplate("endpointresolver", endpointResolverTmpl, s); err != nil { + g.Fatalf("error generating %s endpoint resolver: %s", p, err) + } + + if err := d.Write(); err != nil { + g.Fatalf("generating file (%s): %s", endpointResolverFilename, err) + } + } + break } } @@ -112,10 +128,11 @@ type ResourceDatum struct { } type ServiceDatum struct { - SkipClientGenerate bool - SDKVersion string // AWS SDK for Go version ("1", "2" or "1,2") + GenerateClient bool + ClientSDKV1 bool GoV1Package string // AWS SDK for Go v1 package name GoV1ClientTypeName string // AWS SDK for Go v1 client type name + ClientSDKV2 bool GoV2Package string // AWS SDK for Go v2 package name ProviderPackage string ProviderNameUpper string @@ -125,9 +142,12 @@ type ServiceDatum struct { SDKResources map[string]ResourceDatum } -//go:embed file.tmpl +//go:embed file.gtpl var tmpl string +//go:embed endpoint_resolver.go.gtpl +var 
endpointResolverTmpl string + // Annotation processing. var ( annotation = regexache.MustCompile(`^//\s*@([0-9A-Za-z]+)(\(([^)]*)\))?\s*$`) diff --git a/internal/generate/tags/main.go b/internal/generate/tags/main.go index 0b0a6e96825..fe3604af006 100644 --- a/internal/generate/tags/main.go +++ b/internal/generate/tags/main.go @@ -17,7 +17,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/generate/common" v1 "github.com/hashicorp/terraform-provider-aws/internal/generate/tags/templates/v1" v2 "github.com/hashicorp/terraform-provider-aws/internal/generate/tags/templates/v2" - "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) const ( @@ -259,28 +259,18 @@ func main() { g.Infof("Generating internal/service/%s/%s", servicePackage, filename) - awsPkg, err := names.AWSGoPackage(*sdkServicePackage, *sdkVersion) + service, err := data.LookupService(*sdkServicePackage) if err != nil { g.Fatalf("encountered: %s", err) } + awsPkg := service.GoPackageName(*sdkVersion) + var awsIntfPkg string if *sdkVersion == sdkV1 && (*getTag || *listTags || *updateTags) { awsIntfPkg = fmt.Sprintf("%[1]s/%[1]siface", awsPkg) } - clientTypeName, err := names.AWSGoClientTypeName(*sdkServicePackage, *sdkVersion) - - if err != nil { - g.Fatalf("encountered: %s", err) - } - - providerNameUpper, err := names.ProviderNameUpper(*sdkServicePackage) - - if err != nil { - g.Fatalf("encountered: %s", err) - } - createTagsFunc := *createTagsFunc if *createTags && !*updateTags { g.Infof("CreateTags only valid with UpdateTags") @@ -289,6 +279,7 @@ func main() { createTagsFunc = "" } + clientTypeName := service.ClientTypeName(*sdkVersion) var clientType string if *sdkVersion == sdkV1 { clientType = fmt.Sprintf("%siface.%sAPI", awsPkg, clientTypeName) @@ -307,6 +298,8 @@ func main() { } } + providerNameUpper := service.ProviderNameUpper() + templateData := TemplateData{ AWSService: awsPkg, AWSServiceIfacePackage: awsIntfPkg, diff 
--git a/internal/generate/tags/templates/v2/service_tags_map_body.tmpl b/internal/generate/tags/templates/v2/service_tags_map_body.tmpl index c640fb00507..6eda7ea8870 100644 --- a/internal/generate/tags/templates/v2/service_tags_map_body.tmpl +++ b/internal/generate/tags/templates/v2/service_tags_map_body.tmpl @@ -31,11 +31,11 @@ func {{ .SetTagsOutFunc }}(ctx context.Context, tags map[string]*string) { {{- if ne .CreateTagsFunc "" }} // {{ .CreateTagsFunc }} creates {{ .ServicePackage }} service tags for new resources. -func {{ .CreateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string, tags map[string]*string) error { +func {{ .CreateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string, tags map[string]*string, optFns ...func(*{{ .AWSService }}.Options)) error { if len(tags) == 0 { return nil } - return {{ .UpdateTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, nil, tags) + return {{ .UpdateTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, nil, tags, optFns...) } {{- end }} diff --git a/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl b/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl index eab63923533..08de40230c5 100644 --- a/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl +++ b/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl @@ -22,11 +22,11 @@ func listOfMap(tags tftags.KeyValueTags) []any { {{- if .TagKeyType }} // TagKeys returns {{ .ServicePackage }} service tag keys. 
-func TagKeys(tags tftags.KeyValueTags) []*{{ .AWSService }}.{{ .TagKeyType }} { - result := make([]*{{ .AWSService }}.{{ .TagKeyType }}, 0, len(tags)) +func TagKeys(tags tftags.KeyValueTags) []awstypes.{{ .TagKeyType }} { + result := make([]awstypes.{{ .TagKeyType }}, 0, len(tags)) for k := range tags.Map() { - tagKey := &{{ .AWSService }}.{{ .TagKeyType }}{ + tagKey := awstypes.{{ .TagKeyType }}{ {{ .TagTypeKeyElem }}: aws.String(k), } @@ -247,11 +247,11 @@ func {{ .SetTagsOutFunc }}(ctx context.Context, tags []awstypes.{{ .TagType }}) {{- if ne .CreateTagsFunc "" }} // {{ .CreateTagsFunc }} creates {{ .ServicePackage }} service tags for new resources. -func {{ .CreateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string, tags []awstypes.{{ .TagType }}) error { +func {{ .CreateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string, tags []awstypes.{{ .TagType }}, optFns ...func(*{{ .AWSService }}.Options)) error { if len(tags) == 0 { return nil } - return {{ .UpdateTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, nil, {{ .KeyValueTagsFunc }}(ctx, tags)) + return {{ .UpdateTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, nil, {{ .KeyValueTagsFunc }}(ctx, tags), optFns...) } {{- end }} diff --git a/internal/generate/tags/templates/v2/service_tags_value_map_body.tmpl b/internal/generate/tags/templates/v2/service_tags_value_map_body.tmpl index 289196b29e9..0066f93d36d 100644 --- a/internal/generate/tags/templates/v2/service_tags_value_map_body.tmpl +++ b/internal/generate/tags/templates/v2/service_tags_value_map_body.tmpl @@ -31,11 +31,11 @@ func {{ .SetTagsOutFunc }}(ctx context.Context, tags map[string]string) { {{- if ne .CreateTagsFunc "" }} // {{ .CreateTagsFunc }} creates {{ .ServicePackage }} service tags for new resources. 
-func {{ .CreateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string, tags map[string]string) error { +func {{ .CreateTagsFunc }}(ctx context.Context, conn {{ .ClientType }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string, tags map[string]string, optFns ...func(*{{ .AWSService }}.Options)) error { if len(tags) == 0 { return nil } - return {{ .UpdateTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, nil, tags) + return {{ .UpdateTagsFunc }}(ctx, conn, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}, nil, tags, optFns...) } {{- end }} diff --git a/internal/generate/teamcity/README.md b/internal/generate/teamcity/README.md index c283cf4f44e..0f6738e82f2 100644 --- a/internal/generate/teamcity/README.md +++ b/internal/generate/teamcity/README.md @@ -7,7 +7,7 @@ Can be invoked using either `make gen` along with all other generators or ## Configuration -The generator creates a TeamCity build configuration for each service listed in `names/data/names_data.csv`. +The generator creates a TeamCity build configuration for each service listed in `names/data/names_data.hcl`. By default, the service acceptance tests do not use the VPC Lock and use the default parallelism. These setting can be overridden for each service by adding a `service` entry in the file `acctest_services.hcl`. diff --git a/internal/generate/teamcity/acctest_services.hcl b/internal/generate/teamcity/acctest_services.hcl index 1c1fa22786c..51e8a8155df 100644 --- a/internal/generate/teamcity/acctest_services.hcl +++ b/internal/generate/teamcity/acctest_services.hcl @@ -1,6 +1,11 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: MPL-2.0 +service "amp" { + # The maximum scrapers per region quota is fixed at 10 + parallelism = 10 +} + service "appautoscaling" { vpc_lock = true } diff --git a/internal/provider/service_packages_gen.go b/internal/provider/service_packages_gen.go index 12129ceb431..9bbf6133b5c 100644 --- a/internal/provider/service_packages_gen.go +++ b/internal/provider/service_packages_gen.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/appflow" "github.com/hashicorp/terraform-provider-aws/internal/service/appintegrations" "github.com/hashicorp/terraform-provider-aws/internal/service/applicationinsights" + "github.com/hashicorp/terraform-provider-aws/internal/service/applicationsignals" "github.com/hashicorp/terraform-provider-aws/internal/service/appmesh" "github.com/hashicorp/terraform-provider-aws/internal/service/apprunner" "github.com/hashicorp/terraform-provider-aws/internal/service/appstream" @@ -70,6 +71,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/costoptimizationhub" "github.com/hashicorp/terraform-provider-aws/internal/service/cur" "github.com/hashicorp/terraform-provider-aws/internal/service/customerprofiles" + "github.com/hashicorp/terraform-provider-aws/internal/service/databrew" "github.com/hashicorp/terraform-provider-aws/internal/service/dataexchange" "github.com/hashicorp/terraform-provider-aws/internal/service/datapipeline" "github.com/hashicorp/terraform-provider-aws/internal/service/datasync" @@ -164,6 +166,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/service/neptunegraph" "github.com/hashicorp/terraform-provider-aws/internal/service/networkfirewall" "github.com/hashicorp/terraform-provider-aws/internal/service/networkmanager" + "github.com/hashicorp/terraform-provider-aws/internal/service/networkmonitor" "github.com/hashicorp/terraform-provider-aws/internal/service/oam" 
"github.com/hashicorp/terraform-provider-aws/internal/service/opensearch" "github.com/hashicorp/terraform-provider-aws/internal/service/opensearchserverless" @@ -262,6 +265,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { appflow.ServicePackage(ctx), appintegrations.ServicePackage(ctx), applicationinsights.ServicePackage(ctx), + applicationsignals.ServicePackage(ctx), appmesh.ServicePackage(ctx), apprunner.ServicePackage(ctx), appstream.ServicePackage(ctx), @@ -311,6 +315,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { costoptimizationhub.ServicePackage(ctx), cur.ServicePackage(ctx), customerprofiles.ServicePackage(ctx), + databrew.ServicePackage(ctx), dataexchange.ServicePackage(ctx), datapipeline.ServicePackage(ctx), datasync.ServicePackage(ctx), @@ -405,6 +410,7 @@ func servicePackages(ctx context.Context) []conns.ServicePackage { neptunegraph.ServicePackage(ctx), networkfirewall.ServicePackage(ctx), networkmanager.ServicePackage(ctx), + networkmonitor.ServicePackage(ctx), oam.ServicePackage(ctx), opensearch.ServicePackage(ctx), opensearchserverless.ServicePackage(ctx), diff --git a/internal/retry/wrappers.go b/internal/retry/wrappers.go index 421f824d668..b8c591beaba 100644 --- a/internal/retry/wrappers.go +++ b/internal/retry/wrappers.go @@ -82,7 +82,7 @@ func (o operation[T]) UntilFoundN(continuousTargetOccurence int) operation[T] { return true, nil } - if tfresource.NotFound(err) { + if tfresource.NotFound(err) { // nosemgrep:ci.semgrep.errors.notfound-without-err-checks targetOccurence = 0 return true, err @@ -100,7 +100,7 @@ func (o operation[T]) UntilNotFound() operation[T] { return true, nil } - if tfresource.NotFound(err) { + if tfresource.NotFound(err) { // nosemgrep:ci.semgrep.errors.notfound-without-err-checks return false, nil } diff --git a/internal/sdkv2/state.go b/internal/sdkv2/state.go new file mode 100644 index 00000000000..fe2bb36a2b0 --- /dev/null +++ b/internal/sdkv2/state.go @@ -0,0 +1,13 @@ 
+// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sdkv2 + +import ( + "strings" +) + +// ToUpperSchemaStateFunc converts a string value to uppercase before storing it in state. +func ToUpperSchemaStateFunc(v interface{}) string { + return strings.ToUpper(v.(string)) +} diff --git a/internal/sdkv2/state_test.go b/internal/sdkv2/state_test.go new file mode 100644 index 00000000000..dcb7d943271 --- /dev/null +++ b/internal/sdkv2/state_test.go @@ -0,0 +1,23 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sdkv2 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestToUpperSchemaStateFunc(t *testing.T) { + t.Parallel() + + var input interface{} = "in-state" + want := "IN-STATE" + + got := ToUpperSchemaStateFunc(input) + + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("unexpected diff (+want, -got): %s", diff) + } +} diff --git a/internal/service/accessanalyzer/service_endpoint_resolver_gen.go b/internal/service/accessanalyzer/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..e79f9750e15 --- /dev/null +++ b/internal/service/accessanalyzer/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package accessanalyzer + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + accessanalyzer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/accessanalyzer" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ accessanalyzer_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver accessanalyzer_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: accessanalyzer_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params accessanalyzer_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else 
{ + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*accessanalyzer_sdkv2.Options) { + return func(o *accessanalyzer_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/accessanalyzer/service_endpoints_gen_test.go b/internal/service/accessanalyzer/service_endpoints_gen_test.go index 25b71095dc1..8a28013d643 100644 --- a/internal/service/accessanalyzer/service_endpoints_gen_test.go +++ b/internal/service/accessanalyzer/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := accessanalyzer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), accessanalyzer_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := accessanalyzer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), accessanalyzer_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/accessanalyzer/service_package_gen.go b/internal/service/accessanalyzer/service_package_gen.go index c3b7c09378c..15dbaba5022 100644 --- 
a/internal/service/accessanalyzer/service_package_gen.go +++ b/internal/service/accessanalyzer/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package accessanalyzer @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" accessanalyzer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/accessanalyzer" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -52,19 +51,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*accessanalyzer_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return accessanalyzer_sdkv2.NewFromConfig(cfg, func(o *accessanalyzer_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return accessanalyzer_sdkv2.NewFromConfig(cfg, + accessanalyzer_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/account/service_endpoint_resolver_gen.go b/internal/service/account/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..72cf9078368 --- /dev/null +++ b/internal/service/account/service_endpoint_resolver_gen.go @@ -0,0 
+1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package account + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + account_sdkv2 "github.com/aws/aws-sdk-go-v2/service/account" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ account_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver account_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: account_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params account_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up account endpoint %q: %s", 
hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*account_sdkv2.Options) { + return func(o *account_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/account/service_endpoints_gen_test.go b/internal/service/account/service_endpoints_gen_test.go index 538f2f1adde..ff2d5701f60 100644 --- a/internal/service/account/service_endpoints_gen_test.go +++ b/internal/service/account/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := account_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), account_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := account_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), account_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving account default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving account FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up account endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/account/service_package_gen.go b/internal/service/account/service_package_gen.go index 9281a81c75f..56a0e7b1589 100644 --- a/internal/service/account/service_package_gen.go +++ 
b/internal/service/account/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package account @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" account_sdkv2 "github.com/aws/aws-sdk-go-v2/service/account" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -53,19 +52,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*account_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return account_sdkv2.NewFromConfig(cfg, func(o *account_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return account_sdkv2.NewFromConfig(cfg, + account_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/acm/service_endpoint_resolver_gen.go b/internal/service/acm/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..7480d799fe9 --- /dev/null +++ b/internal/service/acm/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package acm + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + acm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/acm" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ acm_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver acm_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: acm_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params acm_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up acm endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*acm_sdkv2.Options) { + return func(o *acm_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/acm/service_endpoints_gen_test.go b/internal/service/acm/service_endpoints_gen_test.go index 5e15d53a283..ec8b64d6107 100644 --- a/internal/service/acm/service_endpoints_gen_test.go +++ b/internal/service/acm/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := acm_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), acm_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := acm_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
acm_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/acm/service_package_gen.go b/internal/service/acm/service_package_gen.go index cd3c01f9c99..d76d5533294 100644 --- a/internal/service/acm/service_package_gen.go +++ b/internal/service/acm/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package acm @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" acm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/acm" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -61,19 +60,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*acm_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return acm_sdkv2.NewFromConfig(cfg, func(o *acm_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return acm_sdkv2.NewFromConfig(cfg, + acm_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/acmpca/service_endpoint_resolver_gen.go b/internal/service/acmpca/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..4b66c791a76 --- /dev/null +++ b/internal/service/acmpca/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package acmpca + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + acmpca_sdkv2 "github.com/aws/aws-sdk-go-v2/service/acmpca" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ acmpca_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver acmpca_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: acmpca_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params acmpca_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up acmpca endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*acmpca_sdkv2.Options) { + return func(o *acmpca_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/acmpca/service_endpoints_gen_test.go b/internal/service/acmpca/service_endpoints_gen_test.go index 0b342af1776..e544ebd74eb 100644 --- a/internal/service/acmpca/service_endpoints_gen_test.go +++ b/internal/service/acmpca/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := acmpca_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), acmpca_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := acmpca_sdkv2.NewDefaultEndpointResolverV2() ep, err 
:= r.ResolveEndpoint(context.Background(), acmpca_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/acmpca/service_package_gen.go b/internal/service/acmpca/service_package_gen.go index c9d94f134f9..0673ef94f28 100644 --- a/internal/service/acmpca/service_package_gen.go +++ b/internal/service/acmpca/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT 
EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package acmpca @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" acmpca_sdkv2 "github.com/aws/aws-sdk-go-v2/service/acmpca" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -82,19 +81,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*acmpca_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return acmpca_sdkv2.NewFromConfig(cfg, func(o *acmpca_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return acmpca_sdkv2.NewFromConfig(cfg, + acmpca_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/amp/service_endpoint_resolver_gen.go b/internal/service/amp/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..6f695368d70 --- /dev/null +++ b/internal/service/amp/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package amp + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + amp_sdkv2 "github.com/aws/aws-sdk-go-v2/service/amp" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ amp_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver amp_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: amp_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params amp_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up amp endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*amp_sdkv2.Options) { + return func(o *amp_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/amp/service_endpoints_gen_test.go b/internal/service/amp/service_endpoints_gen_test.go index abc1eec3005..0301d8d856c 100644 --- a/internal/service/amp/service_endpoints_gen_test.go +++ b/internal/service/amp/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -93,7 +95,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -333,7 +335,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -354,24 +356,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := amp_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), amp_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := amp_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
amp_sdkv2.EndpointParameters{ @@ -379,14 +381,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -478,16 +480,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/amp/service_package_gen.go b/internal/service/amp/service_package_gen.go index 6d39c2a9ec4..ee3a5812e4e 100644 --- a/internal/service/amp/service_package_gen.go +++ b/internal/service/amp/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package amp @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" amp_sdkv2 "github.com/aws/aws-sdk-go-v2/service/amp" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -78,19 +77,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*amp_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return amp_sdkv2.NewFromConfig(cfg, func(o *amp_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return amp_sdkv2.NewFromConfig(cfg, + amp_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/amplify/amplify_test.go b/internal/service/amplify/amplify_test.go index 55dbecf7589..d3f75f60ca7 100644 --- a/internal/service/amplify/amplify_test.go +++ b/internal/service/amplify/amplify_test.go @@ -45,9 +45,10 @@ func TestAccAmplify_serial(t *testing.T) { "OptionalArguments": testAccBranch_OptionalArguments, }, "DomainAssociation": { - acctest.CtBasic: testAccDomainAssociation_basic, - acctest.CtDisappears: testAccDomainAssociation_disappears, - "update": testAccDomainAssociation_update, + acctest.CtBasic: 
testAccDomainAssociation_basic, + "certificateSettings": testAccDomainAssociation_certificateSettings, + acctest.CtDisappears: testAccDomainAssociation_disappears, + "update": testAccDomainAssociation_update, }, "Webhook": { acctest.CtBasic: testAccWebhook_basic, diff --git a/internal/service/amplify/domain_association.go b/internal/service/amplify/domain_association.go index 6a44cd703a0..6c95f903278 100644 --- a/internal/service/amplify/domain_association.go +++ b/internal/service/amplify/domain_association.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -48,6 +49,29 @@ func resourceDomainAssociation() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "certificate_settings": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "certificate_verification_dns_record": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrType: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.CertificateType](), + }, + "custom_certificate_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, "certificate_verification_dns_record": { Type: schema.TypeString, Computed: true, @@ -112,6 +136,10 @@ func resourceDomainAssociationCreate(ctx context.Context, d *schema.ResourceData SubDomainSettings: expandSubDomainSettings(d.Get("sub_domain").(*schema.Set).List()), } + if v, ok := d.GetOk("certificate_settings"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.CertificateSettings = expandCertificateSettings(v.([]interface{})[0].(map[string]interface{})) + } + _, err := 
conn.CreateDomainAssociation(ctx, input) if err != nil { @@ -162,6 +190,9 @@ func resourceDomainAssociationRead(ctx context.Context, d *schema.ResourceData, if err := d.Set("sub_domain", flattenSubDomains(domainAssociation.SubDomains)); err != nil { return sdkdiag.AppendErrorf(diags, "setting sub_domain: %s", err) } + if err := d.Set("certificate_settings", flattenCertificateSettings(domainAssociation.Certificate)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting certificate_settings: %s", err) + } return diags } @@ -181,6 +212,10 @@ func resourceDomainAssociationUpdate(ctx context.Context, d *schema.ResourceData DomainName: aws.String(domainName), } + if d.HasChange("certificate_settings") { + input.CertificateSettings = expandCertificateSettings(d.Get("certificate_settings").([]interface{})[0].(map[string]interface{})) + } + if d.HasChange("enable_auto_sub_domain") { input.EnableAutoSubDomain = aws.Bool(d.Get("enable_auto_sub_domain").(bool)) } @@ -385,6 +420,41 @@ func expandSubDomainSettings(tfList []interface{}) []types.SubDomainSetting { return apiObjects } +func expandCertificateSettings(tfMap map[string]interface{}) *types.CertificateSettings { + if tfMap == nil { + return nil + } + + apiObject := &types.CertificateSettings{ + Type: types.CertificateType(tfMap[names.AttrType].(string)), + } + + if v, ok := tfMap["custom_certificate_arn"].(string); ok { + apiObject.CustomCertificateArn = aws.String(v) + } + + return apiObject +} + +func flattenCertificateSettings(apiObject *types.Certificate) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + tfMap[names.AttrType] = apiObject.Type + + if v := apiObject.CertificateVerificationDNSRecord; v != nil { + tfMap["certificate_verification_dns_record"] = aws.ToString(v) + } + if v := apiObject.CustomCertificateArn; v != nil { + tfMap["custom_certificate_arn"] = aws.ToString(v) + } + + return []interface{}{tfMap} +} + func flattenSubDomain(apiObject 
types.SubDomain) map[string]interface{} { tfMap := map[string]interface{}{} diff --git a/internal/service/amplify/domain_association_test.go b/internal/service/amplify/domain_association_test.go index 1fa83aa546e..8fef1ff8897 100644 --- a/internal/service/amplify/domain_association_test.go +++ b/internal/service/amplify/domain_association_test.go @@ -156,6 +156,45 @@ func testAccDomainAssociation_update(t *testing.T) { }) } +func testAccDomainAssociation_certificateSettings(t *testing.T) { + ctx := acctest.Context(t) + key := "AMPLIFY_DOMAIN_NAME" + domainName := os.Getenv(key) + if domainName == "" { + t.Skipf("Environment variable %s is not set", key) + } + + var domain types.DomainAssociation + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_amplify_domain_association.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AmplifyServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDomainAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDomainAssociationConfig_certificateSettings(rName, domainName, false, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainAssociationExists(ctx, resourceName, &domain), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "amplify", regexache.MustCompile(`apps/.+/domains/.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrDomainName, domainName), + resource.TestCheckResourceAttr(resourceName, "certificate_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "certificate_settings.0.type", "AMPLIFY_MANAGED"), + resource.TestCheckResourceAttrSet(resourceName, "certificate_settings.0.certificate_verification_dns_record"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"wait_for_verification"}, + }, + }, + }) +} + func testAccCheckDomainAssociationExists(ctx context.Context, n string, v *types.DomainAssociation) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -264,3 +303,33 @@ resource "aws_amplify_domain_association" "test" { } `, rName, domainName, enableAutoSubDomain, waitForVerification) } + +func testAccDomainAssociationConfig_certificateSettings(rName, domainName string, enableAutoSubDomain bool, waitForVerification bool) string { + return fmt.Sprintf(` +resource "aws_amplify_app" "test" { + name = %[1]q +} + +resource "aws_amplify_branch" "test" { + app_id = aws_amplify_app.test.id + branch_name = %[1]q +} + +resource "aws_amplify_domain_association" "test" { + app_id = aws_amplify_app.test.id + domain_name = %[2]q + + sub_domain { + branch_name = aws_amplify_branch.test.branch_name + prefix = "" + } + + certificate_settings { + type = "AMPLIFY_MANAGED" + } + + enable_auto_sub_domain = %[3]t + wait_for_verification = %[4]t +} +`, rName, domainName, enableAutoSubDomain, waitForVerification) +} diff --git a/internal/service/amplify/service_endpoint_resolver_gen.go b/internal/service/amplify/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..e9240af9bec --- /dev/null +++ b/internal/service/amplify/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package amplify + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + amplify_sdkv2 "github.com/aws/aws-sdk-go-v2/service/amplify" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ amplify_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver amplify_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: amplify_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params amplify_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up amplify endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*amplify_sdkv2.Options) { + return func(o *amplify_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/amplify/service_endpoints_gen_test.go b/internal/service/amplify/service_endpoints_gen_test.go index e8987a44bd5..2ca6f8c9f15 100644 --- a/internal/service/amplify/service_endpoints_gen_test.go +++ b/internal/service/amplify/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := amplify_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), amplify_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
amplify_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), amplify_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/amplify/service_package_gen.go b/internal/service/amplify/service_package_gen.go index ba0930a242b..8031aa179ce 100644 --- a/internal/service/amplify/service_package_gen.go +++ b/internal/service/amplify/service_package_gen.go @@ -1,4 +1,4 @@ -// Code 
generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package amplify @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" amplify_sdkv2 "github.com/aws/aws-sdk-go-v2/service/amplify" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -71,19 +70,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*amplify_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return amplify_sdkv2.NewFromConfig(cfg, func(o *amplify_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return amplify_sdkv2.NewFromConfig(cfg, + amplify_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/apigateway/integration.go b/internal/service/apigateway/integration.go index 16726119f54..276bd303e8f 100644 --- a/internal/service/apigateway/integration.go +++ b/internal/service/apigateway/integration.go @@ -128,7 +128,7 @@ func resourceIntegration() *schema.Resource { "timeout_milliseconds": { Type: schema.TypeInt, Optional: true, - ValidateFunc: validation.IntBetween(50, 29000), + ValidateFunc: validation.IntBetween(50, 300000), 
Default: 29000, }, "tls_config": { diff --git a/internal/service/apigateway/service_endpoint_resolver_gen.go b/internal/service/apigateway/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ef2fdd642c0 --- /dev/null +++ b/internal/service/apigateway/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package apigateway + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + apigateway_sdkv2 "github.com/aws/aws-sdk-go-v2/service/apigateway" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ apigateway_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver apigateway_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: apigateway_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params apigateway_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = 
net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up apigateway endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*apigateway_sdkv2.Options) { + return func(o *apigateway_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/apigateway/service_endpoints_gen_test.go b/internal/service/apigateway/service_endpoints_gen_test.go index 9d22208db68..b0dcb43c2ad 100644 --- a/internal/service/apigateway/service_endpoints_gen_test.go +++ b/internal/service/apigateway/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := 
apigateway_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), apigateway_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := apigateway_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), apigateway_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint 
%q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/apigateway/service_package.go b/internal/service/apigateway/service_package.go index a48bb411c7c..cbc956a44bb 100644 --- a/internal/service/apigateway/service_package.go +++ b/internal/service/apigateway/service_package.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/apigateway" "github.com/aws/aws-sdk-go-v2/service/apigateway/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,27 +19,19 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*apigateway.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return apigateway.NewFromConfig(cfg, func(o *apigateway.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - // Many operations can return an error such as: - // ConflictException: Unable to complete operation due to concurrent modification. Please try again later. - // Handle them all globally for the service client. 
- if errs.IsAErrorMessageContains[*types.ConflictException](err, "try again later") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) - }), nil + return apigateway.NewFromConfig(cfg, + apigateway.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *apigateway.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + // Many operations can return an error such as: + // ConflictException: Unable to complete operation due to concurrent modification. Please try again later. + // Handle them all globally for the service client. + if errs.IsAErrorMessageContains[*types.ConflictException](err, "try again later") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + })) + }, + ), nil } diff --git a/internal/service/apigateway/service_package_gen.go b/internal/service/apigateway/service_package_gen.go index ee26dadb07e..0c9dbd98479 100644 --- a/internal/service/apigateway/service_package_gen.go +++ b/internal/service/apigateway/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package apigateway diff --git a/internal/service/apigatewayv2/service_endpoint_resolver_gen.go b/internal/service/apigatewayv2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..17adefe840e --- /dev/null +++ b/internal/service/apigatewayv2/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package apigatewayv2 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + apigatewayv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/apigatewayv2" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ apigatewayv2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver apigatewayv2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: apigatewayv2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params apigatewayv2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up apigatewayv2 endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*apigatewayv2_sdkv2.Options) { + return func(o *apigatewayv2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/apigatewayv2/service_endpoints_gen_test.go b/internal/service/apigatewayv2/service_endpoints_gen_test.go index 92564f0fc33..1b29d667399 100644 --- a/internal/service/apigatewayv2/service_endpoints_gen_test.go +++ b/internal/service/apigatewayv2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := apigatewayv2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), apigatewayv2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := apigatewayv2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), apigatewayv2_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/apigatewayv2/service_package.go b/internal/service/apigatewayv2/service_package.go index 47d0582b72c..2eeade0b917 100644 --- a/internal/service/apigatewayv2/service_package.go +++ 
b/internal/service/apigatewayv2/service_package.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/apigatewayv2" awstypes "github.com/aws/aws-sdk-go-v2/service/apigatewayv2/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,24 +19,16 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*apigatewayv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return apigatewayv2.NewFromConfig(cfg, func(o *apigatewayv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*awstypes.ConflictException](err, "try again later") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) - }), nil + return apigatewayv2.NewFromConfig(cfg, + apigatewayv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *apigatewayv2.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.ConflictException](err, "try again later") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. 
+ })) + }, + ), nil } diff --git a/internal/service/apigatewayv2/service_package_gen.go b/internal/service/apigatewayv2/service_package_gen.go index 72d2363996a..53b3048bbf2 100644 --- a/internal/service/apigatewayv2/service_package_gen.go +++ b/internal/service/apigatewayv2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package apigatewayv2 diff --git a/internal/service/appautoscaling/service_endpoint_resolver_gen.go b/internal/service/appautoscaling/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..2cf14a255e7 --- /dev/null +++ b/internal/service/appautoscaling/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package appautoscaling + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + applicationautoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ applicationautoscaling_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver applicationautoscaling_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: applicationautoscaling_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params applicationautoscaling_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS 
{ + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up applicationautoscaling endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*applicationautoscaling_sdkv2.Options) { + return func(o *applicationautoscaling_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/appautoscaling/service_endpoints_gen_test.go b/internal/service/appautoscaling/service_endpoints_gen_test.go index 2f142fbba0a..989b8233bb1 100644 --- a/internal/service/appautoscaling/service_endpoints_gen_test.go +++ b/internal/service/appautoscaling/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -92,7 +94,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config 
@@ -275,7 +277,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -296,24 +298,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := applicationautoscaling_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), applicationautoscaling_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := applicationautoscaling_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), applicationautoscaling_sdkv2.EndpointParameters{ @@ -321,14 +323,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -411,16 +413,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: 
endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/appautoscaling/service_package_gen.go b/internal/service/appautoscaling/service_package_gen.go index 47ce740919d..ff76ed4fb5c 100644 --- a/internal/service/appautoscaling/service_package_gen.go +++ b/internal/service/appautoscaling/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package appautoscaling @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" applicationautoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/applicationautoscaling" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -57,19 +56,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*applicationautoscaling_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return applicationautoscaling_sdkv2.NewFromConfig(cfg, func(o *applicationautoscaling_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return applicationautoscaling_sdkv2.NewFromConfig(cfg, + applicationautoscaling_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/appconfig/service_endpoint_resolver_gen.go b/internal/service/appconfig/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..b4d977b4c89 --- /dev/null +++ b/internal/service/appconfig/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package appconfig + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + appconfig_sdkv2 "github.com/aws/aws-sdk-go-v2/service/appconfig" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ appconfig_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver appconfig_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: appconfig_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params appconfig_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up appconfig endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + 
return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*appconfig_sdkv2.Options) { + return func(o *appconfig_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/appconfig/service_endpoints_gen_test.go b/internal/service/appconfig/service_endpoints_gen_test.go index 92b91c136e5..b0b61c36938 100644 --- a/internal/service/appconfig/service_endpoints_gen_test.go +++ b/internal/service/appconfig/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := appconfig_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), appconfig_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
appconfig_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), appconfig_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving appconfig default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving appconfig FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up appconfig endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/appconfig/service_package_gen.go b/internal/service/appconfig/service_package_gen.go index 2a17bf9bdec..eb833c78857 100644 --- a/internal/service/appconfig/service_package_gen.go +++ b/internal/service/appconfig/service_package_gen.go @@ -1,4 +1,4 @@ -// 
Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package appconfig @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" appconfig_sdkv2 "github.com/aws/aws-sdk-go-v2/service/appconfig" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -123,19 +122,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*appconfig_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return appconfig_sdkv2.NewFromConfig(cfg, func(o *appconfig_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return appconfig_sdkv2.NewFromConfig(cfg, + appconfig_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/appfabric/app_authorization.go b/internal/service/appfabric/app_authorization.go index 8b8f2091ec3..07bafd3e846 100644 --- a/internal/service/appfabric/app_authorization.go +++ b/internal/service/appfabric/app_authorization.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" 
"github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" @@ -25,7 +26,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" @@ -50,17 +50,13 @@ func newAppAuthorizationResource(_ context.Context) (resource.ResourceWithConfig return r, nil } -const ( - ResNameAppAuthorization = "App Authorization" -) - type appAuthorizationResource struct { framework.ResourceWithConfigure framework.WithTimeouts framework.WithImportByID } -func (r *appAuthorizationResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { +func (*appAuthorizationResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { response.TypeName = "aws_appfabric_app_authorization" } @@ -93,9 +89,6 @@ func (r *appAuthorizationResource) Schema(ctx context.Context, request resource. }, "auth_url": schema.StringAttribute{ Computed: true, - PlanModifiers: []planmodifier.String{ - stringplanmodifier.UseStateForUnknown(), - }, }, names.AttrCreatedAt: schema.StringAttribute{ CustomType: timetypes.RFC3339Type{}, @@ -229,36 +222,36 @@ func (r *appAuthorizationResource) Create(ctx context.Context, request resource. 
input.Tags = getTagsIn(ctx) output, err := conn.CreateAppAuthorization(ctx, input) + if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.AppFabric, create.ErrActionCreating, ResNameAppAuthorization, data.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("creating AppFabric App (%s) Authorization", data.App.ValueString()), err.Error()) + return } // Set values for unknowns. - appAuth := output.AppAuthorization - data.AppAuthorizationARN = fwflex.StringToFramework(ctx, appAuth.AppAuthorizationArn) + data.AppAuthorizationARN = fwflex.StringToFramework(ctx, output.AppAuthorization.AppAuthorizationArn) data.setID() - aAuth, err := waitAppAuthorizationCreated(ctx, conn, data.AppAuthorizationARN.ValueString(), data.AppBundleARN.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + appAuthorization, err := waitAppAuthorizationCreated(ctx, conn, data.AppAuthorizationARN.ValueString(), data.AppBundleARN.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("waiting for App Fabric App Authorization (%s) to be created", data.AppAuthorizationARN.ValueString()), err.Error()) + response.State.SetAttribute(ctx, path.Root(names.AttrID), data.ID) // Set 'id' so as to taint the resource. + response.Diagnostics.AddError(fmt.Sprintf("waiting for AppFabric App Authorization (%s) create", data.ID.ValueString()), err.Error()) return } - // Set values for unknowns after creation is complete.I - data.Persona = fwflex.StringValueToFramework(ctx, aAuth.Persona) - data.AuthUrl = fwflex.StringToFramework(ctx, aAuth.AuthUrl) + // Set values for unknowns after creation is complete. 
+ data.AuthURL = fwflex.StringToFramework(ctx, appAuthorization.AuthUrl) if err := data.parseAuthURL(); err != nil { response.Diagnostics.AddError("parsing Auth URL", err.Error()) return } - data.CreatedAt = fwflex.TimeToFramework(ctx, aAuth.CreatedAt) - data.UpdatedAt = fwflex.TimeToFramework(ctx, aAuth.UpdatedAt) + data.CreatedAt = fwflex.TimeToFramework(ctx, appAuthorization.CreatedAt) + data.Persona = fwflex.StringValueToFramework(ctx, appAuthorization.Persona) + data.UpdatedAt = fwflex.TimeToFramework(ctx, appAuthorization.UpdatedAt) response.Diagnostics.Append(response.State.Set(ctx, data)...) } @@ -288,7 +281,7 @@ func (r *appAuthorizationResource) Read(ctx context.Context, request resource.Re } if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("reading App Fabric AppAuthorization ID (%s)", data.AppAuthorizationARN.ValueString()), err.Error()) + response.Diagnostics.AddError(fmt.Sprintf("reading AppFabric App Authorization (%s)", data.ID.ValueString()), err.Error()) return } @@ -298,8 +291,8 @@ func (r *appAuthorizationResource) Read(ctx context.Context, request resource.Re return } - //Seting it because of the dynamic nature of Auth Url - data.AuthUrl = fwflex.StringToFramework(ctx, output.AuthUrl) + // Setting it because of the dynamic nature of Auth URL. + data.AuthURL = fwflex.StringToFramework(ctx, output.AuthUrl) if err := data.parseAuthURL(); err != nil { response.Diagnostics.AddError("parsing Auth URL", err.Error()) @@ -323,7 +316,7 @@ func (r *appAuthorizationResource) Update(ctx context.Context, request resource. conn := r.Meta().AppFabricClient(ctx) // Check if updates are necessary based on the changed attributes - if !old.Credential.Equal(new.Credential) || !old.Tenant.Equal(new.Tenant) || !new.Tags.Equal(old.Tags) { + if !old.Credential.Equal(new.Credential) || !old.Tenant.Equal(new.Tenant) { var credentialsData []credentialModel response.Diagnostics.Append(new.Credential.ElementsAs(ctx, &credentialsData, false)...) 
if response.Diagnostics.HasError() { @@ -336,45 +329,42 @@ func (r *appAuthorizationResource) Update(ctx context.Context, request resource. return } - input := &appfabric.UpdateAppAuthorizationInput{ - AppAuthorizationIdentifier: aws.String(new.AppAuthorizationARN.ValueString()), - AppBundleIdentifier: aws.String(new.AppBundleARN.ValueString()), - } + input := &appfabric.UpdateAppAuthorizationInput{} response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...) if response.Diagnostics.HasError() { return } + input.AppAuthorizationIdentifier = fwflex.StringFromFramework(ctx, new.AppAuthorizationARN) + input.AppBundleIdentifier = fwflex.StringFromFramework(ctx, new.AppBundleARN) input.Credential = credential _, err := conn.UpdateAppAuthorization(ctx, input) + if err != nil { - response.Diagnostics.AddError( - "Failed to update App Fabric App Authorization", - fmt.Sprintf("Error updating AppAuthorization with ID %s: %s", new.AppAuthorizationARN.String(), err.Error()), - ) + response.Diagnostics.AddError(fmt.Sprintf("updating AppFabric App Authorization (%s)", new.ID.ValueString()), err.Error()) + return } - appAuth, err := waitAppAuthorizationUpdated(ctx, conn, new.AppAuthorizationARN.ValueString(), new.AppBundleARN.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)) + appAuthorization, err := waitAppAuthorizationUpdated(ctx, conn, new.AppAuthorizationARN.ValueString(), new.AppBundleARN.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)) + if err != nil { - response.Diagnostics.AddError( - "Failed to fetch App Fabric App Authorization after update", - fmt.Sprintf("Error reading AppAuthorization with ARN %s post update: %s", new.AppAuthorizationARN.ValueString(), err.Error()), - ) + response.Diagnostics.AddError(fmt.Sprintf("waiting for AppFabric App Authorization (%s) update", new.ID.ValueString()), err.Error()) + return } - // Set values for unknowns after creation is complete. 
- new.UpdatedAt = fwflex.TimeToFramework(ctx, appAuth.UpdatedAt) - new.Persona = fwflex.StringValueToFramework(ctx, appAuth.Persona) - new.AuthUrl = fwflex.StringToFramework(ctx, appAuth.AuthUrl) + // Set values for unknowns. + new.AuthURL = fwflex.StringToFramework(ctx, appAuthorization.AuthUrl) if err := new.parseAuthURL(); err != nil { response.Diagnostics.AddError("parsing Auth URL", err.Error()) return } + new.UpdatedAt = fwflex.TimeToFramework(ctx, appAuthorization.UpdatedAt) } else { + new.AuthURL = old.AuthURL new.UpdatedAt = old.UpdatedAt } @@ -391,8 +381,8 @@ func (r *appAuthorizationResource) Delete(ctx context.Context, request resource. conn := r.Meta().AppFabricClient(ctx) _, err := conn.DeleteAppAuthorization(ctx, &appfabric.DeleteAppAuthorizationInput{ - AppAuthorizationIdentifier: aws.String(data.AppAuthorizationARN.ValueString()), - AppBundleIdentifier: aws.String(data.AppBundleARN.ValueString()), + AppAuthorizationIdentifier: fwflex.StringFromFramework(ctx, data.AppAuthorizationARN), + AppBundleIdentifier: fwflex.StringFromFramework(ctx, data.AppBundleARN), }) if errs.IsA[*awstypes.ResourceNotFoundException](err) { @@ -400,20 +390,62 @@ func (r *appAuthorizationResource) Delete(ctx context.Context, request resource. 
} if err != nil { - response.Diagnostics.AddError(fmt.Sprintf("deleting App Fabric AppAuthorizations (%s)", data.AppAuthorizationARN.ValueString()), err.Error()) + response.Diagnostics.AddError(fmt.Sprintf("deleting AppFabric App Authorization (%s)", data.ID.ValueString()), err.Error()) return } if _, err = waitAppAuthorizationDeleted(ctx, conn, data.AppAuthorizationARN.ValueString(), data.AppBundleARN.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { - response.Diagnostics.AddError(fmt.Sprintf("waiting for App Fabric AppAuthenticator (%s) delete", data.AppAuthorizationARN.ValueString()), err.Error()) + response.Diagnostics.AddError(fmt.Sprintf("waiting for AppFabric App Authorization (%s) delete", data.ID.ValueString()), err.Error()) return } } -func (r *appAuthorizationResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { - r.SetTagsAll(ctx, request, resp) +func (r *appAuthorizationResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) +} + +func findAppAuthorizationByTwoPartKey(ctx context.Context, conn *appfabric.Client, appAuthorizationARN, appBundleIdentifier string) (*awstypes.AppAuthorization, error) { + in := &appfabric.GetAppAuthorizationInput{ + AppAuthorizationIdentifier: aws.String(appAuthorizationARN), + AppBundleIdentifier: aws.String(appBundleIdentifier), + } + + output, err := conn.GetAppAuthorization(ctx, in) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.AppAuthorization == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return output.AppAuthorization, nil +} + +func statusAppAuthorization(ctx context.Context, conn *appfabric.Client, appAuthorizationARN, appBundleIdentifier string) 
retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findAppAuthorizationByTwoPartKey(ctx, conn, appAuthorizationARN, appBundleIdentifier) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } } func waitAppAuthorizationCreated(ctx context.Context, conn *appfabric.Client, appAuthorizationARN, appBundleIdentifier string, timeout time.Duration) (*awstypes.AppAuthorization, error) { @@ -467,48 +499,6 @@ func waitAppAuthorizationDeleted(ctx context.Context, conn *appfabric.Client, ap return nil, err } -func statusAppAuthorization(ctx context.Context, conn *appfabric.Client, appAuthorizationARN, appBundleIdentifier string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := findAppAuthorizationByTwoPartKey(ctx, conn, appAuthorizationARN, appBundleIdentifier) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.Status), nil - } -} - -func findAppAuthorizationByTwoPartKey(ctx context.Context, conn *appfabric.Client, appAuthorizationARN, appBundleIdentifier string) (*awstypes.AppAuthorization, error) { - in := &appfabric.GetAppAuthorizationInput{ - AppAuthorizationIdentifier: aws.String(appAuthorizationARN), - AppBundleIdentifier: aws.String(appBundleIdentifier), - } - - output, err := conn.GetAppAuthorization(ctx, in) - - if errs.IsA[*awstypes.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: in, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.AppAuthorization == nil { - return nil, tfresource.NewEmptyResultError(in) - } - - return output.AppAuthorization, nil -} - func expandCredentialsValue(ctx context.Context, credentialModels []credentialModel) (awstypes.Credential, diag.Diagnostics) { credentials := []awstypes.Credential{} var 
diags diag.Diagnostics @@ -561,7 +551,7 @@ type appAuthorizationResourceModel struct { AppAuthorizationARN types.String `tfsdk:"arn"` AppBundleARN fwtypes.ARN `tfsdk:"app_bundle_arn"` AuthType fwtypes.StringEnum[awstypes.AuthType] `tfsdk:"auth_type"` - AuthUrl types.String `tfsdk:"auth_url"` + AuthURL types.String `tfsdk:"auth_url"` CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` Credential fwtypes.ListNestedObjectValueOf[credentialModel] `tfsdk:"credential"` ID types.String `tfsdk:"id"` @@ -613,11 +603,11 @@ type tenantModel struct { } func (m *appAuthorizationResourceModel) parseAuthURL() error { - if m.AuthUrl.IsNull() { + if m.AuthURL.IsNull() { return nil } - fullURL := m.AuthUrl.ValueString() + fullURL := m.AuthURL.ValueString() index := strings.Index(fullURL, "oauth2") if index == -1 { @@ -625,7 +615,7 @@ func (m *appAuthorizationResourceModel) parseAuthURL() error { } baseURL := fullURL[:index+len("oauth2")] - m.AuthUrl = types.StringValue(baseURL) + m.AuthURL = types.StringValue(baseURL) return nil } diff --git a/internal/service/appfabric/app_authorization_connection.go b/internal/service/appfabric/app_authorization_connection.go new file mode 100644 index 00000000000..12864566ae9 --- /dev/null +++ b/internal/service/appfabric/app_authorization_connection.go @@ -0,0 +1,293 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package appfabric + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appfabric" + awstypes "github.com/aws/aws-sdk-go-v2/service/appfabric/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource(name="App Authorization Connection") +func newAppAuthorizationConnectionResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &appAuthorizationConnectionResource{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + + return r, nil +} + +type appAuthorizationConnectionResource struct { + framework.ResourceWithConfigure + framework.WithNoUpdate + framework.WithNoOpDelete + 
framework.WithTimeouts +} + +func (*appAuthorizationConnectionResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_appfabric_app_authorization_connection" +} + +func (r *appAuthorizationConnectionResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "app": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "app_authorization_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "app_bundle_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "tenant": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[tenantModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[tenantModel](ctx), + }, + }, + names.AttrID: framework.IDAttribute(), + }, + Blocks: map[string]schema.Block{ + "auth_request": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[authRequestModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "code": schema.StringAttribute{ + Required: true, + }, + "redirect_uri": schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + }), + }, + } +} + +func (r *appAuthorizationConnectionResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data appAuthorizationConnectionResourceModel + 
response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().AppFabricClient(ctx) + + input := &appfabric.ConnectAppAuthorizationInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + if response.Diagnostics.HasError() { + return + } + + input.AppBundleIdentifier = fwflex.StringFromFramework(ctx, data.AppBundleARN) + input.AppAuthorizationIdentifier = fwflex.StringFromFramework(ctx, data.AppAuthorizationARN) + + _, err := conn.ConnectAppAuthorization(ctx, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating AppFabric App Authorization (%s) Connection", data.AppAuthorizationARN.ValueString()), err.Error()) + + return + } + + // Set values for unknowns. + data.setID() + + appAuthorization, err := waitConnectAppAuthorizationCreated(ctx, conn, data.AppAuthorizationARN.ValueString(), data.AppBundleARN.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { + response.State.SetAttribute(ctx, path.Root(names.AttrID), data.ID) // Set 'id' so as to taint the resource. + response.Diagnostics.AddError(fmt.Sprintf("waiting for AppFabric App Authorization Connection (%s) create", data.ID.ValueString()), err.Error()) + + return + } + + // Set values for unknowns. + data.App = fwflex.StringToFramework(ctx, appAuthorization.App) + + var tenant tenantModel + response.Diagnostics.Append(fwflex.Flatten(ctx, appAuthorization.Tenant, &tenant)...) + if response.Diagnostics.HasError() { + return + } + data.Tenant = fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &tenant) + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *appAuthorizationConnectionResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data appAuthorizationConnectionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) + + return + } + + conn := r.Meta().AppFabricClient(ctx) + + output, err := findAppAuthorizationConnectionByTwoPartKey(ctx, conn, data.AppAuthorizationARN.ValueString(), data.AppBundleARN.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading AppFabric App Authorization Connection (%s)", data.ID.ValueString()), err.Error()) + + return + } + + // Set values for unknowns. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func findAppAuthorizationConnectionByTwoPartKey(ctx context.Context, conn *appfabric.Client, appAuthorizationARN, appBundleIdentifier string) (*awstypes.AppAuthorization, error) { + input := &appfabric.GetAppAuthorizationInput{ + AppAuthorizationIdentifier: aws.String(appAuthorizationARN), + AppBundleIdentifier: aws.String(appBundleIdentifier), + } + + output, err := conn.GetAppAuthorization(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.AppAuthorization == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.AppAuthorization, nil +} + +func statusConnectAppAuthorization(ctx context.Context, conn *appfabric.Client, appAuthorizationARN, appBundleArn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findAppAuthorizationConnectionByTwoPartKey(ctx, conn, appAuthorizationARN, appBundleArn) + + if 
tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitConnectAppAuthorizationCreated(ctx context.Context, conn *appfabric.Client, appAuthorizationARN, appBundleArn string, timeout time.Duration) (*awstypes.AppAuthorization, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.AppAuthorizationStatusPendingConnect), + Target: enum.Slice(awstypes.AppAuthorizationStatusConnected), + Refresh: statusConnectAppAuthorization(ctx, conn, appAuthorizationARN, appBundleArn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if out, ok := outputRaw.(*awstypes.AppAuthorization); ok { + return out, err + } + + return nil, err +} + +type appAuthorizationConnectionResourceModel struct { + App types.String `tfsdk:"app"` + AppAuthorizationARN fwtypes.ARN `tfsdk:"app_authorization_arn"` + AppBundleARN fwtypes.ARN `tfsdk:"app_bundle_arn"` + AuthRequest fwtypes.ListNestedObjectValueOf[authRequestModel] `tfsdk:"auth_request"` + ID types.String `tfsdk:"id"` + Tenant fwtypes.ListNestedObjectValueOf[tenantModel] `tfsdk:"tenant"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +const ( + appAuthorizationConnectionResourceIDPartCount = 2 +) + +func (m *appAuthorizationConnectionResourceModel) InitFromID() error { + parts, err := flex.ExpandResourceId(m.ID.ValueString(), appAuthorizationConnectionResourceIDPartCount, false) + if err != nil { + return err + } + + m.AppAuthorizationARN = fwtypes.ARNValue(parts[0]) + m.AppBundleARN = fwtypes.ARNValue(parts[1]) + + return nil +} + +func (m *appAuthorizationConnectionResourceModel) setID() { + m.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{m.AppAuthorizationARN.ValueString(), m.AppBundleARN.ValueString()}, appAuthorizationConnectionResourceIDPartCount, false))) +} + +type authRequestModel struct { + Code types.String `tfsdk:"code"` + RedirectURI types.String 
`tfsdk:"redirect_uri"` +} diff --git a/internal/service/appfabric/app_authorization_connection_test.go b/internal/service/appfabric/app_authorization_connection_test.go new file mode 100644 index 00000000000..acef9fde656 --- /dev/null +++ b/internal/service/appfabric/app_authorization_connection_test.go @@ -0,0 +1,173 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package appfabric_test + +import ( + "context" + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfappfabric "github.com/hashicorp/terraform-provider-aws/internal/service/appfabric" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccAppAuthorizationConnection_basic(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_appfabric_app_authorization_connection.test" + appBudleResourceName := "aws_appfabric_app_bundle.test" + appAuthorization := "aws_appfabric_app_authorization.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + // See https://docs.aws.amazon.com/appfabric/latest/adminguide/terraform.html#terraform-appfabric-connecting. 
+ tenantID := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_TENANT_ID") + serviceAccountToken := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_SERVICE_ACCOUNT_TOKEN") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccAppAuthorizationConnectionConfig_basic(rName, tenantID, serviceAccountToken), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAppAuthorizationConnectionExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "app"), + resource.TestCheckResourceAttrPair(resourceName, "app_bundle_arn", appBudleResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "app_authorization_arn", appAuthorization, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "auth_request.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "tenant.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tenant.0.tenant_display_name", rName), + resource.TestCheckResourceAttr(resourceName, "tenant.0.tenant_identifier", tenantID), + ), + }, + }, + }) +} +func testAccAppAuthorizationConnection_OAuth2(t *testing.T) { + acctest.Skip(t, "Currently not able to test") + + ctx := acctest.Context(t) + resourceName := "aws_appfabric_app_authorization_connection.test" + appBudleResourceName := "aws_appfabric_app_bundle.test" + appAuthorization := "aws_appfabric_app_authorization.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, names.USEast1RegionID, 
names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccAppAuthorizationConnectionConfig_OAuth2(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppAuthorizationConnectionExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "app_bundle_arn", appBudleResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "app_authorization_arn", appAuthorization, names.AttrARN), + ), + }, + }, + }) +} + +func testAccCheckAppAuthorizationConnectionExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).AppFabricClient(ctx) + + _, err := tfappfabric.FindAppAuthorizationConnectionByTwoPartKey(ctx, conn, rs.Primary.Attributes["app_authorization_arn"], rs.Primary.Attributes["app_bundle_arn"]) + + return err + } +} + +func testAccAppAuthorizationConnectionConfig_basic(rName, tenantID, serviceAccountToken string) string { + return fmt.Sprintf(` +resource "aws_appfabric_app_bundle" "test" { + tags = { + Name = %[1]q + } +} + +resource "aws_appfabric_app_authorization" "test" { + app_bundle_arn = aws_appfabric_app_bundle.test.arn + app = "TERRAFORMCLOUD" + auth_type = "apiKey" + + credential { + api_key_credential { + api_key = %[3]q + } + } + + tenant { + tenant_display_name = %[1]q + tenant_identifier = %[2]q + } + + tags = { + Name = %[1]q + } +} + +resource "aws_appfabric_app_authorization_connection" "test" { + app_bundle_arn = aws_appfabric_app_bundle.test.arn + app_authorization_arn = aws_appfabric_app_authorization.test.arn +} +`, rName, tenantID, serviceAccountToken) +} + +func 
testAccAppAuthorizationConnectionConfig_OAuth2(rName string) string { + return fmt.Sprintf(` +resource "aws_appfabric_app_bundle" "test" { + tags = { + Name = %[1]q + } +} + +resource "aws_appfabric_app_authorization" "test" { + app_bundle_arn = aws_appfabric_app_bundle.test.arn + app = "DROPBOX" + auth_type = "oauth2" + + credential { + oauth2_credential { + client_id = "newClinentID" + client_secret = "newSecretforOath2" + } + } + tenant { + tenant_display_name = "test" + tenant_identifier = "test" + } +} + +resource "aws_appfabric_app_authorization_connection" "test" { + app_bundle_arn = aws_appfabric_app_bundle.test.arn + app_authorization_arn = aws_appfabric_app_authorization.test.arn + auth_request { + code = "testcode" + redirect_uri = aws_appfabric_app_authorization.test.auth_url + } + +} +`, rName) +} diff --git a/internal/service/appfabric/app_authorization_test.go b/internal/service/appfabric/app_authorization_test.go index cc5f0e344f6..f8f954c1d52 100644 --- a/internal/service/appfabric/app_authorization_test.go +++ b/internal/service/appfabric/app_authorization_test.go @@ -29,6 +29,7 @@ func testAccAppAuthorization_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -68,6 +69,7 @@ func testAccAppAuthorization_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -103,6 +105,7 @@ func testAccAppAuthorization_apiKeyUpdate(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, 
names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -162,6 +165,7 @@ func testAccAppAuthorization_oath2Update(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -223,6 +227,7 @@ func testAccAppAuthorization_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/appfabric/app_bundle_test.go b/internal/service/appfabric/app_bundle_test.go index b1791d7aa89..5f8f44cd97a 100644 --- a/internal/service/appfabric/app_bundle_test.go +++ b/internal/service/appfabric/app_bundle_test.go @@ -28,6 +28,7 @@ func testAccAppBundle_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -59,6 +60,7 @@ func testAccAppBundle_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -86,6 +88,7 @@ func testAccAppBundle_cmk(t *testing.T) { PreCheck: 
func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -116,6 +119,7 @@ func testAccAppBundle_tags(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.AppFabricServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/appfabric/appfabric_test.go b/internal/service/appfabric/appfabric_test.go index 86cad1a8d7d..710d7eeca2e 100644 --- a/internal/service/appfabric/appfabric_test.go +++ b/internal/service/appfabric/appfabric_test.go @@ -4,10 +4,13 @@ package appfabric_test import ( + "context" "testing" "time" + "github.com/aws/aws-sdk-go-v2/service/appfabric" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" ) const serializeDelay = 10 * time.Second @@ -30,7 +33,38 @@ func TestAccAppFabric_serial(t *testing.T) { "oath2Update": testAccAppAuthorization_oath2Update, "tags": testAccAppAuthorization_tags, }, + "AppAuthorizationConnection": { + acctest.CtBasic: testAccAppAuthorizationConnection_basic, + "oath2Connect": testAccAppAuthorizationConnection_OAuth2, + }, + "Ingestion": { + acctest.CtBasic: testAccIngestion_basic, + acctest.CtDisappears: testAccIngestion_disappears, + "tags": testAccIngestion_tags, + }, + "IngestionDestination": { + acctest.CtBasic: testAccIngestionDestination_basic, + acctest.CtDisappears: testAccIngestionDestination_disappears, + "tags": testAccIngestionDestination_tags, + "update": testAccIngestionDestination_update, + "firehose": testAccIngestionDestination_firehose, + }, } acctest.RunSerialTests2Levels(t, testCases, 
serializeDelay) } + +func testAccPreCheck(ctx context.Context, t *testing.T) { + conn := acctest.Provider.Meta().(*conns.AWSClient).AppFabricClient(ctx) + + input := &appfabric.ListAppBundlesInput{} + _, err := conn.ListAppBundles(ctx, input) + + if acctest.PreCheckSkipError(err) { + t.Skipf("skipping acceptance testing: %s", err) + } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } +} diff --git a/internal/service/appfabric/exports_test.go b/internal/service/appfabric/exports_test.go index ff2fa01a037..bc02e253a39 100644 --- a/internal/service/appfabric/exports_test.go +++ b/internal/service/appfabric/exports_test.go @@ -5,9 +5,15 @@ package appfabric // Exports for use in tests only. var ( - ResourceAppAuthorization = newAppAuthorizationResource - ResourceAppBundle = newAppBundleResource + ResourceAppAuthorization = newAppAuthorizationResource + ResourceAppAuthorizationConnection = newAppAuthorizationConnectionResource + ResourceAppBundle = newAppBundleResource + ResourceIngestion = newIngestionResource + ResourceIngestionDestination = newIngestionDestinationResource - FindAppAuthorizationByTwoPartKey = findAppAuthorizationByTwoPartKey - FindAppBundleByID = findAppBundleByID + FindAppAuthorizationByTwoPartKey = findAppAuthorizationByTwoPartKey + FindAppAuthorizationConnectionByTwoPartKey = findAppAuthorizationConnectionByTwoPartKey + FindAppBundleByID = findAppBundleByID + FindIngestionByTwoPartKey = findIngestionByTwoPartKey + FindIngestionDestinationByThreePartKey = findIngestionDestinationByThreePartKey ) diff --git a/internal/service/appfabric/ingestion.go b/internal/service/appfabric/ingestion.go new file mode 100644 index 00000000000..b3d22c25125 --- /dev/null +++ b/internal/service/appfabric/ingestion.go @@ -0,0 +1,256 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package appfabric + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appfabric" + awstypes "github.com/aws/aws-sdk-go-v2/service/appfabric/types" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource(name="Ingestion") +// @Tags(identifierAttribute="arn") +func newIngestionResource(context.Context) (resource.ResourceWithConfigure, error) { + r := &ingestionResource{} + + return r, nil +} + +type ingestionResource struct { + framework.ResourceWithConfigure + framework.WithNoOpUpdate[ingestionResourceModel] + framework.WithImportByID +} + +func (*ingestionResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = 
"aws_appfabric_ingestion" +} + +func (r *ingestionResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "app": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "app_bundle_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "ingestion_type": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.IngestionType](), + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + "tenant_id": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 1024), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *ingestionResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data ingestionResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().AppFabricClient(ctx) + + input := &appfabric.CreateIngestionInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + if response.Diagnostics.HasError() { + return + } + + // Additional fields. 
+ input.AppBundleIdentifier = fwflex.StringFromFramework(ctx, data.AppBundleARN) + input.ClientToken = aws.String(errs.Must(uuid.GenerateUUID())) + input.Tags = getTagsIn(ctx) + + output, err := conn.CreateIngestion(ctx, input) + + if err != nil { + response.Diagnostics.AddError("creating AppFabric Ingestion", err.Error()) + + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + // Set values for unknowns. + data.ARN = fwflex.StringToFramework(ctx, output.Ingestion.Arn) + data.setID() + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *ingestionResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data ingestionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) + return + } + + conn := r.Meta().AppFabricClient(ctx) + + ingestion, err := findIngestionByTwoPartKey(ctx, conn, data.AppBundleARN.ValueString(), data.ARN.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading AppFabric Ingestion (%s)", data.ID.ValueString()), err.Error()) + + return + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, ingestion, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *ingestionResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data ingestionResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().AppFabricClient(ctx) + + _, err := conn.DeleteIngestion(ctx, &appfabric.DeleteIngestionInput{ + AppBundleIdentifier: aws.String(data.AppBundleARN.ValueString()), + IngestionIdentifier: aws.String(data.ARN.ValueString()), + }) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting AppFabric Ingestion (%s)", data.ID.ValueString()), err.Error()) + + return + } +} + +func (r *ingestionResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) +} + +func findIngestionByTwoPartKey(ctx context.Context, conn *appfabric.Client, appBundleARN, arn string) (*awstypes.Ingestion, error) { + input := &appfabric.GetIngestionInput{ + AppBundleIdentifier: aws.String(appBundleARN), + IngestionIdentifier: aws.String(arn), + } + + output, err := conn.GetIngestion(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Ingestion == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Ingestion, nil +} + +type ingestionResourceModel struct { + App types.String `tfsdk:"app"` + AppBundleARN fwtypes.ARN `tfsdk:"app_bundle_arn"` + ARN types.String `tfsdk:"arn"` + ID types.String `tfsdk:"id"` + IngestionType fwtypes.StringEnum[awstypes.IngestionType] `tfsdk:"ingestion_type"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + TenantId types.String `tfsdk:"tenant_id"` +} + +const ( + ingestionResourceIDPartCount = 2 +) + +func (m *ingestionResourceModel) InitFromID() error { + id := m.ID.ValueString() + parts, err := flex.ExpandResourceId(id, ingestionResourceIDPartCount, false) + if err != nil { + return err + } + + 
m.AppBundleARN = fwtypes.ARNValue(parts[0]) + m.ARN = types.StringValue(parts[1]) + + return nil +} + +func (m *ingestionResourceModel) setID() { + m.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{m.AppBundleARN.ValueString(), m.ARN.ValueString()}, ingestionResourceIDPartCount, false))) +} diff --git a/internal/service/appfabric/ingestion_destination.go b/internal/service/appfabric/ingestion_destination.go new file mode 100644 index 00000000000..a2b6c366c88 --- /dev/null +++ b/internal/service/appfabric/ingestion_destination.go @@ -0,0 +1,765 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package appfabric + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appfabric" + awstypes "github.com/aws/aws-sdk-go-v2/service/appfabric/types" + uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource(name="Ingestion Destination") +// @Tags(identifierAttribute="arn") +func newIngestionDestinationResource(context.Context) (resource.ResourceWithConfigure, error) { + r := &ingestionDestinationResource{} + + r.SetDefaultCreateTimeout(5 * time.Minute) + r.SetDefaultUpdateTimeout(5 * time.Minute) + r.SetDefaultDeleteTimeout(5 * time.Minute) + + return r, nil +} + +type ingestionDestinationResource struct { + framework.ResourceWithConfigure + framework.WithImportByID + framework.WithTimeouts +} + +func (*ingestionDestinationResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_appfabric_ingestion_destination" +} + +func (r *ingestionDestinationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "app_bundle_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "ingestion_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + 
stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + "destination_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[destinationConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "audit_log": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[auditLogDestinationConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + names.AttrDestination: schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[destinationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "firehose_stream": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[firehoseStreamModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "stream_name": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(3, 64), + }, + }, + }, + }, + }, + names.AttrS3Bucket: schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[s3BucketModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrBucketName: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(3, 63), + }, + }, + names.AttrPrefix: schema.StringAttribute{ + 
Optional: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 120), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "processing_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[processingConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtMost(1), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "audit_log": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[auditLogProcessingConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtMost(1), + }, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrFormat: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.Format](), + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrSchema: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.Schema](), + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + }, + }, + }, + }, + }, + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func (r *ingestionDestinationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data ingestionDestinationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().AppFabricClient(ctx) + + input := &appfabric.CreateIngestionDestinationInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) 
	// NOTE(review): this chunk opens mid-way through Create; the plan has already been
	// read into `data` and expanded into `input` (*appfabric.CreateIngestionDestinationInput)
	// above this window — confirm against the full file.
	if response.Diagnostics.HasError() {
		return
	}

	// AutoFlEx doesn't yet handle union types.
	if !data.DestinationConfiguration.IsNull() {
		destinationConfigurationData, diags := data.DestinationConfiguration.ToPtr(ctx)
		response.Diagnostics.Append(diags...)
		if response.Diagnostics.HasError() {
			return
		}

		destinationConfiguration, diags := expandDestinationConfiguration(ctx, destinationConfigurationData)
		response.Diagnostics.Append(diags...)
		if response.Diagnostics.HasError() {
			return
		}

		input.DestinationConfiguration = destinationConfiguration
	}

	if !data.ProcessingConfiguration.IsNull() {
		processingConfigurationData, diags := data.ProcessingConfiguration.ToPtr(ctx)
		response.Diagnostics.Append(diags...)
		if response.Diagnostics.HasError() {
			return
		}

		processingConfiguration, diags := expandProcessingConfiguration(ctx, processingConfigurationData)
		response.Diagnostics.Append(diags...)
		if response.Diagnostics.HasError() {
			return
		}

		input.ProcessingConfiguration = processingConfiguration
	}

	// Additional fields.
	input.AppBundleIdentifier = aws.String(data.AppBundleARN.ValueString())
	input.ClientToken = aws.String(errs.Must(uuid.GenerateUUID()))
	input.IngestionIdentifier = aws.String(data.IngestionARN.ValueString())
	input.Tags = getTagsIn(ctx)

	output, err := conn.CreateIngestionDestination(ctx, input)

	if err != nil {
		response.Diagnostics.AddError("creating AppFabric Ingestion Destination", err.Error())

		return
	}

	// Set values for unknowns.
	data.ARN = fwflex.StringToFramework(ctx, output.IngestionDestination.Arn)
	data.setID()

	// If the destination never reaches ACTIVE, persist the ID so the partially-created
	// resource is tainted rather than orphaned.
	if _, err := waitIngestionDestinationActive(ctx, conn, data.AppBundleARN.ValueString(), data.IngestionARN.ValueString(), data.ARN.ValueString(), r.CreateTimeout(ctx, data.Timeouts)); err != nil {
		response.State.SetAttribute(ctx, path.Root(names.AttrID), data.ID) // Set 'id' so as to taint the resource.
		response.Diagnostics.AddError(fmt.Sprintf("waiting for AppFabric Ingestion Destination (%s) create", data.ID.ValueString()), err.Error())

		return
	}

	response.Diagnostics.Append(response.State.Set(ctx, data)...)
}

// Read refreshes the Terraform state from the remote ingestion destination,
// removing the resource from state if it no longer exists.
func (r *ingestionDestinationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
	var data ingestionDestinationResourceModel
	response.Diagnostics.Append(request.State.Get(ctx, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	// Re-derive the three ARN parts from the composite ID (supports import).
	if err := data.InitFromID(); err != nil {
		response.Diagnostics.AddError("parsing resource ID", err.Error())

		return
	}

	conn := r.Meta().AppFabricClient(ctx)

	output, err := findIngestionDestinationByThreePartKey(ctx, conn, data.AppBundleARN.ValueString(), data.IngestionARN.ValueString(), data.ARN.ValueString())

	if tfresource.NotFound(err) {
		response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err))
		response.State.RemoveResource(ctx)

		return
	}

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("reading AppFabric Ingestion Destination (%s)", data.ID.ValueString()), err.Error())

		return
	}

	// Set attributes for import.
	response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	// AutoFlEx doesn't yet handle union types.
	if output.DestinationConfiguration != nil {
		destinationConfigurationData, diags := flattenDestinationConfiguration(ctx, output.DestinationConfiguration)
		response.Diagnostics.Append(diags...)
		if response.Diagnostics.HasError() {
			return
		}

		data.DestinationConfiguration = fwtypes.NewListNestedObjectValueOfPtrMust(ctx, destinationConfigurationData)
	}

	if output.ProcessingConfiguration != nil {
		processingConfigurationData, diags := flattenProcessingConfiguration(ctx, output.ProcessingConfiguration)
		response.Diagnostics.Append(diags...)
		if response.Diagnostics.HasError() {
			return
		}

		data.ProcessingConfiguration = fwtypes.NewListNestedObjectValueOfPtrMust(ctx, processingConfigurationData)
	}

	response.Diagnostics.Append(response.State.Set(ctx, &data)...)
}

// Update pushes destination-configuration changes to the API. Only a change to
// `destination_configuration` triggers an API call here; tag-only changes are
// handled by the provider's transparent tagging interceptor.
func (r *ingestionDestinationResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
	var old, new ingestionDestinationResourceModel
	response.Diagnostics.Append(request.Plan.Get(ctx, &new)...)
	if response.Diagnostics.HasError() {
		return
	}
	response.Diagnostics.Append(request.State.Get(ctx, &old)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().AppFabricClient(ctx)

	if !old.DestinationConfiguration.Equal(new.DestinationConfiguration) {
		input := &appfabric.UpdateIngestionDestinationInput{}
		response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...)
		if response.Diagnostics.HasError() {
			return
		}

		// AutoFlEx doesn't yet handle union types.
		if !new.DestinationConfiguration.IsNull() {
			destinationConfigurationData, diags := new.DestinationConfiguration.ToPtr(ctx)
			response.Diagnostics.Append(diags...)
			if response.Diagnostics.HasError() {
				return
			}

			destinationConfiguration, diags := expandDestinationConfiguration(ctx, destinationConfigurationData)
			response.Diagnostics.Append(diags...)
			if response.Diagnostics.HasError() {
				return
			}

			input.DestinationConfiguration = destinationConfiguration
		}

		// Additional fields.
		input.AppBundleIdentifier = aws.String(new.AppBundleARN.ValueString())
		input.IngestionDestinationIdentifier = aws.String(new.ARN.ValueString())
		input.IngestionIdentifier = aws.String(new.IngestionARN.ValueString())

		_, err := conn.UpdateIngestionDestination(ctx, input)

		if err != nil {
			response.Diagnostics.AddError(fmt.Sprintf("updating AppFabric Ingestion Destination (%s)", new.ID.ValueString()), err.Error())

			return
		}

		if _, err := waitIngestionDestinationActive(ctx, conn, new.AppBundleARN.ValueString(), new.IngestionARN.ValueString(), new.ARN.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)); err != nil {
			response.Diagnostics.AddError(fmt.Sprintf("waiting for AppFabric Ingestion Destination (%s) update", new.ID.ValueString()), err.Error())

			return
		}
	}

	response.Diagnostics.Append(response.State.Set(ctx, &new)...)
}

// Delete removes the ingestion destination and waits for it to disappear.
// A ResourceNotFoundException is treated as success (already deleted).
func (r *ingestionDestinationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
	var data ingestionDestinationResourceModel
	response.Diagnostics.Append(request.State.Get(ctx, &data)...)
	if response.Diagnostics.HasError() {
		return
	}

	conn := r.Meta().AppFabricClient(ctx)

	_, err := conn.DeleteIngestionDestination(ctx, &appfabric.DeleteIngestionDestinationInput{
		AppBundleIdentifier:            aws.String(data.AppBundleARN.ValueString()),
		IngestionDestinationIdentifier: aws.String(data.ARN.ValueString()),
		IngestionIdentifier:            aws.String(data.IngestionARN.ValueString()),
	})

	if errs.IsA[*awstypes.ResourceNotFoundException](err) {
		return
	}

	if err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("deleting AppFabric Ingestion Destination (%s)", data.ID.ValueString()), err.Error())

		return
	}

	if _, err = waitIngestionDestinationDeleted(ctx, conn, data.AppBundleARN.ValueString(), data.IngestionARN.ValueString(), data.ARN.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil {
		response.Diagnostics.AddError(fmt.Sprintf("waiting for AppFabric Ingestion Destination (%s) delete", data.ID.ValueString()), err.Error())

		return
	}
}

// ConfigValidators enforces that at least one of the union's destination
// members (firehose_stream or s3_bucket) is configured.
func (r *ingestionDestinationResource) ConfigValidators(context.Context) []resource.ConfigValidator {
	return []resource.ConfigValidator{
		resourcevalidator.AtLeastOneOf(
			path.MatchRoot("destination_configuration").AtListIndex(0).AtName("audit_log").AtListIndex(0).AtName(names.AttrDestination).AtListIndex(0).AtName("firehose_stream"),
			path.MatchRoot("destination_configuration").AtListIndex(0).AtName("audit_log").AtListIndex(0).AtName(names.AttrDestination).AtListIndex(0).AtName(names.AttrS3Bucket),
		),
	}
}

// ModifyPlan computes tags_all from the provider's default tags plus the
// resource's configured tags.
func (r *ingestionDestinationResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) {
	r.SetTagsAll(ctx, request, response)
}

// findIngestionDestinationByThreePartKey gets a single ingestion destination by
// (app bundle ARN, ingestion ARN, destination ARN), translating the API's
// ResourceNotFoundException into a retry.NotFoundError for the waiters.
func findIngestionDestinationByThreePartKey(ctx context.Context, conn *appfabric.Client, appBundleARN, ingestionARN, arn string) (*awstypes.IngestionDestination, error) {
	in := &appfabric.GetIngestionDestinationInput{
		AppBundleIdentifier:            aws.String(appBundleARN),
		IngestionDestinationIdentifier: aws.String(arn),
		IngestionIdentifier:            aws.String(ingestionARN),
	}

	output, err := conn.GetIngestionDestination(ctx, in)

	if errs.IsA[*awstypes.ResourceNotFoundException](err) {
		return nil, &retry.NotFoundError{
			LastError:   err,
			LastRequest: in,
		}
	}

	if err != nil {
		return nil, err
	}

	if output == nil || output.IngestionDestination == nil {
		return nil, tfresource.NewEmptyResultError(in)
	}

	return output.IngestionDestination, nil
}

// statusIngestionDestination adapts the finder into a StateRefreshFunc;
// not-found maps to the empty status, which terminates the "deleted" waiter.
func statusIngestionDestination(ctx context.Context, conn *appfabric.Client, appBundleARN, ingestionARN, arn string) retry.StateRefreshFunc {
	return func() (interface{}, string, error) {
		out, err := findIngestionDestinationByThreePartKey(ctx, conn, appBundleARN, ingestionARN, arn)

		if tfresource.NotFound(err) {
			return nil, "", nil
		}

		if err != nil {
			return nil, "", err
		}

		return out, string(out.Status), nil
	}
}

// waitIngestionDestinationActive waits until the destination's status is ACTIVE,
// surfacing StatusReason as the last error on failure.
func waitIngestionDestinationActive(ctx context.Context, conn *appfabric.Client, appBundleARN, ingestionARN, arn string, timeout time.Duration) (*awstypes.IngestionDestination, error) { //nolint:unparam
	stateConf := &retry.StateChangeConf{
		Pending: []string{},
		Target:  enum.Slice(awstypes.IngestionDestinationStatusActive),
		Refresh: statusIngestionDestination(ctx, conn, appBundleARN, ingestionARN, arn),
		Timeout: timeout,
	}

	outputRaw, err := stateConf.WaitForStateContext(ctx)

	if output, ok := outputRaw.(*awstypes.IngestionDestination); ok {
		tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusReason)))

		return output, err
	}

	return nil, err
}

// waitIngestionDestinationDeleted waits until the destination disappears
// (empty Target set: success when the refresh reports not-found).
func waitIngestionDestinationDeleted(ctx context.Context, conn *appfabric.Client, appBundleARN, ingestionARN, arn string, timeout time.Duration) (*awstypes.IngestionDestination, error) {
	stateConf := &retry.StateChangeConf{
		Pending: enum.Slice(awstypes.IngestionDestinationStatusActive),
		Target:  []string{},
		Refresh: statusIngestionDestination(ctx, conn, appBundleARN, ingestionARN, arn),
		Timeout: timeout,
	}

	outputRaw, err := stateConf.WaitForStateContext(ctx)

	if output, ok := outputRaw.(*awstypes.IngestionDestination); ok {
		tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusReason)))

		return output, err
	}

	return nil, err
}

// ingestionDestinationResourceModel is the Plugin Framework state/plan model.
// `id` is a comma-composite of app_bundle_arn, ingestion_arn and arn (see
// InitFromID/setID below).
type ingestionDestinationResourceModel struct {
	AppBundleARN             fwtypes.ARN                                                    `tfsdk:"app_bundle_arn"`
	ARN                      types.String                                                   `tfsdk:"arn"`
	DestinationConfiguration fwtypes.ListNestedObjectValueOf[destinationConfigurationModel] `tfsdk:"destination_configuration"`
	ID                       types.String                                                   `tfsdk:"id"`
	IngestionARN             fwtypes.ARN                                                    `tfsdk:"ingestion_arn"`
	ProcessingConfiguration  fwtypes.ListNestedObjectValueOf[processingConfigurationModel]  `tfsdk:"processing_configuration"`
	Tags                     types.Map                                                      `tfsdk:"tags"`
	TagsAll                  types.Map                                                      `tfsdk:"tags_all"`
	Timeouts                 timeouts.Value                                                 `tfsdk:"timeouts"`
}

const (
	// Number of parts in the composite resource ID.
	ingestionDestinationResourceIDPartCount = 3
)

// InitFromID splits the composite ID back into its three ARN parts.
func (m *ingestionDestinationResourceModel) InitFromID() error {
	parts, err := flex.ExpandResourceId(m.ID.ValueString(), ingestionDestinationResourceIDPartCount, false)
	if err != nil {
		return err
	}

	m.AppBundleARN = fwtypes.ARNValue(parts[0])
	m.IngestionARN = fwtypes.ARNValue(parts[1])
	m.ARN = types.StringValue(parts[2])

	return nil
}

// setID builds the composite ID from the three ARN parts.
func (m *ingestionDestinationResourceModel) setID() {
	m.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{m.AppBundleARN.ValueString(), m.IngestionARN.ValueString(), m.ARN.ValueString()}, ingestionDestinationResourceIDPartCount, false)))
}

// Nested models mirroring the API's DestinationConfiguration union.
type destinationConfigurationModel struct {
	AuditLog fwtypes.ListNestedObjectValueOf[auditLogDestinationConfigurationModel] `tfsdk:"audit_log"`
}

type auditLogDestinationConfigurationModel struct {
	Destination fwtypes.ListNestedObjectValueOf[destinationModel] `tfsdk:"destination"`
}

// destinationModel: exactly one of the two members is expected to be set
// (enforced by ConfigValidators above).
type destinationModel struct {
	FirehoseStream fwtypes.ListNestedObjectValueOf[firehoseStreamModel] `tfsdk:"firehose_stream"`
	S3Bucket       fwtypes.ListNestedObjectValueOf[s3BucketModel]       `tfsdk:"s3_bucket"`
}

type firehoseStreamModel struct {
	StreamName types.String `tfsdk:"stream_name"`
}

type s3BucketModel struct {
	BucketName types.String `tfsdk:"bucket_name"`
	Prefix     types.String `tfsdk:"prefix"`
}

// Nested models mirroring the API's ProcessingConfiguration union.
type processingConfigurationModel struct {
	AuditLog fwtypes.ListNestedObjectValueOf[auditLogProcessingConfigurationModel] `tfsdk:"audit_log"`
}

type auditLogProcessingConfigurationModel struct {
	Format fwtypes.StringEnum[awstypes.Format] `tfsdk:"format"`
	Schema fwtypes.StringEnum[awstypes.Schema] `tfsdk:"schema"`
}

// expandDestinationConfiguration converts the framework model into the API's
// DestinationConfiguration union (manual because AutoFlEx can't do unions).
// Returns nil when audit_log is not configured.
func expandDestinationConfiguration(ctx context.Context, destinationConfigurationData *destinationConfigurationModel) (awstypes.DestinationConfiguration, diag.Diagnostics) {
	var diags diag.Diagnostics

	if !destinationConfigurationData.AuditLog.IsNull() {
		auditLogDestinationConfigurationData, d := destinationConfigurationData.AuditLog.ToPtr(ctx)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		destinationData, d := auditLogDestinationConfigurationData.Destination.ToPtr(ctx)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		destination, d := expandDestination(ctx, destinationData)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		apiObject := &awstypes.DestinationConfigurationMemberAuditLog{
			Value: awstypes.AuditLogDestinationConfiguration{
				Destination: destination,
			},
		}

		return apiObject, diags
	}

	return nil, diags
}

// expandDestination picks the configured union member (firehose_stream wins if
// both are somehow set; config validation prevents that in practice).
func expandDestination(ctx context.Context, destinationData *destinationModel) (awstypes.Destination, diag.Diagnostics) {
	var diags diag.Diagnostics

	if !destinationData.FirehoseStream.IsNull() {
		firehoseStreamData, d := destinationData.FirehoseStream.ToPtr(ctx)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		apiObject := &awstypes.DestinationMemberFirehoseStream{}
		diags.Append(fwflex.Expand(ctx, firehoseStreamData, &apiObject.Value)...)
		if diags.HasError() {
			return nil, diags
		}

		return apiObject, diags
	}
	if !destinationData.S3Bucket.IsNull() {
		s3BucketData, d := destinationData.S3Bucket.ToPtr(ctx)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		apiObject := &awstypes.DestinationMemberS3Bucket{}
		diags.Append(fwflex.Expand(ctx, s3BucketData, &apiObject.Value)...)
		if diags.HasError() {
			return nil, diags
		}

		return apiObject, diags
	}

	return nil, diags
}

// expandProcessingConfiguration converts the framework model into the API's
// ProcessingConfiguration union. Returns nil when audit_log is not configured.
func expandProcessingConfiguration(ctx context.Context, processingConfigurationData *processingConfigurationModel) (awstypes.ProcessingConfiguration, diag.Diagnostics) {
	var diags diag.Diagnostics

	if !processingConfigurationData.AuditLog.IsNull() {
		auditLogProcessingConfigurationData, d := processingConfigurationData.AuditLog.ToPtr(ctx)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		apiObject := &awstypes.ProcessingConfigurationMemberAuditLog{}
		diags.Append(fwflex.Expand(ctx, auditLogProcessingConfigurationData, &apiObject.Value)...)
		if diags.HasError() {
			return nil, diags
		}

		return apiObject, diags
	}

	return nil, diags
}

// flattenDestinationConfiguration is the inverse of expandDestinationConfiguration;
// unknown union members yield a nil model.
func flattenDestinationConfiguration(ctx context.Context, apiObject awstypes.DestinationConfiguration) (*destinationConfigurationModel, diag.Diagnostics) {
	var diags diag.Diagnostics
	var destinationConfigurationData *destinationConfigurationModel

	switch v := apiObject.(type) {
	case *awstypes.DestinationConfigurationMemberAuditLog:
		destinationData, d := flattenDestination(ctx, v.Value.Destination)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		auditLogDestinationConfigurationData := &auditLogDestinationConfigurationModel{
			Destination: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, destinationData),
		}
		destinationConfigurationData = &destinationConfigurationModel{
			AuditLog: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, auditLogDestinationConfigurationData),
		}
	}

	return destinationConfigurationData, diags
}

// flattenDestination maps the API union member to the model, explicitly nulling
// the other member so state stays unambiguous.
func flattenDestination(ctx context.Context, apiObject awstypes.Destination) (*destinationModel, diag.Diagnostics) {
	var diags diag.Diagnostics
	var destinationData *destinationModel

	switch v := apiObject.(type) {
	case *awstypes.DestinationMemberFirehoseStream:
		var firehoseStreamData firehoseStreamModel
		d := fwflex.Flatten(ctx, v.Value, &firehoseStreamData)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		destinationData = &destinationModel{
			FirehoseStream: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &firehoseStreamData),
			S3Bucket:       fwtypes.NewListNestedObjectValueOfNull[s3BucketModel](ctx),
		}

	case *awstypes.DestinationMemberS3Bucket:
		var s3BucketData s3BucketModel
		d := fwflex.Flatten(ctx, v.Value, &s3BucketData)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		destinationData = &destinationModel{
			FirehoseStream: fwtypes.NewListNestedObjectValueOfNull[firehoseStreamModel](ctx),
			S3Bucket:       fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &s3BucketData),
		}
	}

	return destinationData, diags
}

// flattenProcessingConfiguration is the inverse of expandProcessingConfiguration.
func flattenProcessingConfiguration(ctx context.Context, apiObject awstypes.ProcessingConfiguration) (*processingConfigurationModel, diag.Diagnostics) {
	var diags diag.Diagnostics
	var processingConfigurationData *processingConfigurationModel

	switch v := apiObject.(type) {
	case *awstypes.ProcessingConfigurationMemberAuditLog:
		var auditLogProcessingConfigurationData auditLogProcessingConfigurationModel
		d := fwflex.Flatten(ctx, v.Value, &auditLogProcessingConfigurationData)
		diags.Append(d...)
		if diags.HasError() {
			return nil, diags
		}

		processingConfigurationData = &processingConfigurationModel{
			AuditLog: fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &auditLogProcessingConfigurationData),
		}
	}

	return processingConfigurationData, diags
}

// ---- diff boundary: new file internal/service/appfabric/ingestion_destination_test.go ----
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package appfabric_test

import (
	"context"
	"fmt"
	"testing"

	awstypes "github.com/aws/aws-sdk-go-v2/service/appfabric/types"
	sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
	"github.com/hashicorp/terraform-plugin-testing/terraform"
	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
	"github.com/hashicorp/terraform-provider-aws/internal/conns"
	tfappfabric "github.com/hashicorp/terraform-provider-aws/internal/service/appfabric"
	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
	"github.com/hashicorp/terraform-provider-aws/names"
)

// Basic create + import round-trip against an S3 destination.
// Lowercase name: run via a serialized test group, not directly by `go test -run`.
func testAccIngestionDestination_basic(t *testing.T) {
	ctx := acctest.Context(t)
	var ingestiondestination awstypes.IngestionDestination
	resourceName := "aws_appfabric_ingestion_destination.test"
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	// See https://docs.aws.amazon.com/appfabric/latest/adminguide/terraform.html#terraform-appfabric-connecting.
	tenantID := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_TENANT_ID")
	serviceAccountToken := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_SERVICE_ACCOUNT_TOKEN")

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			// AppFabric is only available in a subset of Regions.
			acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.AppFabricServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckIngestionDestinationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccIngestionDestinationConfig_basic(rName, tenantID, serviceAccountToken),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckIngestionDestinationExists(ctx, resourceName, &ingestiondestination),
					resource.TestCheckResourceAttrSet(resourceName, "app_bundle_arn"),
					resource.TestCheckResourceAttrSet(resourceName, names.AttrARN),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.firehose_stream.#", acctest.Ct0),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.0.bucket_name", rName),
					resource.TestCheckNoResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.0.prefix"),
					resource.TestCheckResourceAttrSet(resourceName, "ingestion_arn"),
					resource.TestCheckResourceAttr(resourceName, "processing_configuration.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "processing_configuration.0.audit_log.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "processing_configuration.0.audit_log.0.format", names.AttrJSON),
					resource.TestCheckResourceAttr(resourceName, "processing_configuration.0.audit_log.0.schema", "raw"),
					resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0),
				),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// Verifies out-of-band deletion is detected (resource "disappears").
func testAccIngestionDestination_disappears(t *testing.T) {
	ctx := acctest.Context(t)
	var ingestiondestination awstypes.IngestionDestination
	resourceName := "aws_appfabric_ingestion_destination.test"
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	// See https://docs.aws.amazon.com/appfabric/latest/adminguide/terraform.html#terraform-appfabric-connecting.
	tenantID := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_TENANT_ID")
	serviceAccountToken := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_SERVICE_ACCOUNT_TOKEN")

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.AppFabricServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckIngestionDestinationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccIngestionDestinationConfig_basic(rName, tenantID, serviceAccountToken),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckIngestionDestinationExists(ctx, resourceName, &ingestiondestination),
					acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfappfabric.ResourceIngestionDestination, resourceName),
				),
				ExpectNonEmptyPlan: true,
			},
		},
	})
}

// Exercises tag add / update / remove transitions.
func testAccIngestionDestination_tags(t *testing.T) {
	ctx := acctest.Context(t)
	var ingestiondestination awstypes.IngestionDestination
	resourceName := "aws_appfabric_ingestion_destination.test"
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	// See https://docs.aws.amazon.com/appfabric/latest/adminguide/terraform.html#terraform-appfabric-connecting.
	tenantID := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_TENANT_ID")
	serviceAccountToken := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_SERVICE_ACCOUNT_TOKEN")

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.AppFabricServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckIngestionDestinationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccIngestionDestinationConfig_tags1(rName, tenantID, serviceAccountToken, acctest.CtKey1, acctest.CtValue1),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckIngestionDestinationExists(ctx, resourceName, &ingestiondestination),
					resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1),
				),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				Config: testAccIngestionDestinationConfig_tags2(rName, tenantID, serviceAccountToken, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckIngestionDestinationExists(ctx, resourceName, &ingestiondestination),
					resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2),
					resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated),
					resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2),
				),
			},
			{
				Config: testAccIngestionDestinationConfig_tags1(rName, tenantID, serviceAccountToken, acctest.CtKey2, acctest.CtValue2),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckIngestionDestinationExists(ctx, resourceName, &ingestiondestination),
					resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2),
				),
			},
		},
	})
}

// In-place update of the S3 destination's prefix.
func testAccIngestionDestination_update(t *testing.T) {
	ctx := acctest.Context(t)
	var ingestiondestination awstypes.IngestionDestination
	resourceName := "aws_appfabric_ingestion_destination.test"
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	// See https://docs.aws.amazon.com/appfabric/latest/adminguide/terraform.html#terraform-appfabric-connecting.
	tenantID := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_TENANT_ID")
	serviceAccountToken := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_SERVICE_ACCOUNT_TOKEN")

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.AppFabricServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckIngestionDestinationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccIngestionDestinationConfig_basic(rName, tenantID, serviceAccountToken),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckIngestionDestinationExists(ctx, resourceName, &ingestiondestination),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.firehose_stream.#", acctest.Ct0),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.0.bucket_name", rName),
					resource.TestCheckNoResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.0.prefix"),
				),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				Config: testAccIngestionDestinationConfig_s3Prefix(rName, tenantID, serviceAccountToken, "testing"),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckIngestionDestinationExists(ctx, resourceName, &ingestiondestination),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.firehose_stream.#", acctest.Ct0),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.0.bucket_name", rName),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.0.prefix", "testing"),
				),
			},
		},
	})
}

// Firehose-stream destination variant.
func testAccIngestionDestination_firehose(t *testing.T) {
	ctx := acctest.Context(t)
	var ingestiondestination awstypes.IngestionDestination
	resourceName := "aws_appfabric_ingestion_destination.test"
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
	// See https://docs.aws.amazon.com/appfabric/latest/adminguide/terraform.html#terraform-appfabric-connecting.
	tenantID := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_TENANT_ID")
	serviceAccountToken := acctest.SkipIfEnvVarNotSet(t, "AWS_APPFABRIC_TERRAFORMCLOUD_SERVICE_ACCOUNT_TOKEN")

	resource.Test(t, resource.TestCase{
		PreCheck: func() {
			acctest.PreCheck(ctx, t)
			acctest.PreCheckRegion(t, names.USEast1RegionID, names.APNortheast1RegionID, names.EUWest1RegionID)
			testAccPreCheck(ctx, t)
		},
		ErrorCheck:               acctest.ErrorCheck(t, names.AppFabricServiceID),
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories,
		CheckDestroy:             testAccCheckIngestionDestinationDestroy(ctx),
		Steps: []resource.TestStep{
			{
				Config: testAccIngestionDestinationConfig_firehose(rName, tenantID, serviceAccountToken),
				Check: resource.ComposeAggregateTestCheckFunc(
					testAccCheckIngestionDestinationExists(ctx, resourceName, &ingestiondestination),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.#", acctest.Ct1),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.firehose_stream.#", acctest.Ct1),
					resource.TestCheckResourceAttrSet(resourceName, "destination_configuration.0.audit_log.0.destination.0.firehose_stream.0.stream_name"),
					resource.TestCheckResourceAttr(resourceName, "destination_configuration.0.audit_log.0.destination.0.s3_bucket.#", acctest.Ct0),
				),
			},
			{
				ResourceName:      resourceName,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// CheckDestroy: every tracked destination must be gone after the test.
func testAccCheckIngestionDestinationDestroy(ctx context.Context) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		conn := acctest.Provider.Meta().(*conns.AWSClient).AppFabricClient(ctx)

		for _, rs := range s.RootModule().Resources {
			if rs.Type != "aws_appfabric_ingestion_destination" {
				continue
			}

			_, err := tfappfabric.FindIngestionDestinationByThreePartKey(ctx, conn, rs.Primary.Attributes["app_bundle_arn"], rs.Primary.Attributes["ingestion_arn"], rs.Primary.Attributes[names.AttrARN])

			if tfresource.NotFound(err) {
				continue
			}

			if err != nil {
				return err
			}

			return fmt.Errorf("AppFabric Ingestion Destination %s still exists", rs.Primary.ID)
		}

		return nil
	}
}

// Exists check: fetches the destination and stores it in v for later asserts.
func testAccCheckIngestionDestinationExists(ctx context.Context, n string, v *awstypes.IngestionDestination) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		conn := acctest.Provider.Meta().(*conns.AWSClient).AppFabricClient(ctx)

		output, err := tfappfabric.FindIngestionDestinationByThreePartKey(ctx, conn, rs.Primary.Attributes["app_bundle_arn"], rs.Primary.Attributes["ingestion_arn"], rs.Primary.Attributes[names.AttrARN])

		if err != nil {
			return err
		}

		*v = *output

		return nil
	}
}

// Shared base config: app bundle + auditLog ingestion (builds on the ingestion
// test's base). serviceAccountToken is consumed by testAccIngestionConfig_base.
func testAccIngestionDestinationConfig_base(rName, tenantID, serviceAccountToken string) string {
	return acctest.ConfigCompose(testAccIngestionConfig_base(rName, tenantID, serviceAccountToken), fmt.Sprintf(`
resource "aws_appfabric_ingestion" "test" {
  app            = aws_appfabric_app_authorization_connection.test.app
  app_bundle_arn = aws_appfabric_app_bundle.test.arn
  tenant_id      = %[2]q
  ingestion_type = "auditLog"

  tags = {
    Name = %[1]q
  }
}
`, rName, tenantID))
}

// S3 destination, no prefix, no tags.
func testAccIngestionDestinationConfig_basic(rName, tenantID, serviceAccountToken string) string {
	return acctest.ConfigCompose(testAccIngestionDestinationConfig_base(rName, tenantID, serviceAccountToken), fmt.Sprintf(`
resource "aws_s3_bucket" "test" {
  bucket = %[1]q
}

resource "aws_appfabric_ingestion_destination" "test" {
  app_bundle_arn = aws_appfabric_app_bundle.test.arn
  ingestion_arn  = aws_appfabric_ingestion.test.arn

  processing_configuration {
    audit_log {
      format = "json"
      schema = "raw"
    }
  }

  destination_configuration {
    audit_log {
      destination {
        s3_bucket {
          bucket_name = aws_s3_bucket.test.bucket
        }
      }
    }
  }
}
`, rName))
}

// S3 destination with a key prefix (used by the update test).
func testAccIngestionDestinationConfig_s3Prefix(rName, tenantID, serviceAccountToken, prefix string) string {
	return acctest.ConfigCompose(testAccIngestionDestinationConfig_base(rName, tenantID, serviceAccountToken), fmt.Sprintf(`
resource "aws_s3_bucket" "test" {
  bucket = %[1]q
}

resource "aws_appfabric_ingestion_destination" "test" {
  app_bundle_arn = aws_appfabric_app_bundle.test.arn
  ingestion_arn  = aws_appfabric_ingestion.test.arn

  processing_configuration {
    audit_log {
      format = "json"
      schema = "raw"
    }
  }

  destination_configuration {
    audit_log {
      destination {
        s3_bucket {
          bucket_name = aws_s3_bucket.test.bucket
          prefix      = %[2]q
        }
      }
    }
  }
}
`, rName, prefix))
}

// One-tag variant.
func testAccIngestionDestinationConfig_tags1(rName, tenantID, serviceAccountToken, tagKey1, tagValue1 string) string {
	return acctest.ConfigCompose(testAccIngestionDestinationConfig_base(rName, tenantID, serviceAccountToken), fmt.Sprintf(`
resource "aws_s3_bucket" "test" {
  bucket = %[1]q
}

resource "aws_appfabric_ingestion_destination" "test" {
  app_bundle_arn = aws_appfabric_app_bundle.test.arn
  ingestion_arn  = aws_appfabric_ingestion.test.arn

  processing_configuration {
    audit_log {
      format = "json"
      schema = "raw"
    }
  }

  destination_configuration {
    audit_log {
      destination {
        s3_bucket {
          bucket_name = aws_s3_bucket.test.bucket
        }
      }
    }
  }

  tags = {
    %[2]q = %[3]q
  }
}
`, rName, tagKey1, tagValue1))
}

// Two-tag variant.
func testAccIngestionDestinationConfig_tags2(rName, tenantID, serviceAccountToken, tagKey1, tagValue1, tagKey2, tagValue2 string) string {
	return acctest.ConfigCompose(testAccIngestionDestinationConfig_base(rName, tenantID, serviceAccountToken), fmt.Sprintf(`
resource "aws_s3_bucket" "test" {
  bucket = %[1]q
}

resource "aws_appfabric_ingestion_destination" "test" {
  app_bundle_arn = aws_appfabric_app_bundle.test.arn
  ingestion_arn  = aws_appfabric_ingestion.test.arn

  processing_configuration {
    audit_log {
      format = "json"
      schema = "raw"
    }
  }

  destination_configuration {
    audit_log {
      destination {
        s3_bucket {
          bucket_name = aws_s3_bucket.test.bucket
        }
      }
    }
  }

  tags = {
    %[2]q = %[3]q
    %[4]q = %[5]q
  }
}
`, rName, tagKey1, tagValue1, tagKey2, tagValue2))
}

// Firehose base fixture: IAM role + (outside this window) a delivery stream.
func testAccIngestionDestinationConfig_baseFirehose(rName string) string {
	return fmt.Sprintf(`
data "aws_caller_identity" "current" {}

data "aws_partition" "current" {}

resource "aws_iam_role" "test" {
  name = %[1]q

  assume_role_policy = <
`, rName)
	// NOTE(review): the source chunk truncates here, mid-heredoc. The remainder
	// of this fixture (the assume-role policy document, the Firehose delivery
	// stream, and the _firehose config builder) lies outside the reviewed
	// window — verify against the full file.
}

// ---- diff boundary: the chunk jumps into a generated <service> tags file here ----
// NOTE(review): tail of getTagsIn; its signature and opening lines are outside
// this window.
		if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 {
			return tags
		}
	}

	return nil
}

// setTagsOut sets applicationsignals service tags in Context.
func setTagsOut(ctx context.Context, tags []awstypes.Tag) {
	if inContext, ok := tftags.FromContext(ctx); ok {
		inContext.TagsOut = option.Some(KeyValueTags(ctx, tags))
	}
}

// updateTags updates applicationsignals service tags.
// The identifier is typically the Amazon Resource Name (ARN), although
// it may also be a different identifier depending on the service.
+func updateTags(ctx context.Context, conn *applicationsignals.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*applicationsignals.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.ApplicationSignals) + if len(removedTags) > 0 { + input := &applicationsignals.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("untagging resource (%s): %w", identifier, err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.ApplicationSignals) + if len(updatedTags) > 0 { + input := &applicationsignals.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: Tags(updatedTags), + } + + _, err := conn.TagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// UpdateTags updates applicationsignals service tags. +// It is called from outside this package. +func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).ApplicationSignalsClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/appmesh/service_endpoint_resolver_gen.go b/internal/service/appmesh/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..973c251fc8c --- /dev/null +++ b/internal/service/appmesh/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package appmesh + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/appmesh/service_endpoints_gen_test.go b/internal/service/appmesh/service_endpoints_gen_test.go index 55fa1ac31aa..4057e3aca52 100644 --- a/internal/service/appmesh/service_endpoints_gen_test.go +++ b/internal/service/appmesh/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(appmesh_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(appmesh_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func 
defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/appmesh/service_package_gen.go b/internal/service/appmesh/service_package_gen.go index 390eaf80254..37c06727ff6 100644 --- a/internal/service/appmesh/service_package_gen.go +++ b/internal/service/appmesh/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package appmesh @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" appmesh_sdkv1 "github.com/aws/aws-sdk-go/service/appmesh" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -148,11 +147,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*a "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return appmesh_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/apprunner/service_endpoint_resolver_gen.go b/internal/service/apprunner/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..af87f59ac6a --- /dev/null +++ b/internal/service/apprunner/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package apprunner + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + apprunner_sdkv2 "github.com/aws/aws-sdk-go-v2/service/apprunner" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ apprunner_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver apprunner_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: apprunner_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params apprunner_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up apprunner endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + 
return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*apprunner_sdkv2.Options) { + return func(o *apprunner_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/apprunner/service_endpoints_gen_test.go b/internal/service/apprunner/service_endpoints_gen_test.go index a65c93e42e0..28192b44c00 100644 --- a/internal/service/apprunner/service_endpoints_gen_test.go +++ b/internal/service/apprunner/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := apprunner_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), apprunner_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
apprunner_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), apprunner_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/apprunner/service_package_gen.go b/internal/service/apprunner/service_package_gen.go index 36370fec44f..2a5092ed121 100644 --- a/internal/service/apprunner/service_package_gen.go +++ b/internal/service/apprunner/service_package_gen.go @@ -1,4 +1,4 @@ -// 
Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package apprunner @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" apprunner_sdkv2 "github.com/aws/aws-sdk-go-v2/service/apprunner" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -107,19 +106,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*apprunner_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return apprunner_sdkv2.NewFromConfig(cfg, func(o *apprunner_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return apprunner_sdkv2.NewFromConfig(cfg, + apprunner_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/appstream/find.go b/internal/service/appstream/find.go index 162f324b75c..6d4d3c66238 100644 --- a/internal/service/appstream/find.go +++ b/internal/service/appstream/find.go @@ -182,3 +182,29 @@ func FindFleetStackAssociation(ctx context.Context, conn *appstream.Client, flee return nil } + +// findImages finds all images from a describe images input +func findImages(ctx context.Context, conn *appstream.Client, 
input *appstream.DescribeImagesInput) ([]awstypes.Image, error) { + var output []awstypes.Image + + pages := appstream.NewDescribeImagesPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.Images...) + } + + return output, nil +} diff --git a/internal/service/appstream/fleet.go b/internal/service/appstream/fleet.go index 53e2741c347..b3522e5d6fc 100644 --- a/internal/service/appstream/fleet.go +++ b/internal/service/appstream/fleet.go @@ -148,10 +148,13 @@ func ResourceFleet() *schema.Resource { ValidateFunc: verify.ValidARN, }, "idle_disconnect_timeout_in_seconds": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - ValidateFunc: validation.IntBetween(60, 360000), + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.Any( + validation.IntBetween(60, 360000), + validation.IntInSlice([]int{0}), + ), }, "image_arn": { Type: schema.TypeString, diff --git a/internal/service/appstream/fleet_test.go b/internal/service/appstream/fleet_test.go index e0310ff7d87..7d8066c59de 100644 --- a/internal/service/appstream/fleet_test.go +++ b/internal/service/appstream/fleet_test.go @@ -55,6 +55,7 @@ func TestAccAppStreamFleet_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, names.AttrInstanceType, instanceType), resource.TestCheckResourceAttr(resourceName, names.AttrState, string(awstypes.FleetStateRunning)), + resource.TestCheckResourceAttr(resourceName, "idle_disconnect_timeout_in_seconds", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "stream_view", string(awstypes.StreamViewApp)), acctest.CheckResourceAttrRFC3339(resourceName, names.AttrCreatedTime), ), diff --git 
a/internal/service/appstream/image_data_source.go b/internal/service/appstream/image_data_source.go new file mode 100644 index 00000000000..0bfad243842 --- /dev/null +++ b/internal/service/appstream/image_data_source.go @@ -0,0 +1,278 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package appstream + +import ( + "context" + "sort" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appstream" + awstypes "github.com/aws/aws-sdk-go-v2/service/appstream/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource(name="Image") +func newDataSourceImage(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceImage{}, nil +} + +const ( + DSNameImage = "Image Data Source" +) + +type dataSourceImage struct { + framework.DataSourceWithConfigure +} + +func (d *dataSourceImage) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name + resp.TypeName = "aws_appstream_image" +} + +func (d *dataSourceImage) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + { + 
resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + + names.AttrARN: schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Computed: true, + Optional: true, + Validators: []validator.String{ + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot(names.AttrName), + }...), + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot("name_regex"), + }...), + }, + }, + "applications": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dsApplications](ctx), + Computed: true, + }, + "appstream_agent_version": schema.StringAttribute{ + Computed: true, + }, + names.AttrMostRecent: schema.BoolAttribute{ + Optional: true, + }, + "base_image_arn": schema.StringAttribute{ + Computed: true, + }, + names.AttrCreatedTime: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + }, + names.AttrDescription: schema.StringAttribute{ + Computed: true, + }, + names.AttrDisplayName: schema.StringAttribute{ + Computed: true, + }, + "image_builder_name": schema.StringAttribute{ + Computed: true, + }, + "image_builder_supported": schema.BoolAttribute{ + Computed: true, + }, + "image_permissions": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dsImagePermissions](ctx), + Computed: true, + }, + names.AttrName: schema.StringAttribute{ + Computed: true, + Optional: true, + Validators: []validator.String{ + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot(names.AttrARN), + }...), + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot("name_regex"), + }...), + }, + }, + "name_regex": schema.StringAttribute{ + CustomType: fwtypes.RegexpType, + Optional: true, + Validators: []validator.String{ + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot(names.AttrName), + }...), + stringvalidator.ConflictsWith(path.Expressions{ + path.MatchRoot(names.AttrARN), + }...), + }, + }, + + "platform": schema.StringAttribute{ + Computed: true, + }, + 
"public_base_image_released_date": schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + }, + names.AttrState: schema.StringAttribute{ + Computed: true, + }, + "state_change_reason": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dsStateChange](ctx), + Computed: true, + }, + names.AttrType: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.VisibilityType](), + Optional: true, + }, + }, + } + } +} +func (d *dataSourceImage) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().AppStreamClient(ctx) + + var data dsImage + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + var describeImagesInput appstream.DescribeImagesInput + if !data.Name.IsNull() { + describeImagesInput.Names = []string{data.Name.ValueString()} + } + if !data.Arn.IsNull() { + describeImagesInput.Arns = []string{data.Arn.ValueString()} + } + images, err := findImages(ctx, conn, &describeImagesInput) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.AppStream, create.ErrActionReading, DSNameImage, data.Arn.String(), err), + err.Error(), + ) + return + } + + var filteredImages []awstypes.Image + if !data.NameRegex.IsNull() { + r := regexache.MustCompile(data.NameRegex.ValueString()) + for _, img := range images { + name := aws.ToString(img.Name) + + // Check for a very rare case where the response would include no + // image name. No name means nothing to attempt a match against, + // therefore we are skipping such image. + if name == "" { + continue + } + + if r.MatchString(name) { + filteredImages = append(filteredImages, img) + } + } + } else { + filteredImages = images[:] + } + + if len(filteredImages) < 1 { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.AppStream, create.ErrActionReading, DSNameImage, data.Arn.String(), err), + "Your query returned no results. 
Please change your search criteria and try again.", + ) + return + } + + if len(filteredImages) > 1 { + if !data.MostRecent.ValueBool() { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.AppStream, create.ErrActionReading, DSNameImage, data.Arn.String(), err), + "Your query returned more than one result. Please try a more specific search criteria, or set `most_recent` attribute to true.", + ) + return + } + sort.Slice(filteredImages, func(i, j int) bool { + itime, _ := time.Parse(time.RFC3339, images[i].CreatedTime.Month().String()) + jtime, _ := time.Parse(time.RFC3339, images[j].CreatedTime.Month().String()) + return itime.Unix() > jtime.Unix() + }) + } + image := filteredImages[0] + + data.Type = fwtypes.StringEnumValue[awstypes.VisibilityType](image.Visibility) + resp.Diagnostics.Append(flex.Flatten(ctx, &image, &data)...) + if resp.Diagnostics.HasError() { + return + } + + if image.PublicBaseImageReleasedDate != nil { + data.PubilcBaseImageReleasedDate = timetypes.NewRFC3339TimeValue(*image.PublicBaseImageReleasedDate) + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type dsApplications struct { + AppBlockArn types.String `tfsdk:"app_block_arn"` + Arn fwtypes.ARN `tfsdk:"arn"` + CreatedTime timetypes.RFC3339 `tfsdk:"created_time"` + Description types.String `tfsdk:"description"` + DisplayName types.String `tfsdk:"display_name"` + Enabled types.Bool `tfsdk:"enabled"` + IconS3Location fwtypes.ListNestedObjectValueOf[dsIconS3] `tfsdk:"icon_s3_location"` + IconUrl types.String `tfsdk:"icon_url"` + InstanceFamilies fwtypes.ListValueOf[types.String] `tfsdk:"instance_families"` + LaunchParameters types.String `tfsdk:"launch_parameters"` + LaunchPath types.String `tfsdk:"launch_path"` + Metadata fwtypes.MapValueOf[types.String] `tfsdk:"metadata"` + Name types.String `tfsdk:"name"` + Platforms fwtypes.ListValueOf[types.String] `tfsdk:"platforms"` + WorkingDirectory types.String `tfsdk:"working_directory"` +} + +type dsIconS3 struct { + S3Bucket types.String `tfsdk:"s3_bucket"` + S3Key types.String `tfsdk:"s3_key"` +} + +type dsStateChange struct { + Code types.String `tfsdk:"code"` + Message types.String `tfsdk:"message"` +} + +type dsImage struct { + Applications fwtypes.ListNestedObjectValueOf[dsApplications] `tfsdk:"applications"` + AppStreamAgentVersion types.String `tfsdk:"appstream_agent_version"` + Arn fwtypes.ARN `tfsdk:"arn"` + BaseImageArn types.String `tfsdk:"base_image_arn"` + CreatedTime timetypes.RFC3339 `tfsdk:"created_time"` + Description types.String `tfsdk:"description"` + DisplayName types.String `tfsdk:"display_name"` + ImageBuilderName types.String `tfsdk:"image_builder_name"` + ImageBuilderSupported types.Bool `tfsdk:"image_builder_supported"` + ImagePermissions fwtypes.ListNestedObjectValueOf[dsImagePermissions] `tfsdk:"image_permissions"` + MostRecent types.Bool `tfsdk:"most_recent"` + Name types.String `tfsdk:"name"` + NameRegex fwtypes.Regexp `tfsdk:"name_regex"` + Platform types.String `tfsdk:"platform"` + PubilcBaseImageReleasedDate timetypes.RFC3339 `tfsdk:"public_base_image_released_date"` + State 
types.String `tfsdk:"state"` + StateChangeReason fwtypes.ListNestedObjectValueOf[dsStateChange] `tfsdk:"state_change_reason"` + Type fwtypes.StringEnum[awstypes.VisibilityType] `tfsdk:"type"` +} + +type dsImagePermissions struct { + AllowFleet types.Bool `tfsdk:"allow_fleet"` + AllowImageBuilder types.Bool `tfsdk:"allow_image_builder"` +} diff --git a/internal/service/appstream/image_data_source_test.go b/internal/service/appstream/image_data_source_test.go new file mode 100644 index 00000000000..96f918c03bb --- /dev/null +++ b/internal/service/appstream/image_data_source_test.go @@ -0,0 +1,60 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package appstream_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccAppStreamImageDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + dataSourceName := "data.aws_appstream_image.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.AppStreamEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.AppStreamServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccImageDataSourceConfig_basic(), + Check: resource.ComposeTestCheckFunc( + + resource.TestCheckResourceAttrSet(dataSourceName, "applications.#"), + resource.TestCheckResourceAttrSet(dataSourceName, "appstream_agent_version"), + resource.TestCheckResourceAttrSet(dataSourceName, names.AttrARN), + resource.TestCheckResourceAttrSet(dataSourceName, names.AttrCreatedTime), + resource.TestCheckResourceAttrSet(dataSourceName, names.AttrDescription), + resource.TestCheckResourceAttrSet(dataSourceName, 
names.AttrDisplayName), + resource.TestCheckResourceAttrSet(dataSourceName, "image_builder_supported"), + resource.TestCheckResourceAttrSet(dataSourceName, names.AttrName), + resource.TestCheckResourceAttrSet(dataSourceName, "name_regex"), + resource.TestCheckResourceAttrSet(dataSourceName, "platform"), + resource.TestCheckResourceAttrSet(dataSourceName, "public_base_image_released_date"), + resource.TestCheckResourceAttrSet(dataSourceName, names.AttrType), + ), + }, + }, + }) +} + +// name = "AppStream-WinServer2019-06-17-2024" +func testAccImageDataSourceConfig_basic() string { + return (` +data "aws_appstream_image" "test" { + name_regex = "^AppStream-WinServer.*$" + type = "PUBLIC" + most_recent = true +} +`) +} diff --git a/internal/service/appstream/service_endpoint_resolver_gen.go b/internal/service/appstream/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..3514cc65271 --- /dev/null +++ b/internal/service/appstream/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package appstream + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + appstream_sdkv2 "github.com/aws/aws-sdk-go-v2/service/appstream" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ appstream_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver appstream_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: appstream_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params appstream_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up appstream endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + 
return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*appstream_sdkv2.Options) { + return func(o *appstream_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/appstream/service_endpoints_gen_test.go b/internal/service/appstream/service_endpoints_gen_test.go index 82235597b5e..2d22f4739af 100644 --- a/internal/service/appstream/service_endpoints_gen_test.go +++ b/internal/service/appstream/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := appstream_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), appstream_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
appstream_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), appstream_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving appstream default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving appstream FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up appstream endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/appstream/service_package_gen.go b/internal/service/appstream/service_package_gen.go index 00740848b9f..8e004b4841a 100644 --- a/internal/service/appstream/service_package_gen.go +++ b/internal/service/appstream/service_package_gen.go @@ -1,4 +1,4 @@ -// 
Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package appstream @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" appstream_sdkv2 "github.com/aws/aws-sdk-go-v2/service/appstream" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -16,7 +15,12 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { - return []*types.ServicePackageFrameworkDataSource{} + return []*types.ServicePackageFrameworkDataSource{ + { + Factory: newDataSourceImage, + Name: "Image", + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { @@ -80,19 +84,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*appstream_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return appstream_sdkv2.NewFromConfig(cfg, func(o *appstream_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return appstream_sdkv2.NewFromConfig(cfg, + appstream_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { 
diff --git a/internal/service/appsync/api_cache.go b/internal/service/appsync/api_cache.go index 188e409268a..dd8ad2f6f81 100644 --- a/internal/service/appsync/api_cache.go +++ b/internal/service/appsync/api_cache.go @@ -6,47 +6,42 @@ package appsync import ( "context" "log" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_appsync_api_cache") -func ResourceAPICache() *schema.Resource { +// @SDKResource("aws_appsync_api_cache", name="API Cache") +func resourceAPICache() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAPICacheCreate, ReadWithoutTimeout: resourceAPICacheRead, UpdateWithoutTimeout: resourceAPICacheUpdate, DeleteWithoutTimeout: resourceAPICacheDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ - "api_id": { - Type: schema.TypeString, - Required: true, - }, "api_caching_behavior": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.ApiCachingBehavior_Values(), false), + Type: schema.TypeString, + Required: true, + 
ValidateDiagFunc: enum.Validate[awstypes.ApiCachingBehavior](), }, - names.AttrType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.ApiCacheType_Values(), false), - }, - "ttl": { - Type: schema.TypeInt, + "api_id": { + Type: schema.TypeString, Required: true, }, "at_rest_encryption_enabled": { @@ -59,40 +54,49 @@ func ResourceAPICache() *schema.Resource { Optional: true, ForceNew: true, }, + "ttl": { + Type: schema.TypeInt, + Required: true, + }, + names.AttrType: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.ApiCacheType](), + }, }, } } func resourceAPICacheCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) apiID := d.Get("api_id").(string) - - params := &appsync.CreateApiCacheInput{ + input := &appsync.CreateApiCacheInput{ + ApiCachingBehavior: awstypes.ApiCachingBehavior(d.Get("api_caching_behavior").(string)), ApiId: aws.String(apiID), - Type: aws.String(d.Get(names.AttrType).(string)), - ApiCachingBehavior: aws.String(d.Get("api_caching_behavior").(string)), - Ttl: aws.Int64(int64(d.Get("ttl").(int))), + Ttl: int64(d.Get("ttl").(int)), + Type: awstypes.ApiCacheType(d.Get(names.AttrType).(string)), } if v, ok := d.GetOk("at_rest_encryption_enabled"); ok { - params.AtRestEncryptionEnabled = aws.Bool(v.(bool)) + input.AtRestEncryptionEnabled = v.(bool) } if v, ok := d.GetOk("transit_encryption_enabled"); ok { - params.TransitEncryptionEnabled = aws.Bool(v.(bool)) + input.TransitEncryptionEnabled = v.(bool) } - _, err := conn.CreateApiCacheWithContext(ctx, params) + _, err := conn.CreateApiCache(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Appsync API Cache: %s", err) + return sdkdiag.AppendErrorf(diags, "creating Appsync API Cache (%s): %s", apiID, err) } d.SetId(apiID) - if 
err := waitAPICacheAvailable(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Appsync API Cache (%s) availability: %s", d.Id(), err) + if _, err := waitAPICacheAvailable(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Appsync API Cache (%s) create: %s", d.Id(), err) } return append(diags, resourceAPICacheRead(ctx, d, meta)...) @@ -100,9 +104,10 @@ func resourceAPICacheCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceAPICacheRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) + + cache, err := findAPICacheByID(ctx, conn, d.Id()) - cache, err := FindAPICacheByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] AppSync API Cache (%s) not found, removing from state", d.Id()) d.SetId("") @@ -110,46 +115,47 @@ func resourceAPICacheRead(ctx context.Context, d *schema.ResourceData, meta inte } if err != nil { - return sdkdiag.AppendErrorf(diags, "getting Appsync API Cache %q: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Appsync API Cache (%s): %s", d.Id(), err) } - d.Set("api_id", d.Id()) - d.Set(names.AttrType, cache.Type) d.Set("api_caching_behavior", cache.ApiCachingBehavior) - d.Set("ttl", cache.Ttl) + d.Set("api_id", d.Id()) d.Set("at_rest_encryption_enabled", cache.AtRestEncryptionEnabled) d.Set("transit_encryption_enabled", cache.TransitEncryptionEnabled) + d.Set("ttl", cache.Ttl) + d.Set(names.AttrType, cache.Type) return diags } func resourceAPICacheUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - params := &appsync.UpdateApiCacheInput{ + input := &appsync.UpdateApiCacheInput{ 
ApiId: aws.String(d.Id()), } - if d.HasChange(names.AttrType) { - params.Type = aws.String(d.Get(names.AttrType).(string)) - } - if d.HasChange("api_caching_behavior") { - params.ApiCachingBehavior = aws.String(d.Get("api_caching_behavior").(string)) + input.ApiCachingBehavior = awstypes.ApiCachingBehavior(d.Get("api_caching_behavior").(string)) } if d.HasChange("ttl") { - params.Ttl = aws.Int64(int64(d.Get("ttl").(int))) + input.Ttl = int64(d.Get("ttl").(int)) + } + + if d.HasChange(names.AttrType) { + input.Type = awstypes.ApiCacheType(d.Get(names.AttrType).(string)) } - _, err := conn.UpdateApiCacheWithContext(ctx, params) + _, err := conn.UpdateApiCache(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "updating Appsync API Cache %q: %s", d.Id(), err) } - if err := waitAPICacheAvailable(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Appsync API Cache (%s) availability: %s", d.Id(), err) + if _, err := waitAPICacheAvailable(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Appsync API Cache (%s) update: %s", d.Id(), err) } return append(diags, resourceAPICacheRead(ctx, d, meta)...) 
@@ -157,22 +163,105 @@ func resourceAPICacheUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceAPICacheDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - input := &appsync.DeleteApiCacheInput{ + log.Printf("[INFO] Deleting Appsync API Cache: %s", d.Id()) + _, err := conn.DeleteApiCache(ctx, &appsync.DeleteApiCacheInput{ ApiId: aws.String(d.Id()), + }) + + if errs.IsA[*awstypes.NotFoundException](err) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting Appsync API Cache (%s): %s", d.Id(), err) + } + + if _, err := waitAPICacheDeleted(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Appsync API Cache (%s) delete: %s", d.Id(), err) + } + + return diags +} + +func findAPICacheByID(ctx context.Context, conn *appsync.Client, id string) (*awstypes.ApiCache, error) { + input := &appsync.GetApiCacheInput{ + ApiId: aws.String(id), } - _, err := conn.DeleteApiCacheWithContext(ctx, input) + + output, err := conn.GetApiCache(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return diags + return nil, err + } + + if output == nil || output.ApiCache == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ApiCache, nil +} + +func statusAPICache(ctx context.Context, conn *appsync.Client, name string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findAPICacheByID(ctx, conn, name) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err } - return sdkdiag.AppendErrorf(diags, "deleting Appsync API Cache: %s", err) + + return 
output, string(output.Status), nil } +} - if err := waitAPICacheDeleted(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Appsync API Cache (%s) to be deleted: %s", d.Id(), err) +func waitAPICacheAvailable(ctx context.Context, conn *appsync.Client, id string) (*awstypes.ApiCache, error) { //nolint:unparam + const ( + timeout = 60 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ApiCacheStatusCreating, awstypes.ApiCacheStatusModifying), + Target: enum.Slice(awstypes.ApiCacheStatusAvailable), + Refresh: statusAPICache(ctx, conn, id), + Timeout: timeout, } - return diags + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.ApiCache); ok { + return output, err + } + + return nil, err +} + +func waitAPICacheDeleted(ctx context.Context, conn *appsync.Client, id string) (*awstypes.ApiCache, error) { + const ( + timeout = 60 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ApiCacheStatusDeleting), + Target: []string{}, + Refresh: statusAPICache(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.ApiCache); ok { + return output, err + } + + return nil, err } diff --git a/internal/service/appsync/api_cache_test.go b/internal/service/appsync/api_cache_test.go index dd4be59fc4c..e3a7c4f8876 100644 --- a/internal/service/appsync/api_cache_test.go +++ b/internal/service/appsync/api_cache_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,12 +21,12 @@ import ( func testAccAPICache_basic(t *testing.T) { ctx := acctest.Context(t) - var 
apiCache appsync.ApiCache + var apiCache awstypes.ApiCache resourceName := "aws_appsync_api_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAPICacheDestroy(ctx), @@ -51,12 +51,12 @@ func testAccAPICache_basic(t *testing.T) { func testAccAPICache_disappears(t *testing.T) { ctx := acctest.Context(t) - var apiCache appsync.ApiCache + var apiCache awstypes.ApiCache resourceName := "aws_appsync_api_cache.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAPICacheDestroy(ctx), @@ -75,40 +75,46 @@ func testAccAPICache_disappears(t *testing.T) { func testAccCheckAPICacheDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_appsync_api_cache" { continue } _, err := tfappsync.FindAPICacheByID(ctx, conn, rs.Primary.ID) - if err == nil { - if tfresource.NotFound(err) { - return nil - } + + if tfresource.NotFound(err) { + continue + } + + if err != nil { return err } - return nil + return 
fmt.Errorf("Appsync API Cache %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckAPICacheExists(ctx context.Context, resourceName string, apiCache *appsync.ApiCache) resource.TestCheckFunc { +func testAccCheckAPICacheExists(ctx context.Context, n string, v *awstypes.ApiCache) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Appsync Api Cache Not found in state: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) - cache, err := tfappsync.FindAPICacheByID(ctx, conn, rs.Primary.ID) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + + output, err := tfappsync.FindAPICacheByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - *apiCache = *cache + *v = *output return nil } diff --git a/internal/service/appsync/api_key.go b/internal/service/appsync/api_key.go index fbdf814b82a..fbdca7da916 100644 --- a/internal/service/appsync/api_key.go +++ b/internal/service/appsync/api_key.go @@ -10,43 +10,52 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" 
+ "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_appsync_api_key") -func ResourceAPIKey() *schema.Resource { +// @SDKResource("aws_appsync_api_key", name="API Key") +func resourceAPIKey() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAPIKeyCreate, ReadWithoutTimeout: resourceAPIKeyRead, UpdateWithoutTimeout: resourceAPIKeyUpdate, DeleteWithoutTimeout: resourceAPIKeyDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ + "api_id": { + Type: schema.TypeString, + Required: true, + }, + "api_key_id": { + Type: schema.TypeString, + Computed: true, + }, names.AttrDescription: { Type: schema.TypeString, Optional: true, Default: "Managed by Terraform", }, - "api_id": { - Type: schema.TypeString, - Required: true, - }, "expires": { Type: schema.TypeString, Optional: true, DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - // Ignore unsetting value + // Ignore unsetting value. 
if old != "" && new == "" { return true } @@ -65,75 +74,85 @@ func ResourceAPIKey() *schema.Resource { func resourceAPIKeyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) apiID := d.Get("api_id").(string) - - params := &appsync.CreateApiKeyInput{ + input := &appsync.CreateApiKeyInput{ ApiId: aws.String(apiID), Description: aws.String(d.Get(names.AttrDescription).(string)), } + if v, ok := d.GetOk("expires"); ok { t, _ := time.Parse(time.RFC3339, v.(string)) - params.Expires = aws.Int64(t.Unix()) + input.Expires = t.Unix() } - resp, err := conn.CreateApiKeyWithContext(ctx, params) + + output, err := conn.CreateApiKey(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "creating Appsync API Key: %s", err) } - d.SetId(fmt.Sprintf("%s:%s", apiID, aws.StringValue(resp.ApiKey.Id))) + d.SetId(apiKeyCreateResourceID(apiID, aws.ToString(output.ApiKey.Id))) + return append(diags, resourceAPIKeyRead(ctx, d, meta)...) 
} func resourceAPIKeyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, keyID, err := DecodeAPIKeyID(d.Id()) + apiID, keyID, err := apiKeyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Appsync API Key (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - key, err := GetAPIKey(ctx, apiID, keyID, conn) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Appsync API Key (%s): %s", d.Id(), err) - } - if key == nil && !d.IsNewResource() { + key, err := findAPIKeyByTwoPartKey(ctx, conn, apiID, keyID) + + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] AppSync API Key (%s) not found, removing from state", d.Id()) d.SetId("") return diags } + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Appsync API Key (%s): %s", d.Id(), err) + } + d.Set("api_id", apiID) - d.Set(names.AttrKey, key.Id) + d.Set("api_key_id", keyID) d.Set(names.AttrDescription, key.Description) - d.Set("expires", time.Unix(aws.Int64Value(key.Expires), 0).UTC().Format(time.RFC3339)) + d.Set("expires", time.Unix(key.Expires, 0).UTC().Format(time.RFC3339)) + d.Set(names.AttrKey, key.Id) + return diags } func resourceAPIKeyUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, keyID, err := DecodeAPIKeyID(d.Id()) + apiID, keyID, err := apiKeyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Appsync API Key (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - params := &appsync.UpdateApiKeyInput{ + input := &appsync.UpdateApiKeyInput{ ApiId: aws.String(apiID), Id: aws.String(keyID), } + if 
d.HasChange(names.AttrDescription) { - params.Description = aws.String(d.Get(names.AttrDescription).(string)) + input.Description = aws.String(d.Get(names.AttrDescription).(string)) } + if d.HasChange("expires") { t, _ := time.Parse(time.RFC3339, d.Get("expires").(string)) - params.Expires = aws.Int64(t.Unix()) + input.Expires = t.Unix() } - _, err = conn.UpdateApiKeyWithContext(ctx, params) + _, err = conn.UpdateApiKey(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "updating Appsync API Key (%s): %s", d.Id(), err) } @@ -143,54 +162,96 @@ func resourceAPIKeyUpdate(ctx context.Context, d *schema.ResourceData, meta inte func resourceAPIKeyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, keyID, err := DecodeAPIKeyID(d.Id()) + apiID, keyID, err := apiKeyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Appsync API Key (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - input := &appsync.DeleteApiKeyInput{ + log.Printf("[INFO] Deleting Appsync API Key: %s", d.Id()) + _, err = conn.DeleteApiKey(ctx, &appsync.DeleteApiKeyInput{ ApiId: aws.String(apiID), Id: aws.String(keyID), + }) + + if errs.IsA[*awstypes.NotFoundException](err) { + return diags } - _, err = conn.DeleteApiKeyWithContext(ctx, input) + if err != nil { - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return diags - } return sdkdiag.AppendErrorf(diags, "deleting Appsync API Key (%s): %s", d.Id(), err) } return diags } -func DecodeAPIKeyID(id string) (string, string, error) { - parts := strings.Split(id, ":") - if len(parts) != 2 { - return "", "", fmt.Errorf("Unexpected format of ID (%q), expected API-ID:API-KEY-ID", id) +const apiKeyResourceIDSeparator = ":" + +func apiKeyCreateResourceID(apiID, keyID string) string { + parts := 
[]string{apiID, keyID} + id := strings.Join(parts, apiKeyResourceIDSeparator) + + return id +} + +func apiKeyParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, apiKeyResourceIDSeparator) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected API-ID%[2]sAPI-KEY-ID", id, apiKeyResourceIDSeparator) } + return parts[0], parts[1], nil } -func GetAPIKey(ctx context.Context, apiID, keyID string, conn *appsync.AppSync) (*appsync.ApiKey, error) { - input := &appsync.ListApiKeysInput{ - ApiId: aws.String(apiID), +func findAPIKey(ctx context.Context, conn *appsync.Client, input *appsync.ListApiKeysInput, filter tfslices.Predicate[*awstypes.ApiKey]) (*awstypes.ApiKey, error) { + output, err := findAPIKeys(ctx, conn, input, filter) + + if err != nil { + return nil, err } - for { - resp, err := conn.ListApiKeysWithContext(ctx, input) - if err != nil { - return nil, err + + return tfresource.AssertSingleValueResult(output) +} + +func findAPIKeys(ctx context.Context, conn *appsync.Client, input *appsync.ListApiKeysInput, filter tfslices.Predicate[*awstypes.ApiKey]) ([]awstypes.ApiKey, error) { + var output []awstypes.ApiKey + + err := listAPIKeysPages(ctx, conn, input, func(page *appsync.ListApiKeysOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - for _, apiKey := range resp.ApiKeys { - if aws.StringValue(apiKey.Id) == keyID { - return apiKey, nil + + for _, v := range page.ApiKeys { + if filter(&v) { + output = append(output, v) } } - if resp.NextToken == nil { - break + + return !lastPage + }) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } - input.NextToken = resp.NextToken } - return nil, nil + + if err != nil { + return nil, err + } + + return output, nil +} + +func findAPIKeyByTwoPartKey(ctx context.Context, conn *appsync.Client, apiID, keyID string) 
(*awstypes.ApiKey, error) { + input := &appsync.ListApiKeysInput{ + ApiId: aws.String(apiID), + } + + return findAPIKey(ctx, conn, input, func(v *awstypes.ApiKey) bool { + return aws.ToString(v.Id) == keyID + }) } diff --git a/internal/service/appsync/api_key_test.go b/internal/service/appsync/api_key_test.go index e552094a157..b7331674857 100644 --- a/internal/service/appsync/api_key_test.go +++ b/internal/service/appsync/api_key_test.go @@ -10,27 +10,26 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfappsync "github.com/hashicorp/terraform-provider-aws/internal/service/appsync" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func testAccAPIKey_basic(t *testing.T) { ctx := acctest.Context(t) - var apiKey appsync.ApiKey + var apiKey awstypes.ApiKey dateAfterSevenDays := time.Now().UTC().Add(time.Hour * 24 * time.Duration(7)).Truncate(time.Hour) resourceName := "aws_appsync_api_key.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAPIKeyDestroy(ctx), @@ 
-55,12 +54,12 @@ func testAccAPIKey_basic(t *testing.T) { func testAccAPIKey_description(t *testing.T) { ctx := acctest.Context(t) - var apiKey appsync.ApiKey + var apiKey awstypes.ApiKey resourceName := "aws_appsync_api_key.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAPIKeyDestroy(ctx), @@ -90,14 +89,14 @@ func testAccAPIKey_description(t *testing.T) { func testAccAPIKey_expires(t *testing.T) { ctx := acctest.Context(t) - var apiKey appsync.ApiKey + var apiKey awstypes.ApiKey dateAfterTenDays := time.Now().UTC().Add(time.Hour * 24 * time.Duration(10)).Truncate(time.Hour) dateAfterTwentyDays := time.Now().UTC().Add(time.Hour * 24 * time.Duration(20)).Truncate(time.Hour) resourceName := "aws_appsync_api_key.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckAPIKeyDestroy(ctx), @@ -127,66 +126,54 @@ func testAccAPIKey_expires(t *testing.T) { func testAccCheckAPIKeyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + for _, rs := range 
s.RootModule().Resources { if rs.Type != "aws_appsync_api_key" { continue } - apiID, keyID, err := tfappsync.DecodeAPIKeyID(rs.Primary.ID) - if err != nil { - return err - } + _, err := tfappsync.FindAPIKeyByTwoPartKey(ctx, conn, rs.Primary.Attributes["api_id"], rs.Primary.Attributes["api_key_id"]) - apiKey, err := tfappsync.GetAPIKey(ctx, apiID, keyID, conn) - if err == nil { - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return nil - } - return err + if tfresource.NotFound(err) { + continue } - if apiKey != nil && aws.StringValue(apiKey.Id) == keyID { - return fmt.Errorf("Appsync API Key ID %q still exists", rs.Primary.ID) + if err != nil { + return err } - return nil + return fmt.Errorf("Appsync API Key %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckAPIKeyExists(ctx context.Context, resourceName string, apiKey *appsync.ApiKey) resource.TestCheckFunc { +func testAccCheckAPIKeyExists(ctx context.Context, n string, v *awstypes.ApiKey) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Appsync API Key Not found in state: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - apiID, keyID, err := tfappsync.DecodeAPIKeyID(rs.Primary.ID) - if err != nil { - return err - } + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + + output, err := tfappsync.FindAPIKeyByTwoPartKey(ctx, conn, rs.Primary.Attributes["api_id"], rs.Primary.Attributes["api_key_id"]) - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) - key, err := tfappsync.GetAPIKey(ctx, apiID, keyID, conn) if err != nil { return err } - if key == nil || key.Id == nil { - return fmt.Errorf("Appsync API Key %q not found", rs.Primary.ID) - } - - *apiKey = *key + *v = *output return nil } } -func testAccCheckAPIKeyExpiresDate(apiKey *appsync.ApiKey, expectedTime time.Time) 
resource.TestCheckFunc { +func testAccCheckAPIKeyExpiresDate(apiKey *awstypes.ApiKey, expectedTime time.Time) resource.TestCheckFunc { return func(s *terraform.State) error { - apiKeyExpiresTime := time.Unix(aws.Int64Value(apiKey.Expires), 0) + apiKeyExpiresTime := time.Unix(apiKey.Expires, 0) if !apiKeyExpiresTime.Equal(expectedTime) { return fmt.Errorf("Appsync API Key expires difference: got %s and expected %s", apiKeyExpiresTime.Format(time.RFC3339), expectedTime.Format(time.RFC3339)) } diff --git a/internal/service/appsync/appsync_test.go b/internal/service/appsync/appsync_test.go index 16622c4122e..a25ca0d604e 100644 --- a/internal/service/appsync/appsync_test.go +++ b/internal/service/appsync/appsync_test.go @@ -4,7 +4,6 @@ package appsync_test import ( - "os" "testing" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -113,15 +112,3 @@ func TestAccAppSync_serial(t *testing.T) { acctest.RunSerialTests2Levels(t, testCases, 0) } - -func getCertDomain(t *testing.T) string { - value := os.Getenv("AWS_APPSYNC_DOMAIN_NAME_CERTIFICATE_DOMAIN") - if value == "" { - t.Skip( - "Environment variable AWS_APPSYNC_DOMAIN_NAME_CERTIFICATE_DOMAIN is not set. 
" + - "This environment variable must be set to any non-empty value " + - "to enable the test.") - } - - return value -} diff --git a/internal/service/appsync/datasource.go b/internal/service/appsync/datasource.go index 5d84ef17a3a..b520af2976f 100644 --- a/internal/service/appsync/datasource.go +++ b/internal/service/appsync/datasource.go @@ -10,22 +10,25 @@ import ( "strings" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_appsync_datasource") -func ResourceDataSource() *schema.Resource { +// @SDKResource("aws_appsync_datasource", name="Data Source") +func resourceDataSource() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDataSourceCreate, ReadWithoutTimeout: resourceDataSourceRead, @@ -144,10 +147,10 @@ func ResourceDataSource() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "authorization_type": { - Type: schema.TypeString, - Optional: true, - Default: appsync.AuthorizationTypeAwsIam, 
- ValidateFunc: validation.StringInSlice(appsync.AuthorizationType_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: awstypes.AuthorizationTypeAwsIam, + ValidateDiagFunc: enum.Validate[awstypes.AuthorizationType](), }, "aws_iam_config": { Type: schema.TypeList, @@ -254,10 +257,10 @@ func ResourceDataSource() *schema.Resource { }, }, names.AttrSourceType: { - Type: schema.TypeString, - Optional: true, - Default: appsync.RelationalDatabaseSourceTypeRdsHttpEndpoint, - ValidateFunc: validation.StringInSlice(appsync.RelationalDatabaseSourceType_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: awstypes.RelationalDatabaseSourceTypeRdsHttpEndpoint, + ValidateDiagFunc: enum.Validate[awstypes.RelationalDatabaseSourceType](), }, }, }, @@ -269,12 +272,10 @@ func ResourceDataSource() *schema.Resource { ValidateFunc: verify.ValidARN, }, names.AttrType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.DataSourceType_Values(), true), - StateFunc: func(v interface{}) string { - return strings.ToUpper(v.(string)) - }, + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.DataSourceType](), + StateFunc: sdkv2.ToUpperSchemaStateFunc, }, }, } @@ -282,14 +283,16 @@ func ResourceDataSource() *schema.Resource { func resourceDataSourceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) region := meta.(*conns.AWSClient).Region + apiID := d.Get("api_id").(string) name := d.Get(names.AttrName).(string) + id := dataSourceCreateResourceID(apiID, name) input := &appsync.CreateDataSourceInput{ - ApiId: aws.String(d.Get("api_id").(string)), + ApiId: aws.String(apiID), Name: aws.String(name), - Type: aws.String(d.Get(names.AttrType).(string)), + Type: awstypes.DataSourceType(d.Get(names.AttrType).(string)), 
} if v, ok := d.GetOk(names.AttrDescription); ok { @@ -328,28 +331,27 @@ func resourceDataSourceCreate(ctx context.Context, d *schema.ResourceData, meta input.ServiceRoleArn = aws.String(v.(string)) } - _, err := conn.CreateDataSourceWithContext(ctx, input) + _, err := conn.CreateDataSource(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Appsync Data Source (%s): %s", name, err) + return sdkdiag.AppendErrorf(diags, "creating Appsync Data Source (%s): %s", id, err) } - d.SetId(d.Get("api_id").(string) + "-" + d.Get(names.AttrName).(string)) + d.SetId(id) return append(diags, resourceDataSourceRead(ctx, d, meta)...) } func resourceDataSourceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) - - apiID, name, err := DecodeID(d.Id()) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) + apiID, name, err := dataSourceParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - dataSource, err := FindDataSourceByTwoPartKey(ctx, conn, apiID, name) + dataSource, err := findDataSourceByTwoPartKey(ctx, conn, apiID, name) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] AppSync Datasource %q not found, removing from state", d.Id()) @@ -394,11 +396,10 @@ func resourceDataSourceRead(ctx context.Context, d *schema.ResourceData, meta in func resourceDataSourceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) region := meta.(*conns.AWSClient).Region - apiID, name, err := DecodeID(d.Id()) - + apiID, name, err := dataSourceParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -406,7 +407,7 @@ func resourceDataSourceUpdate(ctx context.Context, d *schema.ResourceData, meta input := 
&appsync.UpdateDataSourceInput{ ApiId: aws.String(apiID), Name: aws.String(name), - Type: aws.String(d.Get(names.AttrType).(string)), + Type: awstypes.DataSourceType(d.Get(names.AttrType).(string)), } if v, ok := d.GetOk(names.AttrDescription); ok { @@ -441,7 +442,7 @@ func resourceDataSourceUpdate(ctx context.Context, d *schema.ResourceData, meta input.ServiceRoleArn = aws.String(v.(string)) } - _, err = conn.UpdateDataSourceWithContext(ctx, input) + _, err = conn.UpdateDataSource(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Appsync Data Source (%s): %s", d.Id(), err) @@ -452,22 +453,20 @@ func resourceDataSourceUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceDataSourceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) - - apiID, name, err := DecodeID(d.Id()) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) + apiID, name, err := dataSourceParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - input := &appsync.DeleteDataSourceInput{ + log.Printf("[INFO] Deleting Appsync Data Source: %s", d.Id()) + _, err = conn.DeleteDataSource(ctx, &appsync.DeleteDataSourceInput{ ApiId: aws.String(apiID), Name: aws.String(name), - } - - _, err = conn.DeleteDataSourceWithContext(ctx, input) + }) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } @@ -478,15 +477,34 @@ func resourceDataSourceDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func FindDataSourceByTwoPartKey(ctx context.Context, conn *appsync.AppSync, apiID, name string) (*appsync.DataSource, error) { +const dataSourceResourceIDSeparator = "-" + +func dataSourceCreateResourceID(apiID, name string) string { + parts := []string{apiID, name} + id := strings.Join(parts, dataSourceResourceIDSeparator) + + return id +} + 
+func dataSourceParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, dataSourceResourceIDSeparator, 2) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected API-ID%[2]sDATA-SOURCE-NAME", id, dataSourceResourceIDSeparator) + } + + return parts[0], parts[1], nil +} + +func findDataSourceByTwoPartKey(ctx context.Context, conn *appsync.Client, apiID, name string) (*awstypes.DataSource, error) { input := &appsync.GetDataSourceInput{ ApiId: aws.String(apiID), Name: aws.String(name), } - output, err := conn.GetDataSourceWithContext(ctx, input) + output, err := conn.GetDataSource(ctx, input) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -504,421 +522,391 @@ func FindDataSourceByTwoPartKey(ctx context.Context, conn *appsync.AppSync, apiI return output.DataSource, nil } -func DecodeID(id string) (string, string, error) { - idParts := strings.SplitN(id, "-", 2) - if len(idParts) != 2 { - return "", "", fmt.Errorf("expected ID in format ApiID-DataSourceName, received: %s", id) - } - return idParts[0], idParts[1], nil -} - -func expandDynamoDBDataSourceConfig(l []interface{}, currentRegion string) *appsync.DynamodbDataSourceConfig { - if len(l) == 0 || l[0] == nil { +func expandDynamoDBDataSourceConfig(tfList []interface{}, currentRegion string) *awstypes.DynamodbDataSourceConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.DynamodbDataSourceConfig{ + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.DynamodbDataSourceConfig{ AwsRegion: aws.String(currentRegion), - TableName: aws.String(configured[names.AttrTableName].(string)), + TableName: aws.String(tfMap[names.AttrTableName].(string)), } - if v, ok := 
configured[names.AttrRegion]; ok && v.(string) != "" { - result.AwsRegion = aws.String(v.(string)) + if v, ok := tfMap["delta_sync_config"].([]interface{}); ok && len(v) > 0 { + apiObject.DeltaSyncConfig = expandDeltaSyncConfig(v) } - if v, ok := configured["use_caller_credentials"]; ok { - result.UseCallerCredentials = aws.Bool(v.(bool)) + if v, ok := tfMap[names.AttrRegion]; ok && v.(string) != "" { + apiObject.AwsRegion = aws.String(v.(string)) } - if v, ok := configured["versioned"]; ok { - result.Versioned = aws.Bool(v.(bool)) + if v, ok := tfMap["use_caller_credentials"]; ok { + apiObject.UseCallerCredentials = v.(bool) } - if v, ok := configured["delta_sync_config"].([]interface{}); ok && len(v) > 0 { - result.DeltaSyncConfig = expandDynamoDBDataSourceDeltaSyncConfig(v) + if v, ok := tfMap["versioned"]; ok { + apiObject.Versioned = v.(bool) } - return result + return apiObject } -func expandDynamoDBDataSourceDeltaSyncConfig(l []interface{}) *appsync.DeltaSyncConfig { - if len(l) == 0 || l[0] == nil { +func expandDeltaSyncConfig(tfList []interface{}) *awstypes.DeltaSyncConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.DeltaSyncConfig{} - result := &appsync.DeltaSyncConfig{} - - if v, ok := configured["base_table_ttl"].(int); ok { - result.BaseTableTTL = aws.Int64(int64(v)) + if v, ok := tfMap["base_table_ttl"].(int); ok { + apiObject.BaseTableTTL = int64(v) } - if v, ok := configured["delta_sync_table_ttl"].(int); ok { - result.DeltaSyncTableTTL = aws.Int64(int64(v)) + if v, ok := tfMap["delta_sync_table_ttl"].(int); ok { + apiObject.DeltaSyncTableTTL = int64(v) } - if v, ok := configured["delta_sync_table_name"].(string); ok { - result.DeltaSyncTableName = aws.String(v) + if v, ok := tfMap["delta_sync_table_name"].(string); ok { + apiObject.DeltaSyncTableName = aws.String(v) } - return result + return apiObject } -func 
flattenDynamoDBDataSourceConfig(config *appsync.DynamodbDataSourceConfig) []map[string]interface{} { - if config == nil { +func flattenDynamoDBDataSourceConfig(apiObject *awstypes.DynamodbDataSourceConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - names.AttrRegion: aws.StringValue(config.AwsRegion), - names.AttrTableName: aws.StringValue(config.TableName), - } - - if config.UseCallerCredentials != nil { - result["use_caller_credentials"] = aws.BoolValue(config.UseCallerCredentials) - } - - if config.Versioned != nil { - result["versioned"] = aws.BoolValue(config.Versioned) + tfMap := map[string]interface{}{ + names.AttrRegion: aws.ToString(apiObject.AwsRegion), + names.AttrTableName: aws.ToString(apiObject.TableName), + "use_caller_credentials": apiObject.UseCallerCredentials, + "versioned": apiObject.Versioned, } - if config.DeltaSyncConfig != nil { - result["delta_sync_config"] = flattenDynamoDBDataSourceDeltaSyncConfig(config.DeltaSyncConfig) + if apiObject.DeltaSyncConfig != nil { + tfMap["delta_sync_config"] = flattenDeltaSyncConfig(apiObject.DeltaSyncConfig) } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func flattenDynamoDBDataSourceDeltaSyncConfig(config *appsync.DeltaSyncConfig) []map[string]interface{} { - if config == nil { +func flattenDeltaSyncConfig(apiObject *awstypes.DeltaSyncConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{} - - if config.DeltaSyncTableName != nil { - result["delta_sync_table_name"] = aws.StringValue(config.DeltaSyncTableName) - } - - if config.BaseTableTTL != nil { - result["base_table_ttl"] = aws.Int64Value(config.BaseTableTTL) + tfMap := map[string]interface{}{ + "base_table_ttl": apiObject.BaseTableTTL, + "delta_sync_table_ttl": apiObject.DeltaSyncTableTTL, } - if config.DeltaSyncTableTTL != nil { - result["delta_sync_table_ttl"] = aws.Int64Value(config.DeltaSyncTableTTL) + if 
apiObject.DeltaSyncTableName != nil { + tfMap["delta_sync_table_name"] = aws.ToString(apiObject.DeltaSyncTableName) } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func expandElasticsearchDataSourceConfig(l []interface{}, currentRegion string) *appsync.ElasticsearchDataSourceConfig { - if len(l) == 0 || l[0] == nil { +func expandElasticsearchDataSourceConfig(tfList []interface{}, currentRegion string) *awstypes.ElasticsearchDataSourceConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.ElasticsearchDataSourceConfig{ + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.ElasticsearchDataSourceConfig{ AwsRegion: aws.String(currentRegion), - Endpoint: aws.String(configured[names.AttrEndpoint].(string)), + Endpoint: aws.String(tfMap[names.AttrEndpoint].(string)), } - if v, ok := configured[names.AttrRegion]; ok && v.(string) != "" { - result.AwsRegion = aws.String(v.(string)) + if v, ok := tfMap[names.AttrRegion]; ok && v.(string) != "" { + apiObject.AwsRegion = aws.String(v.(string)) } - return result + return apiObject } -func expandOpenSearchServiceDataSourceConfig(l []interface{}, currentRegion string) *appsync.OpenSearchServiceDataSourceConfig { - if len(l) == 0 || l[0] == nil { +func expandOpenSearchServiceDataSourceConfig(tfList []interface{}, currentRegion string) *awstypes.OpenSearchServiceDataSourceConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.OpenSearchServiceDataSourceConfig{ + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.OpenSearchServiceDataSourceConfig{ AwsRegion: aws.String(currentRegion), - Endpoint: aws.String(configured[names.AttrEndpoint].(string)), + Endpoint: aws.String(tfMap[names.AttrEndpoint].(string)), } - if v, ok := configured[names.AttrRegion]; ok && v.(string) != "" { - result.AwsRegion = 
aws.String(v.(string)) + if v, ok := tfMap[names.AttrRegion]; ok && v.(string) != "" { + apiObject.AwsRegion = aws.String(v.(string)) } - return result + return apiObject } -func flattenElasticsearchDataSourceConfig(config *appsync.ElasticsearchDataSourceConfig) []map[string]interface{} { - if config == nil { +func flattenElasticsearchDataSourceConfig(apiObject *awstypes.ElasticsearchDataSourceConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - names.AttrEndpoint: aws.StringValue(config.Endpoint), - names.AttrRegion: aws.StringValue(config.AwsRegion), + tfMap := map[string]interface{}{ + names.AttrEndpoint: aws.ToString(apiObject.Endpoint), + names.AttrRegion: aws.ToString(apiObject.AwsRegion), } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func flattenOpenSearchServiceDataSourceConfig(config *appsync.OpenSearchServiceDataSourceConfig) []map[string]interface{} { - if config == nil { +func flattenOpenSearchServiceDataSourceConfig(apiObject *awstypes.OpenSearchServiceDataSourceConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - names.AttrEndpoint: aws.StringValue(config.Endpoint), - names.AttrRegion: aws.StringValue(config.AwsRegion), + tfMap := map[string]interface{}{ + names.AttrEndpoint: aws.ToString(apiObject.Endpoint), + names.AttrRegion: aws.ToString(apiObject.AwsRegion), } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func expandHTTPDataSourceConfig(l []interface{}) *appsync.HttpDataSourceConfig { - if len(l) == 0 || l[0] == nil { +func expandHTTPDataSourceConfig(tfList []interface{}) *awstypes.HttpDataSourceConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.HttpDataSourceConfig{ - Endpoint: aws.String(configured[names.AttrEndpoint].(string)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := 
&awstypes.HttpDataSourceConfig{ + Endpoint: aws.String(tfMap[names.AttrEndpoint].(string)), } - if v, ok := configured["authorization_config"].([]interface{}); ok && len(v) > 0 { - result.AuthorizationConfig = expandHTTPDataSourceAuthorizationConfig(v) + if v, ok := tfMap["authorization_config"].([]interface{}); ok && len(v) > 0 { + apiObject.AuthorizationConfig = expandAuthorizationConfig(v) } - return result + return apiObject } -func flattenHTTPDataSourceConfig(config *appsync.HttpDataSourceConfig) []map[string]interface{} { - if config == nil { +func flattenHTTPDataSourceConfig(apiObject *awstypes.HttpDataSourceConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - names.AttrEndpoint: aws.StringValue(config.Endpoint), + tfMap := map[string]interface{}{ + names.AttrEndpoint: aws.ToString(apiObject.Endpoint), } - if config.AuthorizationConfig != nil { - result["authorization_config"] = flattenHTTPDataSourceAuthorizationConfig(config.AuthorizationConfig) + if apiObject.AuthorizationConfig != nil { + tfMap["authorization_config"] = flattenAuthorizationConfig(apiObject.AuthorizationConfig) } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func expandHTTPDataSourceAuthorizationConfig(l []interface{}) *appsync.AuthorizationConfig { - if len(l) == 0 || l[0] == nil { +func expandAuthorizationConfig(tfList []interface{}) *awstypes.AuthorizationConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.AuthorizationConfig{ - AuthorizationType: aws.String(configured["authorization_type"].(string)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.AuthorizationConfig{ + AuthorizationType: awstypes.AuthorizationType(tfMap["authorization_type"].(string)), } - if v, ok := configured["aws_iam_config"].([]interface{}); ok && len(v) > 0 { - result.AwsIamConfig = expandHTTPDataSourceIAMConfig(v) + if v, ok := 
tfMap["aws_iam_config"].([]interface{}); ok && len(v) > 0 { + apiObject.AwsIamConfig = expandIAMConfig(v) } - return result + return apiObject } -func flattenHTTPDataSourceAuthorizationConfig(config *appsync.AuthorizationConfig) []map[string]interface{} { - if config == nil { +func flattenAuthorizationConfig(apiObject *awstypes.AuthorizationConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - "authorization_type": aws.StringValue(config.AuthorizationType), + tfMap := map[string]interface{}{ + "authorization_type": apiObject.AuthorizationType, } - if config.AwsIamConfig != nil { - result["aws_iam_config"] = flattenHTTPDataSourceIAMConfig(config.AwsIamConfig) + if apiObject.AwsIamConfig != nil { + tfMap["aws_iam_config"] = flattenIAMConfig(apiObject.AwsIamConfig) } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func expandHTTPDataSourceIAMConfig(l []interface{}) *appsync.AwsIamConfig { - if len(l) == 0 || l[0] == nil { +func expandIAMConfig(tfList []interface{}) *awstypes.AwsIamConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.AwsIamConfig{} - result := &appsync.AwsIamConfig{} - - if v, ok := configured["signing_region"].(string); ok && v != "" { - result.SigningRegion = aws.String(v) + if v, ok := tfMap["signing_region"].(string); ok && v != "" { + apiObject.SigningRegion = aws.String(v) } - if v, ok := configured["signing_service_name"].(string); ok && v != "" { - result.SigningServiceName = aws.String(v) + if v, ok := tfMap["signing_service_name"].(string); ok && v != "" { + apiObject.SigningServiceName = aws.String(v) } - return result + return apiObject } -func flattenHTTPDataSourceIAMConfig(config *appsync.AwsIamConfig) []map[string]interface{} { - if config == nil { +func flattenIAMConfig(apiObject *awstypes.AwsIamConfig) []interface{} { + if apiObject == 
nil { return nil } - result := map[string]interface{}{ - "signing_region": aws.StringValue(config.SigningRegion), - "signing_service_name": aws.StringValue(config.SigningServiceName), + tfMap := map[string]interface{}{ + "signing_region": aws.ToString(apiObject.SigningRegion), + "signing_service_name": aws.ToString(apiObject.SigningServiceName), } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func expandLambdaDataSourceConfig(l []interface{}) *appsync.LambdaDataSourceConfig { - if len(l) == 0 || l[0] == nil { +func expandLambdaDataSourceConfig(tfList []interface{}) *awstypes.LambdaDataSourceConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.LambdaDataSourceConfig{ - LambdaFunctionArn: aws.String(configured[names.AttrFunctionARN].(string)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.LambdaDataSourceConfig{ + LambdaFunctionArn: aws.String(tfMap[names.AttrFunctionARN].(string)), } - return result + return apiObject } -func flattenLambdaDataSourceConfig(config *appsync.LambdaDataSourceConfig) []map[string]interface{} { - if config == nil { +func flattenLambdaDataSourceConfig(apiObject *awstypes.LambdaDataSourceConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - names.AttrFunctionARN: aws.StringValue(config.LambdaFunctionArn), + tfMap := map[string]interface{}{ + names.AttrFunctionARN: aws.ToString(apiObject.LambdaFunctionArn), } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func expandRelationalDatabaseDataSourceConfig(l []interface{}, currentRegion string) *appsync.RelationalDatabaseDataSourceConfig { - if len(l) == 0 || l[0] == nil { +func expandRelationalDatabaseDataSourceConfig(tfList []interface{}, currentRegion string) *awstypes.RelationalDatabaseDataSourceConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := 
l[0].(map[string]interface{}) - - result := &appsync.RelationalDatabaseDataSourceConfig{ - RelationalDatabaseSourceType: aws.String(configured[names.AttrSourceType].(string)), - RdsHttpEndpointConfig: testAccDataSourceConfig_expandRDSHTTPEndpoint(configured["http_endpoint_config"].([]interface{}), currentRegion), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.RelationalDatabaseDataSourceConfig{ + RelationalDatabaseSourceType: awstypes.RelationalDatabaseSourceType(tfMap[names.AttrSourceType].(string)), + RdsHttpEndpointConfig: expandRDSHTTPEndpointConfig(tfMap["http_endpoint_config"].([]interface{}), currentRegion), } - return result + return apiObject } -func flattenRelationalDatabaseDataSourceConfig(config *appsync.RelationalDatabaseDataSourceConfig) []map[string]interface{} { - if config == nil { +func flattenRelationalDatabaseDataSourceConfig(apiObject *awstypes.RelationalDatabaseDataSourceConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - names.AttrSourceType: aws.StringValue(config.RelationalDatabaseSourceType), - "http_endpoint_config": flattenRDSHTTPEndpointConfig(config.RdsHttpEndpointConfig), + tfMap := map[string]interface{}{ + names.AttrSourceType: apiObject.RelationalDatabaseSourceType, + "http_endpoint_config": flattenRDSHTTPEndpointConfig(apiObject.RdsHttpEndpointConfig), } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func expandEventBridgeDataSourceConfig(l []interface{}) *appsync.EventBridgeDataSourceConfig { - if len(l) == 0 || l[0] == nil { +func expandEventBridgeDataSourceConfig(tfList []interface{}) *awstypes.EventBridgeDataSourceConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.EventBridgeDataSourceConfig{ - EventBusArn: aws.String(configured["event_bus_arn"].(string)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := 
&awstypes.EventBridgeDataSourceConfig{ + EventBusArn: aws.String(tfMap["event_bus_arn"].(string)), } - return result + return apiObject } -func flattenEventBridgeDataSourceConfig(config *appsync.EventBridgeDataSourceConfig) []map[string]interface{} { - if config == nil { +func flattenEventBridgeDataSourceConfig(apiObject *awstypes.EventBridgeDataSourceConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - "event_bus_arn": aws.StringValue(config.EventBusArn), + tfMap := map[string]interface{}{ + "event_bus_arn": aws.ToString(apiObject.EventBusArn), } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func testAccDataSourceConfig_expandRDSHTTPEndpoint(l []interface{}, currentRegion string) *appsync.RdsHttpEndpointConfig { - if len(l) == 0 || l[0] == nil { +func expandRDSHTTPEndpointConfig(tfList []interface{}, currentRegion string) *awstypes.RdsHttpEndpointConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.RdsHttpEndpointConfig{ + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.RdsHttpEndpointConfig{ AwsRegion: aws.String(currentRegion), } - if v, ok := configured[names.AttrRegion]; ok && v.(string) != "" { - result.AwsRegion = aws.String(v.(string)) + if v, ok := tfMap[names.AttrRegion]; ok && v.(string) != "" { + apiObject.AwsRegion = aws.String(v.(string)) } - if v, ok := configured["aws_secret_store_arn"]; ok && v.(string) != "" { - result.AwsSecretStoreArn = aws.String(v.(string)) + if v, ok := tfMap["aws_secret_store_arn"]; ok && v.(string) != "" { + apiObject.AwsSecretStoreArn = aws.String(v.(string)) } - if v, ok := configured[names.AttrDatabaseName]; ok && v.(string) != "" { - result.DatabaseName = aws.String(v.(string)) + if v, ok := tfMap[names.AttrDatabaseName]; ok && v.(string) != "" { + apiObject.DatabaseName = aws.String(v.(string)) } - if v, ok := 
configured["db_cluster_identifier"]; ok && v.(string) != "" { - result.DbClusterIdentifier = aws.String(v.(string)) + if v, ok := tfMap["db_cluster_identifier"]; ok && v.(string) != "" { + apiObject.DbClusterIdentifier = aws.String(v.(string)) } - if v, ok := configured[names.AttrSchema]; ok && v.(string) != "" { - result.Schema = aws.String(v.(string)) + if v, ok := tfMap[names.AttrSchema]; ok && v.(string) != "" { + apiObject.Schema = aws.String(v.(string)) } - return result + return apiObject } -func flattenRDSHTTPEndpointConfig(config *appsync.RdsHttpEndpointConfig) []map[string]interface{} { - if config == nil { +func flattenRDSHTTPEndpointConfig(apiObject *awstypes.RdsHttpEndpointConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{} + tfMap := map[string]interface{}{} - if config.AwsRegion != nil { - result[names.AttrRegion] = aws.StringValue(config.AwsRegion) + if apiObject.AwsRegion != nil { + tfMap[names.AttrRegion] = aws.ToString(apiObject.AwsRegion) } - if config.AwsSecretStoreArn != nil { - result["aws_secret_store_arn"] = aws.StringValue(config.AwsSecretStoreArn) + if apiObject.AwsSecretStoreArn != nil { + tfMap["aws_secret_store_arn"] = aws.ToString(apiObject.AwsSecretStoreArn) } - if config.DatabaseName != nil { - result[names.AttrDatabaseName] = aws.StringValue(config.DatabaseName) + if apiObject.DatabaseName != nil { + tfMap[names.AttrDatabaseName] = aws.ToString(apiObject.DatabaseName) } - if config.DbClusterIdentifier != nil { - result["db_cluster_identifier"] = aws.StringValue(config.DbClusterIdentifier) + if apiObject.DbClusterIdentifier != nil { + tfMap["db_cluster_identifier"] = aws.ToString(apiObject.DbClusterIdentifier) } - if config.Schema != nil { - result[names.AttrSchema] = aws.StringValue(config.Schema) + if apiObject.Schema != nil { + tfMap[names.AttrSchema] = aws.ToString(apiObject.Schema) } - return []map[string]interface{}{result} + return []interface{}{tfMap} } diff --git 
a/internal/service/appsync/datasource_test.go b/internal/service/appsync/datasource_test.go index 5f7e82abcda..e56e181944c 100644 --- a/internal/service/appsync/datasource_test.go +++ b/internal/service/appsync/datasource_test.go @@ -9,7 +9,6 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/appsync" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -27,7 +26,7 @@ func testAccDataSource_basic(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -64,7 +63,7 @@ func testAccDataSource_description(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -98,7 +97,7 @@ func testAccDataSource_DynamoDB_region(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) 
}, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -134,7 +133,7 @@ func testAccDataSource_DynamoDB_useCallerCredentials(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -171,7 +170,7 @@ func TestAccAppSyncDataSource_Elasticsearch_region(t *testing.T) { // Keep this test Parallel as it takes considerably longer to run than any non-Elasticsearch tests. resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -208,7 +207,7 @@ func TestAccAppSyncDataSource_OpenSearchService_region(t *testing.T) { // Keep this test Parallel as it takes considerably longer to run than any non-OpenSearchService tests. 
resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -244,7 +243,7 @@ func testAccDataSource_HTTP_endpoint(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -282,7 +281,7 @@ func testAccDataSource_type(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -313,7 +312,7 @@ func testAccDataSource_Type_dynamoDB(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -346,7 +345,7 @@ func TestAccAppSyncDataSource_Type_elasticSearch(t *testing.T) { // Keep this test Parallel as it takes considerably longer to run than any non-Elasticsearch tests. resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -379,7 +378,7 @@ func TestAccAppSyncDataSource_Type_openSearchService(t *testing.T) { // Keep this test Parallel as it takes considerably longer to run than any non-OpenSearchService tests. resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -410,7 +409,7 @@ func testAccDataSource_Type_http(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -439,7 +438,7 @@ func testAccDataSource_Type_httpAuth(t *testing.T) { resourceName := "aws_appsync_datasource.test" 
resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -473,7 +472,7 @@ func testAccDataSource_Type_relationalDatabase(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -502,7 +501,7 @@ func testAccDataSource_Type_relationalDatabaseWithOptions(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -535,7 +534,7 @@ func testAccDataSource_Type_lambda(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -567,7 +566,7 @@ func testAccDataSource_Type_eventBridge(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -597,7 +596,7 @@ func testAccDataSource_Type_none(t *testing.T) { resourceName := "aws_appsync_datasource.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSourceDestroy(ctx), @@ -620,19 +619,14 @@ func testAccDataSource_Type_none(t *testing.T) { func testAccCheckDataSourceDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_appsync_datasource" { continue } - apiID, name, err := tfappsync.DecodeID(rs.Primary.ID) - - if err != nil { - return err - } - - _, err = tfappsync.FindDataSourceByTwoPartKey(ctx, conn, apiID, name) + _, err := tfappsync.FindDataSourceByTwoPartKey(ctx, conn, rs.Primary.Attributes["api_id"], rs.Primary.Attributes[names.AttrName]) if tfresource.NotFound(err) { continue @@ -649,26 +643,16 @@ 
func testAccCheckDataSourceDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckExistsDataSource(ctx context.Context, name string) resource.TestCheckFunc { +func testAccCheckExistsDataSource(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No Appsync Data Source ID found: %s", name) - } - - apiID, name, err := tfappsync.DecodeID(rs.Primary.ID) - - if err != nil { - return err + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) - _, err = tfappsync.FindDataSourceByTwoPartKey(ctx, conn, apiID, name) + _, err := tfappsync.FindDataSourceByTwoPartKey(ctx, conn, rs.Primary.Attributes["api_id"], rs.Primary.Attributes[names.AttrName]) return err } diff --git a/internal/service/appsync/domain_name.go b/internal/service/appsync/domain_name.go index ea231219ed9..137f572ef18 100644 --- a/internal/service/appsync/domain_name.go +++ b/internal/service/appsync/domain_name.go @@ -5,30 +5,31 @@ package appsync import ( "context" - "fmt" "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_appsync_domain_name") -func ResourceDomainName() *schema.Resource { +// @SDKResource("aws_appsync_domain_name", name="Domain Name") +func resourceDomainName() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDomainNameCreate, ReadWithoutTimeout: resourceDomainNameRead, UpdateWithoutTimeout: resourceDomainNameUpdate, DeleteWithoutTimeout: resourceDomainNameDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -63,63 +64,67 @@ func ResourceDomainName() *schema.Resource { func resourceDomainNameCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - params := &appsync.CreateDomainNameInput{ + domainName := d.Get(names.AttrDomainName).(string) + input := &appsync.CreateDomainNameInput{ CertificateArn: aws.String(d.Get(names.AttrCertificateARN).(string)), Description: aws.String(d.Get(names.AttrDescription).(string)), - DomainName: aws.String(d.Get(names.AttrDomainName).(string)), + DomainName: aws.String(domainName), } - resp, err := conn.CreateDomainNameWithContext(ctx, params) + output, err := conn.CreateDomainName(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Appsync Domain Name: %s", err) + return sdkdiag.AppendErrorf(diags, "creating Appsync Domain Name (%s): %s", domainName, err) } - d.SetId(aws.StringValue(resp.DomainNameConfig.DomainName)) + d.SetId(aws.ToString(output.DomainNameConfig.DomainName)) return append(diags, resourceDomainNameRead(ctx, d, meta)...) 
} func resourceDomainNameRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - domainName, err := FindDomainNameByID(ctx, conn, d.Id()) - if domainName == nil && !d.IsNewResource() { + domainName, err := findDomainNameByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] AppSync Domain Name (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "getting Appsync Domain Name %q: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Appsync Domain Name (%s): %s", d.Id(), err) } - d.Set(names.AttrDomainName, domainName.DomainName) - d.Set(names.AttrDescription, domainName.Description) + d.Set("appsync_domain_name", domainName.AppsyncDomainName) d.Set(names.AttrCertificateARN, domainName.CertificateArn) + d.Set(names.AttrDescription, domainName.Description) + d.Set(names.AttrDomainName, domainName.DomainName) d.Set(names.AttrHostedZoneID, domainName.HostedZoneId) - d.Set("appsync_domain_name", domainName.AppsyncDomainName) return diags } func resourceDomainNameUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - params := &appsync.UpdateDomainNameInput{ + input := &appsync.UpdateDomainNameInput{ DomainName: aws.String(d.Id()), } if d.HasChange(names.AttrDescription) { - params.Description = aws.String(d.Get(names.AttrDescription).(string)) + input.Description = aws.String(d.Get(names.AttrDescription).(string)) } - _, err := conn.UpdateDomainNameWithContext(ctx, params) + _, err := conn.UpdateDomainName(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Appsync Domain Name %q: %s", d.Id(), 
err) + return sdkdiag.AppendErrorf(diags, "updating Appsync Domain Name (%s): %s", d.Id(), err) } return append(diags, resourceDomainNameRead(ctx, d, meta)...) @@ -127,29 +132,46 @@ func resourceDomainNameUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceDomainNameDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) + + log.Printf("[INFO] Deleting Appsync Domain Name: %s", d.Id()) + const ( + timeout = 5 * time.Minute + ) + _, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, timeout, func() (interface{}, error) { + return conn.DeleteDomainName(ctx, &appsync.DeleteDomainNameInput{ + DomainName: aws.String(d.Id()), + }) + }) - input := &appsync.DeleteDomainNameInput{ - DomainName: aws.String(d.Id()), + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting Appsync Domain Name (%s): %s", d.Id(), err) } - err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { - _, err := conn.DeleteDomainNameWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeConcurrentModificationException) { - return retry.RetryableError(fmt.Errorf("deleting Appsync Domain Name %q: %w", d.Id(), err)) - } - if err != nil { - return retry.NonRetryableError(err) - } + return diags +} - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.DeleteDomainNameWithContext(ctx, input) +func findDomainNameByID(ctx context.Context, conn *appsync.Client, id string) (*awstypes.DomainNameConfig, error) { + input := &appsync.GetDomainNameInput{ + DomainName: aws.String(id), } + + output, err := conn.GetDomainName(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Appsync Domain Name %q: 
%s", d.Id(), err) + return nil, err } - return diags + if output == nil || output.DomainNameConfig == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.DomainNameConfig, nil } diff --git a/internal/service/appsync/domain_name_api_association.go b/internal/service/appsync/domain_name_api_association.go index dda172514f4..32e0d3dc016 100644 --- a/internal/service/appsync/domain_name_api_association.go +++ b/internal/service/appsync/domain_name_api_association.go @@ -5,25 +5,32 @@ package appsync import ( "context" + "errors" "log" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_appsync_domain_name_api_association") -func ResourceDomainNameAPIAssociation() *schema.Resource { +// @SDKResource("aws_appsync_domain_name_api_association", name="Domain Name API Association") +func resourceDomainNameAPIAssociation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDomainNameAPIAssociationCreate, ReadWithoutTimeout: resourceDomainNameAPIAssociationRead, UpdateWithoutTimeout: resourceDomainNameAPIAssociationUpdate, DeleteWithoutTimeout: resourceDomainNameAPIAssociationDelete, + Importer: 
&schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -44,22 +51,24 @@ func ResourceDomainNameAPIAssociation() *schema.Resource { func resourceDomainNameAPIAssociationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - params := &appsync.AssociateApiInput{ + domainName := d.Get(names.AttrDomainName).(string) + input := &appsync.AssociateApiInput{ ApiId: aws.String(d.Get("api_id").(string)), - DomainName: aws.String(d.Get(names.AttrDomainName).(string)), + DomainName: aws.String(domainName), } - resp, err := conn.AssociateApiWithContext(ctx, params) + output, err := conn.AssociateApi(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Appsync Domain Name API Association: %s", err) + return sdkdiag.AppendErrorf(diags, "creating Appsync Domain Name API Association (%s): %s", domainName, err) } - d.SetId(aws.StringValue(resp.ApiAssociation.DomainName)) + d.SetId(aws.ToString(output.ApiAssociation.DomainName)) - if err := waitDomainNameAPIAssociation(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Appsync Domain Name API (%s) Association: %s", d.Id(), err) + if _, err := waitDomainNameAPIAssociation(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Appsync Domain Name API Association (%s) create: %s", d.Id(), err) } return append(diags, resourceDomainNameAPIAssociationRead(ctx, d, meta)...) 
@@ -67,41 +76,43 @@ func resourceDomainNameAPIAssociationCreate(ctx context.Context, d *schema.Resou func resourceDomainNameAPIAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) + + association, err := findDomainNameAPIAssociationByID(ctx, conn, d.Id()) - association, err := FindDomainNameAPIAssociationByID(ctx, conn, d.Id()) - if association == nil && !d.IsNewResource() { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Appsync Domain Name API Association (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "getting Appsync Domain Name API Association %q: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Appsync Domain Name API Association (%s): %s", d.Id(), err) } - d.Set(names.AttrDomainName, association.DomainName) d.Set("api_id", association.ApiId) + d.Set(names.AttrDomainName, association.DomainName) return diags } func resourceDomainNameAPIAssociationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - params := &appsync.AssociateApiInput{ + input := &appsync.AssociateApiInput{ ApiId: aws.String(d.Get("api_id").(string)), - DomainName: aws.String(d.Get(names.AttrDomainName).(string)), + DomainName: aws.String(d.Id()), } - _, err := conn.AssociateApiWithContext(ctx, params) + _, err := conn.AssociateApi(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Appsync Domain Name API Association: %s", err) + return sdkdiag.AppendErrorf(diags, "updating Appsync Domain Name API Association (%s): %s", d.Id(), err) } - if err := waitDomainNameAPIAssociation(ctx, conn, d.Id()); err != nil { - 
return sdkdiag.AppendErrorf(diags, "waiting for Appsync Domain Name API (%s) Association: %s", d.Id(), err) + if _, err := waitDomainNameAPIAssociation(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Appsync Domain Name API Association (%s) update: %s", d.Id(), err) } return append(diags, resourceDomainNameAPIAssociationRead(ctx, d, meta)...) @@ -109,22 +120,107 @@ func resourceDomainNameAPIAssociationUpdate(ctx context.Context, d *schema.Resou func resourceDomainNameAPIAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - input := &appsync.DisassociateApiInput{ + log.Printf("[INFO] Deleting Appsync Domain Name API Association: %s", d.Id()) + _, err := conn.DisassociateApi(ctx, &appsync.DisassociateApiInput{ DomainName: aws.String(d.Id()), + }) + + if errs.IsA[*awstypes.NotFoundException](err) { + return diags } - _, err := conn.DisassociateApiWithContext(ctx, input) + if err != nil { - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return diags - } - return sdkdiag.AppendErrorf(diags, "deleting Appsync Domain Name API Association: %s", err) + return sdkdiag.AppendErrorf(diags, "deleting Appsync Domain Name API Association (%s): %s", d.Id(), err) } - if err := waitDomainNameAPIDisassociation(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Appsync Domain Name API (%s) Disassociation: %s", d.Id(), err) + if _, err := waitDomainNameAPIDisassociation(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Appsync Domain Name API Association (%s) delete: %s", d.Id(), err) } return diags } + +func findDomainNameAPIAssociationByID(ctx context.Context, conn *appsync.Client, id string) (*awstypes.ApiAssociation, error) { + input := &appsync.GetApiAssociationInput{ + DomainName: 
aws.String(id), + } + + output, err := conn.GetApiAssociation(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.ApiAssociation == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ApiAssociation, nil +} + +func statusDomainNameAPIAssociation(ctx context.Context, conn *appsync.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDomainNameAPIAssociationByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.AssociationStatus), nil + } +} + +func waitDomainNameAPIAssociation(ctx context.Context, conn *appsync.Client, id string) (*awstypes.ApiAssociation, error) { //nolint:unparam + const ( + domainNameAPIAssociationTimeout = 60 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.AssociationStatusProcessing), + Target: enum.Slice(awstypes.AssociationStatusSuccess), + Refresh: statusDomainNameAPIAssociation(ctx, conn, id), + Timeout: domainNameAPIAssociationTimeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.ApiAssociation); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.DeploymentDetail))) + return output, err + } + + return nil, err +} + +func waitDomainNameAPIDisassociation(ctx context.Context, conn *appsync.Client, id string) (*awstypes.ApiAssociation, error) { + const ( + timeout = 60 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.AssociationStatusProcessing), + Target: []string{}, + Refresh: statusDomainNameAPIAssociation(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := 
outputRaw.(*awstypes.ApiAssociation); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.DeploymentDetail))) + return output, err + } + + return nil, err +} diff --git a/internal/service/appsync/domain_name_api_association_test.go b/internal/service/appsync/domain_name_api_association_test.go index a85e62b4c70..f7a1b6d1c89 100644 --- a/internal/service/appsync/domain_name_api_association_test.go +++ b/internal/service/appsync/domain_name_api_association_test.go @@ -8,30 +8,28 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfappsync "github.com/hashicorp/terraform-provider-aws/internal/service/appsync" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func testAccDomainNameAPIAssociation_basic(t *testing.T) { ctx := acctest.Context(t) - var association appsync.ApiAssociation - appsyncCertDomain := getCertDomain(t) - + var association awstypes.ApiAssociation + appsyncCertDomain := acctest.SkipIfEnvVarNotSet(t, "AWS_APPSYNC_DOMAIN_NAME_CERTIFICATE_DOMAIN") rName := sdkacctest.RandString(8) resourceName := "aws_appsync_domain_name_api_association.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), - 
ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDomainNameAPIAssociationDestroy(ctx), Steps: []resource.TestStep{ { @@ -61,16 +59,15 @@ func testAccDomainNameAPIAssociation_basic(t *testing.T) { func testAccDomainNameAPIAssociation_disappears(t *testing.T) { ctx := acctest.Context(t) - var association appsync.ApiAssociation - appsyncCertDomain := getCertDomain(t) - + var association awstypes.ApiAssociation + appsyncCertDomain := acctest.SkipIfEnvVarNotSet(t, "AWS_APPSYNC_DOMAIN_NAME_CERTIFICATE_DOMAIN") rName := sdkacctest.RandString(8) resourceName := "aws_appsync_domain_name_api_association.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDomainNameAPIAssociationDestroy(ctx), Steps: []resource.TestStep{ { @@ -87,57 +84,54 @@ func testAccDomainNameAPIAssociation_disappears(t *testing.T) { func testAccCheckDomainNameAPIAssociationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_appsync_domain_name" { continue } - association, err := tfappsync.FindDomainNameAPIAssociationByID(ctx, conn, rs.Primary.ID) - if err == nil { - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return nil - } - return err + _, err := tfappsync.FindDomainNameAPIAssociationByID(ctx, conn, 
rs.Primary.ID) + + if tfresource.NotFound(err) { + continue } - if association != nil && aws.StringValue(association.DomainName) == rs.Primary.ID { - return fmt.Errorf("Appsync Domain Name ID %q still exists", rs.Primary.ID) + if err != nil { + return err } - return nil + return fmt.Errorf("Appsync Domain Name API Association %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckDomainNameAPIAssociationExists(ctx context.Context, resourceName string, DomainNameAPIAssociation *appsync.ApiAssociation) resource.TestCheckFunc { +func testAccCheckDomainNameAPIAssociationExists(ctx context.Context, n string, v *awstypes.ApiAssociation) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Appsync Domain Name Not found in state: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) - association, err := tfappsync.FindDomainNameAPIAssociationByID(ctx, conn, rs.Primary.ID) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + + output, err := tfappsync.FindDomainNameAPIAssociationByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if association == nil || association.DomainName == nil { - return fmt.Errorf("Appsync Domain Name %q not found", rs.Primary.ID) - } - - *DomainNameAPIAssociation = *association + *v = *output return nil } } -func testAccDomainNameAPIAssociationBaseConfig(domain, rName string) string { - return acctest.ConfigAlternateRegionProvider() + fmt.Sprintf(` +func testAccDomainNameAPIAssociationConfig_base(domain, rName string) string { + return fmt.Sprintf(` data "aws_acm_certificate" "test" { - provider = "awsalternate" domain = "*.%[1]s" most_recent = true } @@ -155,16 +149,16 @@ resource "aws_appsync_graphql_api" "test" { } func testAccDomainNameAPIAssociationConfig_basic(domain, rName string) string 
{ - return testAccDomainNameAPIAssociationBaseConfig(domain, rName) + ` + return acctest.ConfigCompose(testAccDomainNameAPIAssociationConfig_base(domain, rName), ` resource "aws_appsync_domain_name_api_association" "test" { api_id = aws_appsync_graphql_api.test.id domain_name = aws_appsync_domain_name.test.domain_name } -` +`) } func testAccDomainNameAPIAssociationConfig_updated(domain, rName string) string { - return testAccDomainNameAPIAssociationBaseConfig(domain, rName) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccDomainNameAPIAssociationConfig_base(domain, rName), ` resource "aws_appsync_graphql_api" "test2" { authentication_type = "API_KEY" name = "%[1]s-2" diff --git a/internal/service/appsync/domain_name_test.go b/internal/service/appsync/domain_name_test.go index 4b28e443be8..36710ed83f7 100644 --- a/internal/service/appsync/domain_name_test.go +++ b/internal/service/appsync/domain_name_test.go @@ -8,31 +8,29 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfappsync "github.com/hashicorp/terraform-provider-aws/internal/service/appsync" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func testAccDomainName_basic(t *testing.T) { ctx := acctest.Context(t) - var domainName appsync.DomainNameConfig - appsyncCertDomain := getCertDomain(t) - + var domainName awstypes.DomainNameConfig + appsyncCertDomain := acctest.SkipIfEnvVarNotSet(t, "AWS_APPSYNC_DOMAIN_NAME_CERTIFICATE_DOMAIN") 
rName := sdkacctest.RandString(8) acmCertificateResourceName := "data.aws_acm_certificate.test" resourceName := "aws_appsync_domain_name.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDomainNameDestroy(ctx), Steps: []resource.TestStep{ { @@ -54,16 +52,15 @@ func testAccDomainName_basic(t *testing.T) { func testAccDomainName_description(t *testing.T) { ctx := acctest.Context(t) - var domainName appsync.DomainNameConfig - appsyncCertDomain := getCertDomain(t) - + var domainName awstypes.DomainNameConfig + appsyncCertDomain := acctest.SkipIfEnvVarNotSet(t, "AWS_APPSYNC_DOMAIN_NAME_CERTIFICATE_DOMAIN") rName := sdkacctest.RandString(8) resourceName := "aws_appsync_domain_name.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDomainNameDestroy(ctx), Steps: []resource.TestStep{ { @@ -91,16 +88,15 @@ func testAccDomainName_description(t *testing.T) { func testAccDomainName_disappears(t *testing.T) { ctx := acctest.Context(t) - var domainName appsync.DomainNameConfig - appsyncCertDomain := getCertDomain(t) - + var domainName awstypes.DomainNameConfig + appsyncCertDomain := acctest.SkipIfEnvVarNotSet(t, 
"AWS_APPSYNC_DOMAIN_NAME_CERTIFICATE_DOMAIN") rName := sdkacctest.RandString(8) resourceName := "aws_appsync_domain_name.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDomainNameDestroy(ctx), Steps: []resource.TestStep{ { @@ -117,57 +113,54 @@ func testAccDomainName_disappears(t *testing.T) { func testAccCheckDomainNameDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_appsync_domain_name" { continue } - domainName, err := tfappsync.FindDomainNameByID(ctx, conn, rs.Primary.ID) - if err == nil { - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return nil - } - return err + _, err := tfappsync.FindDomainNameByID(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue } - if domainName != nil && aws.StringValue(domainName.DomainName) == rs.Primary.ID { - return fmt.Errorf("Appsync Domain Name ID %q still exists", rs.Primary.ID) + if err != nil { + return err } - return nil + return fmt.Errorf("Appsync Domain Name %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckDomainNameExists(ctx context.Context, resourceName string, domainName *appsync.DomainNameConfig) resource.TestCheckFunc { +func testAccCheckDomainNameExists(ctx context.Context, n string, v *awstypes.DomainNameConfig) resource.TestCheckFunc { return func(s *terraform.State) 
error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Appsync Domain Name Not found in state: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) - domain, err := tfappsync.FindDomainNameByID(ctx, conn, rs.Primary.ID) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + + output, err := tfappsync.FindDomainNameByID(ctx, conn, rs.Primary.ID) + if err != nil { return err } - if domain == nil || domain.DomainName == nil { - return fmt.Errorf("Appsync Domain Name %q not found", rs.Primary.ID) - } - - *domainName = *domain + *v = *output return nil } } -func testAccDomainNameBaseConfig(domain string) string { - return acctest.ConfigAlternateRegionProvider() + fmt.Sprintf(` +func testAccDomainNameConfig_base(domain string) string { + return fmt.Sprintf(` data "aws_acm_certificate" "test" { - provider = "awsalternate" domain = "*.%[1]s" most_recent = true } @@ -175,20 +168,20 @@ data "aws_acm_certificate" "test" { } func testAccDomainNameConfig_description(rName, domain, desc string) string { - return testAccDomainNameBaseConfig(domain) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccDomainNameConfig_base(domain), fmt.Sprintf(` resource "aws_appsync_domain_name" "test" { domain_name = "%[2]s.%[1]s" certificate_arn = data.aws_acm_certificate.test.arn description = %[3]q } -`, domain, rName, desc) +`, domain, rName, desc)) } func testAccDomainNameConfig_basic(rName, domain string) string { - return testAccDomainNameBaseConfig(domain) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccDomainNameConfig_base(domain), fmt.Sprintf(` resource "aws_appsync_domain_name" "test" { domain_name = "%[2]s.%[1]s" certificate_arn = data.aws_acm_certificate.test.arn } -`, domain, rName) +`, domain, rName)) } diff --git a/internal/service/appsync/exports_test.go b/internal/service/appsync/exports_test.go new 
file mode 100644 index 00000000000..8a96bc50732 --- /dev/null +++ b/internal/service/appsync/exports_test.go @@ -0,0 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package appsync + +// Exports for use in tests only. +var ( + ResourceAPICache = resourceAPICache + ResourceAPIKey = resourceAPIKey + ResourceDataSource = resourceDataSource + ResourceDomainName = resourceDomainName + ResourceDomainNameAPIAssociation = resourceDomainNameAPIAssociation + ResourceFunction = resourceFunction + ResourceGraphQLAPI = resourceGraphQLAPI + ResourceResolver = resourceResolver + ResourceType = resourceType + + DefaultAuthorizerResultTTLInSeconds = defaultAuthorizerResultTTLInSeconds + FindAPICacheByID = findAPICacheByID + FindAPIKeyByTwoPartKey = findAPIKeyByTwoPartKey + FindDataSourceByTwoPartKey = findDataSourceByTwoPartKey + FindDomainNameAPIAssociationByID = findDomainNameAPIAssociationByID + FindDomainNameByID = findDomainNameByID + FindFunctionByTwoPartKey = findFunctionByTwoPartKey + FindGraphQLAPIByID = findGraphQLAPIByID + FindResolverByThreePartKey = findResolverByThreePartKey + FindTypeByThreePartKey = findTypeByThreePartKey +) diff --git a/internal/service/appsync/find.go b/internal/service/appsync/find.go deleted file mode 100644 index 05906df4040..00000000000 --- a/internal/service/appsync/find.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package appsync - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindAPICacheByID(ctx context.Context, conn *appsync.AppSync, id string) (*appsync.ApiCache, error) { - input := &appsync.GetApiCacheInput{ - ApiId: aws.String(id), - } - out, err := conn.GetApiCacheWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if out == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return out.ApiCache, nil -} - -func FindDomainNameByID(ctx context.Context, conn *appsync.AppSync, id string) (*appsync.DomainNameConfig, error) { - input := &appsync.GetDomainNameInput{ - DomainName: aws.String(id), - } - out, err := conn.GetDomainNameWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if out == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return out.DomainNameConfig, nil -} - -func FindDomainNameAPIAssociationByID(ctx context.Context, conn *appsync.AppSync, id string) (*appsync.ApiAssociation, error) { - input := &appsync.GetApiAssociationInput{ - DomainName: aws.String(id), - } - out, err := conn.GetApiAssociationWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if out == nil { - return nil, tfresource.NewEmptyResultError(input) - } 
- - return out.ApiAssociation, nil -} - -func FindTypeByThreePartKey(ctx context.Context, conn *appsync.AppSync, apiID, format, name string) (*appsync.Type, error) { - input := &appsync.GetTypeInput{ - ApiId: aws.String(apiID), - Format: aws.String(format), - TypeName: aws.String(name), - } - - output, err := conn.GetTypeWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.Type == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.Type, nil -} diff --git a/internal/service/appsync/function.go b/internal/service/appsync/function.go index 1007441b65c..4bf0e1a7c9f 100644 --- a/internal/service/appsync/function.go +++ b/internal/service/appsync/function.go @@ -10,20 +10,28 @@ import ( "strings" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_appsync_function") -func ResourceFunction() 
*schema.Resource { +const ( + functionVersion2018_05_29 = "2018-05-29" +) + +// @SDKResource("aws_appsync_function", name="Function") +func resourceFunction() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceFunctionCreate, ReadWithoutTimeout: resourceFunctionRead, @@ -67,7 +75,7 @@ func ResourceFunction() *schema.Resource { Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{ - "2018-05-29", + functionVersion2018_05_29, }, true), }, "max_batch_size": { @@ -96,9 +104,9 @@ func ResourceFunction() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrName: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.RuntimeName_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.RuntimeName](), }, "runtime_version": { Type: schema.TypeString, @@ -114,14 +122,14 @@ func ResourceFunction() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "conflict_detection": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(appsync.ConflictDetectionType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ConflictDetectionType](), }, "conflict_handler": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(appsync.ConflictHandlerType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ConflictHandlerType](), }, "lambda_conflict_handler_config": { Type: schema.TypeList, @@ -146,15 +154,15 @@ func ResourceFunction() *schema.Resource { func resourceFunctionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) apiID := d.Get("api_id").(string) - + 
name := d.Get(names.AttrName).(string) input := &appsync.CreateFunctionInput{ ApiId: aws.String(apiID), DataSourceName: aws.String(d.Get("data_source").(string)), FunctionVersion: aws.String(d.Get("function_version").(string)), - Name: aws.String(d.Get(names.AttrName).(string)), + Name: aws.String(name), } if v, ok := d.GetOk("code"); ok { @@ -165,92 +173,87 @@ func resourceFunctionCreate(ctx context.Context, d *schema.ResourceData, meta in input.Description = aws.String(v.(string)) } + if v, ok := d.GetOk("max_batch_size"); ok { + input.MaxBatchSize = int32(v.(int)) + } + if v, ok := d.GetOk("request_mapping_template"); ok { input.RequestMappingTemplate = aws.String(v.(string)) - input.FunctionVersion = aws.String("2018-05-29") + input.FunctionVersion = aws.String(functionVersion2018_05_29) } if v, ok := d.GetOk("response_mapping_template"); ok { input.ResponseMappingTemplate = aws.String(v.(string)) } - if v, ok := d.GetOkExists("max_batch_size"); ok { - input.MaxBatchSize = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("runtime"); ok && len(v.([]interface{})) > 0 { + input.Runtime = expandRuntime(v.([]interface{})) } if v, ok := d.GetOk("sync_config"); ok && len(v.([]interface{})) > 0 { input.SyncConfig = expandSyncConfig(v.([]interface{})) } - if v, ok := d.GetOk("runtime"); ok && len(v.([]interface{})) > 0 { - input.Runtime = expandRuntime(v.([]interface{})) - } + output, err := conn.CreateFunction(ctx, input) - resp, err := conn.CreateFunctionWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating AppSync Function: %s", err) } - d.SetId(fmt.Sprintf("%s-%s", apiID, aws.StringValue(resp.FunctionConfiguration.FunctionId))) + d.SetId(functionCreateResourceID(apiID, aws.ToString(output.FunctionConfiguration.FunctionId))) return append(diags, resourceFunctionRead(ctx, d, meta)...) 
} func resourceFunctionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, functionID, err := DecodeFunctionID(d.Id()) + apiID, functionID, err := functionParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading AppSync Function (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - input := &appsync.GetFunctionInput{ - ApiId: aws.String(apiID), - FunctionId: aws.String(functionID), - } + function, err := findFunctionByTwoPartKey(ctx, conn, apiID, functionID) - resp, err := conn.GetFunctionWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) && !d.IsNewResource() { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] AppSync Function (%s) not found, removing from state", d.Id()) d.SetId("") return diags } + if err != nil { - return sdkdiag.AppendErrorf(diags, "reading AppSync Function (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Appsync Function (%s): %s", d.Id(), err) } - function := resp.FunctionConfiguration d.Set("api_id", apiID) - d.Set("function_id", functionID) + d.Set(names.AttrARN, function.FunctionArn) + d.Set("code", function.Code) d.Set("data_source", function.DataSourceName) d.Set(names.AttrDescription, function.Description) - d.Set(names.AttrARN, function.FunctionArn) + d.Set("function_id", functionID) d.Set("function_version", function.FunctionVersion) + d.Set("max_batch_size", function.MaxBatchSize) d.Set(names.AttrName, function.Name) d.Set("request_mapping_template", function.RequestMappingTemplate) d.Set("response_mapping_template", function.ResponseMappingTemplate) - d.Set("max_batch_size", function.MaxBatchSize) - d.Set("code", function.Code) - - if err := d.Set("sync_config", flattenSyncConfig(function.SyncConfig)); err != nil { - return 
sdkdiag.AppendErrorf(diags, "setting sync_config: %s", err) - } - if err := d.Set("runtime", flattenRuntime(function.Runtime)); err != nil { return sdkdiag.AppendErrorf(diags, "setting runtime: %s", err) } + if err := d.Set("sync_config", flattenSyncConfig(function.SyncConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting sync_config: %s", err) + } return diags } func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, functionID, err := DecodeFunctionID(d.Id()) + apiID, functionID, err := functionParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating AppSync Function (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } input := &appsync.UpdateFunctionInput{ @@ -261,12 +264,16 @@ func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta in Name: aws.String(d.Get(names.AttrName).(string)), } + if v, ok := d.GetOk("code"); ok { + input.Code = aws.String(v.(string)) + } + if v, ok := d.GetOk(names.AttrDescription); ok { input.Description = aws.String(v.(string)) } - if v, ok := d.GetOk("code"); ok { - input.Code = aws.String(v.(string)) + if v, ok := d.GetOk("max_batch_size"); ok { + input.MaxBatchSize = int32(v.(int)) } if v, ok := d.GetOk("request_mapping_template"); ok { @@ -277,19 +284,16 @@ func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta in input.ResponseMappingTemplate = aws.String(v.(string)) } - if v, ok := d.GetOk("max_batch_size"); ok { - input.MaxBatchSize = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("runtime"); ok && len(v.([]interface{})) > 0 { + input.Runtime = expandRuntime(v.([]interface{})) } if v, ok := d.GetOk("sync_config"); ok && len(v.([]interface{})) > 0 { input.SyncConfig = expandSyncConfig(v.([]interface{})) } - if v, ok := 
d.GetOk("runtime"); ok && len(v.([]interface{})) > 0 { - input.Runtime = expandRuntime(v.([]interface{})) - } + _, err = conn.UpdateFunction(ctx, input) - _, err = conn.UpdateFunctionWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating AppSync Function (%s): %s", d.Id(), err) } @@ -299,22 +303,23 @@ func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceFunctionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, functionID, err := DecodeFunctionID(d.Id()) + apiID, functionID, err := functionParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting AppSync Function (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - input := &appsync.DeleteFunctionInput{ + log.Printf("[INFO] Deleting Appsync Function: %s", d.Id()) + _, err = conn.DeleteFunction(ctx, &appsync.DeleteFunctionInput{ ApiId: aws.String(apiID), FunctionId: aws.String(functionID), - } + }) - _, err = conn.DeleteFunctionWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting AppSync Function (%s): %s", d.Id(), err) } @@ -322,109 +327,143 @@ func resourceFunctionDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } -func DecodeFunctionID(id string) (string, string, error) { - idParts := strings.SplitN(id, "-", 2) - if len(idParts) != 2 { - return "", "", fmt.Errorf("expected ID in format ApiID-FunctionID, received: %s", id) +const functionResourceIDSeparator = "-" + +func functionCreateResourceID(apiID, functionID string) string { + parts := []string{apiID, functionID} + id := strings.Join(parts, functionResourceIDSeparator) + + return 
id +} + +func functionParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, functionResourceIDSeparator, 2) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected API-ID%[2]sFUNCTION-ID", id, functionResourceIDSeparator) } - return idParts[0], idParts[1], nil + + return parts[0], parts[1], nil } -func expandRuntime(l []interface{}) *appsync.AppSyncRuntime { - if len(l) == 0 || l[0] == nil { - return nil +func findFunctionByTwoPartKey(ctx context.Context, conn *appsync.Client, apiID, functionID string) (*awstypes.FunctionConfiguration, error) { + input := &appsync.GetFunctionInput{ + ApiId: aws.String(apiID), + FunctionId: aws.String(functionID), } - configured := l[0].(map[string]interface{}) + output, err := conn.GetFunction(ctx, input) - result := &appsync.AppSyncRuntime{} + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } - if v, ok := configured[names.AttrName].(string); ok { - result.Name = aws.String(v) + if err != nil { + return nil, err } - if v, ok := configured["runtime_version"].(string); ok { - result.RuntimeVersion = aws.String(v) + if output == nil || output.FunctionConfiguration == nil { + return nil, tfresource.NewEmptyResultError(input) } - return result + return output.FunctionConfiguration, nil } -func flattenRuntime(config *appsync.AppSyncRuntime) []map[string]interface{} { - if config == nil { +func expandRuntime(tfList []interface{}) *awstypes.AppSyncRuntime { + if len(tfList) == 0 || tfList[0] == nil { return nil } - result := map[string]interface{}{ - names.AttrName: aws.StringValue(config.Name), - "runtime_version": aws.StringValue(config.RuntimeVersion), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.AppSyncRuntime{} + + if v, ok := tfMap[names.AttrName].(string); ok { + apiObject.Name = awstypes.RuntimeName(v) + } + + if v, ok 
:= tfMap["runtime_version"].(string); ok { + apiObject.RuntimeVersion = aws.String(v) } - return []map[string]interface{}{result} + return apiObject } -func expandSyncConfig(l []interface{}) *appsync.SyncConfig { - if len(l) == 0 || l[0] == nil { +func flattenRuntime(apiObject *awstypes.AppSyncRuntime) []interface{} { + if apiObject == nil { return nil } - configured := l[0].(map[string]interface{}) + tfMap := map[string]interface{}{ + names.AttrName: apiObject.Name, + "runtime_version": aws.ToString(apiObject.RuntimeVersion), + } - result := &appsync.SyncConfig{} + return []interface{}{tfMap} +} - if v, ok := configured["conflict_detection"].(string); ok { - result.ConflictDetection = aws.String(v) +func expandSyncConfig(tfList []interface{}) *awstypes.SyncConfig { + if len(tfList) == 0 || tfList[0] == nil { + return nil } - if v, ok := configured["conflict_handler"].(string); ok { - result.ConflictHandler = aws.String(v) + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.SyncConfig{} + + if v, ok := tfMap["conflict_detection"].(string); ok { + apiObject.ConflictDetection = awstypes.ConflictDetectionType(v) + } + + if v, ok := tfMap["conflict_handler"].(string); ok { + apiObject.ConflictHandler = awstypes.ConflictHandlerType(v) } - if v, ok := configured["lambda_conflict_handler_config"].([]interface{}); ok && len(v) > 0 { - result.LambdaConflictHandlerConfig = expandLambdaConflictHandlerConfig(v) + if v, ok := tfMap["lambda_conflict_handler_config"].([]interface{}); ok && len(v) > 0 { + apiObject.LambdaConflictHandlerConfig = expandLambdaConflictHandlerConfig(v) } - return result + return apiObject } -func flattenSyncConfig(config *appsync.SyncConfig) []map[string]interface{} { - if config == nil { +func flattenSyncConfig(apiObject *awstypes.SyncConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - "conflict_detection": aws.StringValue(config.ConflictDetection), - "conflict_handler": 
aws.StringValue(config.ConflictHandler), - "lambda_conflict_handler_config": flattenLambdaConflictHandlerConfig(config.LambdaConflictHandlerConfig), + tfMap := map[string]interface{}{ + "conflict_detection": apiObject.ConflictDetection, + "conflict_handler": apiObject.ConflictHandler, + "lambda_conflict_handler_config": flattenLambdaConflictHandlerConfig(apiObject.LambdaConflictHandlerConfig), } - return []map[string]interface{}{result} + return []interface{}{tfMap} } -func expandLambdaConflictHandlerConfig(l []interface{}) *appsync.LambdaConflictHandlerConfig { - if len(l) == 0 || l[0] == nil { +func expandLambdaConflictHandlerConfig(tfList []interface{}) *awstypes.LambdaConflictHandlerConfig { + if len(tfList) == 0 || tfList[0] == nil { return nil } - configured := l[0].(map[string]interface{}) - - result := &appsync.LambdaConflictHandlerConfig{} + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.LambdaConflictHandlerConfig{} - if v, ok := configured["lambda_conflict_handler_arn"].(string); ok { - result.LambdaConflictHandlerArn = aws.String(v) + if v, ok := tfMap["lambda_conflict_handler_arn"].(string); ok { + apiObject.LambdaConflictHandlerArn = aws.String(v) } - return result + return apiObject } -func flattenLambdaConflictHandlerConfig(config *appsync.LambdaConflictHandlerConfig) []map[string]interface{} { - if config == nil { +func flattenLambdaConflictHandlerConfig(apiObject *awstypes.LambdaConflictHandlerConfig) []interface{} { + if apiObject == nil { return nil } - result := map[string]interface{}{ - "lambda_conflict_handler_arn": aws.StringValue(config.LambdaConflictHandlerArn), + tfMap := map[string]interface{}{ + "lambda_conflict_handler_arn": aws.ToString(apiObject.LambdaConflictHandlerArn), } - return []map[string]interface{}{result} + return []interface{}{tfMap} } diff --git a/internal/service/appsync/function_test.go b/internal/service/appsync/function_test.go index 193e5734725..64077312e8a 100644 --- 
a/internal/service/appsync/function_test.go +++ b/internal/service/appsync/function_test.go @@ -9,15 +9,14 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfappsync "github.com/hashicorp/terraform-provider-aws/internal/service/appsync" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -27,10 +26,10 @@ func testAccFunction_basic(t *testing.T) { rName2 := fmt.Sprintf("tfexample%s", sdkacctest.RandString(8)) rName3 := fmt.Sprintf("tfexample%s", sdkacctest.RandString(8)) resourceName := "aws_appsync_function.test" - var config appsync.FunctionConfiguration + var config awstypes.FunctionConfiguration resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -70,10 +69,10 @@ func testAccFunction_code(t *testing.T) { rName1 := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) rName2 := fmt.Sprintf("tfexample%s", sdkacctest.RandString(8)) resourceName := "aws_appsync_function.test" - var config appsync.FunctionConfiguration + var config awstypes.FunctionConfiguration resource.Test(t, resource.TestCase{ - 
PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -113,10 +112,10 @@ func testAccFunction_syncConfig(t *testing.T) { ctx := acctest.Context(t) rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_function.test" - var config appsync.FunctionConfiguration + var config awstypes.FunctionConfiguration resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -144,10 +143,10 @@ func testAccFunction_description(t *testing.T) { rName1 := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) rName2 := fmt.Sprintf("tfexample%s", sdkacctest.RandString(8)) resourceName := "aws_appsync_function.test" - var config appsync.FunctionConfiguration + var config awstypes.FunctionConfiguration resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -180,10 +179,10 @@ func testAccFunction_responseMappingTemplate(t *testing.T) { rName1 := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) 
rName2 := fmt.Sprintf("tfexample%s", sdkacctest.RandString(8)) resourceName := "aws_appsync_function.test" - var config appsync.FunctionConfiguration + var config awstypes.FunctionConfiguration resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -208,10 +207,10 @@ func testAccFunction_disappears(t *testing.T) { rName1 := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) rName2 := fmt.Sprintf("tfexample%s", sdkacctest.RandString(8)) resourceName := "aws_appsync_function.test" - var config appsync.FunctionConfiguration + var config awstypes.FunctionConfiguration resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckFunctionDestroy(ctx), @@ -230,60 +229,46 @@ func testAccFunction_disappears(t *testing.T) { func testAccCheckFunctionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_appsync_function" { continue } - apiID, functionID, err := tfappsync.DecodeFunctionID(rs.Primary.ID) - if err != nil { - return err - } + _, err := tfappsync.FindFunctionByTwoPartKey(ctx, conn, rs.Primary.Attributes["api_id"], 
rs.Primary.Attributes["function_id"]) - input := &appsync.GetFunctionInput{ - ApiId: aws.String(apiID), - FunctionId: aws.String(functionID), + if tfresource.NotFound(err) { + continue } - _, err = conn.GetFunctionWithContext(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return nil - } return err } + + return fmt.Errorf("Appsync Function %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckFunctionExists(ctx context.Context, name string, config *appsync.FunctionConfiguration) resource.TestCheckFunc { +func testAccCheckFunctionExists(ctx context.Context, n string, v *awstypes.FunctionConfiguration) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", name) + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) - - apiID, functionID, err := tfappsync.DecodeFunctionID(rs.Primary.ID) - if err != nil { - return err - } - - input := &appsync.GetFunctionInput{ - ApiId: aws.String(apiID), - FunctionId: aws.String(functionID), - } + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) - output, err := conn.GetFunctionWithContext(ctx, input) + output, err := tfappsync.FindFunctionByTwoPartKey(ctx, conn, rs.Primary.Attributes["api_id"], rs.Primary.Attributes["function_id"]) if err != nil { return err } - *config = *output.FunctionConfiguration + *v = *output return nil } diff --git a/internal/service/appsync/generate.go b/internal/service/appsync/generate.go index 3efb706569b..7e26da3e015 100644 --- a/internal/service/appsync/generate.go +++ b/internal/service/appsync/generate.go @@ -1,7 +1,8 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/listpages/main.go -AWSSDKVersion=2 -ListOps=ListApiKeys,ListDomainNames,ListGraphqlApis +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ServiceTagsMap -UpdateTags -KVTValues -SkipTypesImp //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/appsync/graphql_api.go b/internal/service/appsync/graphql_api.go index 005f8f6c637..f5cc5f6315c 100644 --- a/internal/service/appsync/graphql_api.go +++ b/internal/service/appsync/graphql_api.go @@ -11,14 +11,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -26,13 +28,15 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -var validateAuthorizerResultTTLInSeconds = validation.IntBetween(0, 3600) - -const DefaultAuthorizerResultTTLInSeconds = 300 +const ( + defaultAuthorizerResultTTLInSeconds 
= 300 +) // @SDKResource("aws_appsync_graphql_api", name="GraphQL API") // @Tags(identifierAttribute="arn") -func ResourceGraphQLAPI() *schema.Resource { +func resourceGraphQLAPI() *schema.Resource { + validateAuthorizerResultTTLInSeconds := validation.IntBetween(0, 3600) + return &schema.Resource{ CreateWithoutTimeout: resourceGraphQLAPICreate, ReadWithoutTimeout: resourceGraphQLAPIRead, @@ -50,9 +54,9 @@ func ResourceGraphQLAPI() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "authentication_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.AuthenticationType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.AuthenticationType](), }, "lambda_authorizer_config": { Type: schema.TypeList, @@ -63,7 +67,7 @@ func ResourceGraphQLAPI() *schema.Resource { "authorizer_result_ttl_in_seconds": { Type: schema.TypeInt, Optional: true, - Default: DefaultAuthorizerResultTTLInSeconds, + Default: defaultAuthorizerResultTTLInSeconds, ValidateFunc: validateAuthorizerResultTTLInSeconds, }, "authorizer_uri": { @@ -132,15 +136,15 @@ func ResourceGraphQLAPI() *schema.Resource { Computed: true, }, "authentication_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.AuthenticationType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.AuthenticationType](), }, "introspection_config": { - Type: schema.TypeString, - Optional: true, - Default: appsync.GraphQLApiIntrospectionConfigEnabled, - ValidateFunc: validation.StringInSlice(appsync.GraphQLApiIntrospectionConfig_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.GraphQLApiIntrospectionConfigEnabled, + ValidateDiagFunc: enum.Validate[awstypes.GraphQLApiIntrospectionConfig](), }, "lambda_authorizer_config": { Type: schema.TypeList, @@ -151,7 +155,7 @@ func 
ResourceGraphQLAPI() *schema.Resource { "authorizer_result_ttl_in_seconds": { Type: schema.TypeInt, Optional: true, - Default: DefaultAuthorizerResultTTLInSeconds, + Default: defaultAuthorizerResultTTLInSeconds, ValidateFunc: validateAuthorizerResultTTLInSeconds, }, "authorizer_uri": { @@ -182,23 +186,17 @@ func ResourceGraphQLAPI() *schema.Resource { Default: false, }, "field_log_level": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.FieldLogLevel_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.FieldLogLevel](), }, }, }, }, names.AttrName: { - Type: schema.TypeString, - Required: true, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !regexache.MustCompile(`[A-Za-z_][0-9A-Za-z_]*`).MatchString(value) { - errors = append(errors, fmt.Errorf("%q must match [A-Za-z_][0-9A-Za-z_]*", k)) - } - return - }, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`[A-Za-z_][0-9A-Za-z_]*`), ""), }, "openid_connect_config": { Type: schema.TypeList, @@ -264,9 +262,9 @@ func ResourceGraphQLAPI() *schema.Resource { Computed: true, }, names.AttrDefaultAction: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.DefaultAction_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.DefaultAction](), }, names.AttrUserPoolID: { Type: schema.TypeString, @@ -276,11 +274,11 @@ func ResourceGraphQLAPI() *schema.Resource { }, }, "visibility": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: appsync.GraphQLApiVisibilityGlobal, - ValidateFunc: validation.StringInSlice(appsync.GraphQLApiVisibility_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.GraphQLApiVisibilityGlobal, + ValidateDiagFunc: 
enum.Validate[awstypes.GraphQLApiVisibility](), }, "xray_enabled": { Type: schema.TypeBool, @@ -294,62 +292,62 @@ func ResourceGraphQLAPI() *schema.Resource { func resourceGraphQLAPICreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) name := d.Get(names.AttrName).(string) input := &appsync.CreateGraphqlApiInput{ - AuthenticationType: aws.String(d.Get("authentication_type").(string)), + AuthenticationType: awstypes.AuthenticationType(d.Get("authentication_type").(string)), Name: aws.String(name), Tags: getTagsIn(ctx), } if v, ok := d.GetOk("additional_authentication_provider"); ok { - input.AdditionalAuthenticationProviders = expandGraphQLAPIAdditionalAuthProviders(v.([]interface{}), meta.(*conns.AWSClient).Region) + input.AdditionalAuthenticationProviders = expandAdditionalAuthenticationProviders(v.([]interface{}), meta.(*conns.AWSClient).Region) + } + + if v, ok := d.GetOk("introspection_config"); ok { + input.IntrospectionConfig = awstypes.GraphQLApiIntrospectionConfig(v.(string)) } if v, ok := d.GetOk("lambda_authorizer_config"); ok { - input.LambdaAuthorizerConfig = expandGraphQLAPILambdaAuthorizerConfig(v.([]interface{})) + input.LambdaAuthorizerConfig = expandLambdaAuthorizerConfig(v.([]interface{})) } if v, ok := d.GetOk("log_config"); ok { - input.LogConfig = expandGraphQLAPILogConfig(v.([]interface{})) + input.LogConfig = expandLogConfig(v.([]interface{})) } if v, ok := d.GetOk("openid_connect_config"); ok { - input.OpenIDConnectConfig = expandGraphQLAPIOpenIDConnectConfig(v.([]interface{})) - } - - if v, ok := d.GetOk("user_pool_config"); ok { - input.UserPoolConfig = expandGraphQLAPIUserPoolConfig(v.([]interface{}), meta.(*conns.AWSClient).Region) - } - - if v, ok := d.GetOk("introspection_config"); ok { - input.IntrospectionConfig = aws.String(v.(string)) + input.OpenIDConnectConfig = 
expandOpenIDConnectConfig(v.([]interface{})) } if v, ok := d.GetOk("query_depth_limit"); ok { - input.QueryDepthLimit = aws.Int64(int64(v.(int))) + input.QueryDepthLimit = int32(v.(int)) } if v, ok := d.GetOk("resolver_count_limit"); ok { - input.ResolverCountLimit = aws.Int64(int64(v.(int))) + input.ResolverCountLimit = int32(v.(int)) + } + + if v, ok := d.GetOk("user_pool_config"); ok { + input.UserPoolConfig = expandUserPoolConfig(v.([]interface{}), meta.(*conns.AWSClient).Region) } if v, ok := d.GetOk("xray_enabled"); ok { - input.XrayEnabled = aws.Bool(v.(bool)) + input.XrayEnabled = v.(bool) } if v, ok := d.GetOk("visibility"); ok { - input.Visibility = aws.String(v.(string)) + input.Visibility = awstypes.GraphQLApiVisibility(v.(string)) } - output, err := conn.CreateGraphqlApiWithContext(ctx, input) + output, err := conn.CreateGraphqlApi(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating AppSync GraphQL API (%s): %s", name, err) } - d.SetId(aws.StringValue(output.GraphqlApi.ApiId)) + d.SetId(aws.ToString(output.GraphqlApi.ApiId)) if v, ok := d.GetOk(names.AttrSchema); ok { if err := putSchema(ctx, conn, d.Id(), v.(string), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -362,9 +360,9 @@ func resourceGraphQLAPICreate(ctx context.Context, d *schema.ResourceData, meta func resourceGraphQLAPIRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - api, err := FindGraphQLAPIByID(ctx, conn, d.Id()) + api, err := findGraphQLAPIByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] AppSync GraphQL API (%s) not found, removing from state", d.Id()) @@ -376,32 +374,30 @@ func resourceGraphQLAPIRead(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "reading AppSync GraphQL API (%s): %s", d.Id(), err) } - if 
err := d.Set("additional_authentication_provider", flattenGraphQLAPIAdditionalAuthenticationProviders(api.AdditionalAuthenticationProviders)); err != nil { + if err := d.Set("additional_authentication_provider", flattenAdditionalAuthenticationProviders(api.AdditionalAuthenticationProviders)); err != nil { return sdkdiag.AppendErrorf(diags, "setting additional_authentication_provider: %s", err) } d.Set(names.AttrARN, api.Arn) d.Set("authentication_type", api.AuthenticationType) - if err := d.Set("lambda_authorizer_config", flattenGraphQLAPILambdaAuthorizerConfig(api.LambdaAuthorizerConfig)); err != nil { + d.Set("introspection_config", api.IntrospectionConfig) + if err := d.Set("lambda_authorizer_config", flattenLambdaAuthorizerConfig(api.LambdaAuthorizerConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting lambda_authorizer_config: %s", err) } - if err := d.Set("log_config", flattenGraphQLAPILogConfig(api.LogConfig)); err != nil { + if err := d.Set("log_config", flattenLogConfig(api.LogConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting log_config: %s", err) } - if err := d.Set("openid_connect_config", flattenGraphQLAPIOpenIDConnectConfig(api.OpenIDConnectConfig)); err != nil { + d.Set(names.AttrName, api.Name) + if err := d.Set("openid_connect_config", flattenOpenIDConnectConfig(api.OpenIDConnectConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting openid_connect_config: %s", err) } - d.Set("introspection_config", api.IntrospectionConfig) - d.Set(names.AttrName, api.Name) d.Set("query_depth_limit", api.QueryDepthLimit) d.Set("resolver_count_limit", api.ResolverCountLimit) - d.Set("uris", aws.StringValueMap(api.Uris)) - if err := d.Set("user_pool_config", flattenGraphQLAPIUserPoolConfig(api.UserPoolConfig)); err != nil { + d.Set("uris", api.Uris) + if err := d.Set("user_pool_config", flattenUserPoolConfig(api.UserPoolConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting user_pool_config: %s", err) } 
d.Set("visibility", api.Visibility) - if err := d.Set("xray_enabled", api.XrayEnabled); err != nil { - return sdkdiag.AppendErrorf(diags, "setting xray_enabled: %s", err) - } + d.Set("xray_enabled", api.XrayEnabled) setTagsOut(ctx, api.Tags) @@ -410,52 +406,52 @@ func resourceGraphQLAPIRead(ctx context.Context, d *schema.ResourceData, meta in func resourceGraphQLAPIUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &appsync.UpdateGraphqlApiInput{ ApiId: aws.String(d.Id()), - AuthenticationType: aws.String(d.Get("authentication_type").(string)), + AuthenticationType: awstypes.AuthenticationType(d.Get("authentication_type").(string)), Name: aws.String(d.Get(names.AttrName).(string)), } if v, ok := d.GetOk("additional_authentication_provider"); ok { - input.AdditionalAuthenticationProviders = expandGraphQLAPIAdditionalAuthProviders(v.([]interface{}), meta.(*conns.AWSClient).Region) + input.AdditionalAuthenticationProviders = expandAdditionalAuthenticationProviders(v.([]interface{}), meta.(*conns.AWSClient).Region) + } + + if v, ok := d.GetOk("introspection_config"); ok { + input.IntrospectionConfig = awstypes.GraphQLApiIntrospectionConfig(v.(string)) } if v, ok := d.GetOk("lambda_authorizer_config"); ok { - input.LambdaAuthorizerConfig = expandGraphQLAPILambdaAuthorizerConfig(v.([]interface{})) + input.LambdaAuthorizerConfig = expandLambdaAuthorizerConfig(v.([]interface{})) } if v, ok := d.GetOk("log_config"); ok { - input.LogConfig = expandGraphQLAPILogConfig(v.([]interface{})) + input.LogConfig = expandLogConfig(v.([]interface{})) } if v, ok := d.GetOk("openid_connect_config"); ok { - input.OpenIDConnectConfig = expandGraphQLAPIOpenIDConnectConfig(v.([]interface{})) - } - - if v, ok := d.GetOk("user_pool_config"); ok { - 
input.UserPoolConfig = expandGraphQLAPIUserPoolConfig(v.([]interface{}), meta.(*conns.AWSClient).Region) - } - - if v, ok := d.GetOk("introspection_config"); ok { - input.IntrospectionConfig = aws.String(v.(string)) + input.OpenIDConnectConfig = expandOpenIDConnectConfig(v.([]interface{})) } if v, ok := d.GetOk("query_depth_limit"); ok { - input.QueryDepthLimit = aws.Int64(int64(v.(int))) + input.QueryDepthLimit = int32(v.(int)) } if v, ok := d.GetOk("resolver_count_limit"); ok { - input.ResolverCountLimit = aws.Int64(int64(v.(int))) + input.ResolverCountLimit = int32(v.(int)) + } + + if v, ok := d.GetOk("user_pool_config"); ok { + input.UserPoolConfig = expandUserPoolConfig(v.([]interface{}), meta.(*conns.AWSClient).Region) } if v, ok := d.GetOk("xray_enabled"); ok { - input.XrayEnabled = aws.Bool(v.(bool)) + input.XrayEnabled = v.(bool) } - _, err := conn.UpdateGraphqlApiWithContext(ctx, input) + _, err := conn.UpdateGraphqlApi(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating AppSync GraphQL API (%s): %s", d.Id(), err) @@ -463,7 +459,7 @@ func resourceGraphQLAPIUpdate(ctx context.Context, d *schema.ResourceData, meta if d.HasChange(names.AttrSchema) { if v, ok := d.GetOk(names.AttrSchema); ok { - if err := putSchema(ctx, conn, d.Id(), v.(string), d.Timeout(schema.TimeoutCreate)); err != nil { + if err := putSchema(ctx, conn, d.Id(), v.(string), d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -475,14 +471,14 @@ func resourceGraphQLAPIUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceGraphQLAPIDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) log.Printf("[DEBUG] Deleting AppSync GraphQL API: %s", d.Id()) - _, err := conn.DeleteGraphqlApiWithContext(ctx, &appsync.DeleteGraphqlApiInput{ + _, err := 
conn.DeleteGraphqlApi(ctx, &appsync.DeleteGraphqlApiInput{ ApiId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } @@ -493,33 +489,33 @@ func resourceGraphQLAPIDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func putSchema(ctx context.Context, conn *appsync.AppSync, apiID, definition string, timeout time.Duration) error { +func putSchema(ctx context.Context, conn *appsync.Client, apiID, definition string, timeout time.Duration) error { input := &appsync.StartSchemaCreationInput{ ApiId: aws.String(apiID), Definition: ([]byte)(definition), } - _, err := conn.StartSchemaCreationWithContext(ctx, input) + _, err := conn.StartSchemaCreation(ctx, input) if err != nil { return fmt.Errorf("creating AppSync GraphQL API (%s) schema: %w", apiID, err) } - if err := waitSchemaCreated(ctx, conn, apiID, timeout); err != nil { + if _, err := waitSchemaCreated(ctx, conn, apiID, timeout); err != nil { return fmt.Errorf("waiting for AppSync GraphQL API (%s) schema create: %w", apiID, err) } return nil } -func FindGraphQLAPIByID(ctx context.Context, conn *appsync.AppSync, id string) (*appsync.GraphqlApi, error) { +func findGraphQLAPIByID(ctx context.Context, conn *appsync.Client, id string) (*awstypes.GraphqlApi, error) { input := &appsync.GetGraphqlApiInput{ ApiId: aws.String(id), } - output, err := conn.GetGraphqlApiWithContext(ctx, input) + output, err := conn.GetGraphqlApi(ctx, input) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -537,14 +533,14 @@ func FindGraphQLAPIByID(ctx context.Context, conn *appsync.AppSync, id string) ( return output.GraphqlApi, nil } -func findSchemaCreationStatusByID(ctx context.Context, conn *appsync.AppSync, id string) (*appsync.GetSchemaCreationStatusOutput, error) { +func 
findSchemaCreationStatusByID(ctx context.Context, conn *appsync.Client, id string) (*appsync.GetSchemaCreationStatusOutput, error) { input := &appsync.GetSchemaCreationStatusInput{ ApiId: aws.String(id), } - output, err := conn.GetSchemaCreationStatusWithContext(ctx, input) + output, err := conn.GetSchemaCreationStatus(ctx, input) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -562,7 +558,7 @@ func findSchemaCreationStatusByID(ctx context.Context, conn *appsync.AppSync, id return output, nil } -func statusSchemaCreation(ctx context.Context, conn *appsync.AppSync, id string) retry.StateRefreshFunc { +func statusSchemaCreation(ctx context.Context, conn *appsync.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findSchemaCreationStatusByID(ctx, conn, id) @@ -574,14 +570,14 @@ func statusSchemaCreation(ctx context.Context, conn *appsync.AppSync, id string) return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func waitSchemaCreated(ctx context.Context, conn *appsync.AppSync, id string, timeout time.Duration) error { +func waitSchemaCreated(ctx context.Context, conn *appsync.Client, id string, timeout time.Duration) (*appsync.GetSchemaCreationStatusOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{appsync.SchemaStatusProcessing}, - Target: []string{appsync.SchemaStatusActive, appsync.SchemaStatusSuccess}, + Pending: enum.Slice(awstypes.SchemaStatusProcessing), + Target: enum.Slice(awstypes.SchemaStatusActive, awstypes.SchemaStatusSuccess), Refresh: statusSchemaCreation(ctx, conn, id), Timeout: timeout, } @@ -589,254 +585,245 @@ func waitSchemaCreated(ctx context.Context, conn *appsync.AppSync, id string, ti outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := 
outputRaw.(*appsync.GetSchemaCreationStatusOutput); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.Details))) + tfresource.SetLastError(err, errors.New(aws.ToString(output.Details))) + return output, err } - return err + return nil, err } -func expandGraphQLAPILogConfig(l []interface{}) *appsync.LogConfig { - if len(l) < 1 || l[0] == nil { +func expandLogConfig(tfList []interface{}) *awstypes.LogConfig { + if len(tfList) < 1 || tfList[0] == nil { return nil } - m := l[0].(map[string]interface{}) - - logConfig := &appsync.LogConfig{ - CloudWatchLogsRoleArn: aws.String(m["cloudwatch_logs_role_arn"].(string)), - FieldLogLevel: aws.String(m["field_log_level"].(string)), - ExcludeVerboseContent: aws.Bool(m["exclude_verbose_content"].(bool)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.LogConfig{ + CloudWatchLogsRoleArn: aws.String(tfMap["cloudwatch_logs_role_arn"].(string)), + ExcludeVerboseContent: tfMap["exclude_verbose_content"].(bool), + FieldLogLevel: awstypes.FieldLogLevel(tfMap["field_log_level"].(string)), } - return logConfig + return apiObject } -func expandGraphQLAPIOpenIDConnectConfig(l []interface{}) *appsync.OpenIDConnectConfig { - if len(l) < 1 || l[0] == nil { +func expandOpenIDConnectConfig(tfList []interface{}) *awstypes.OpenIDConnectConfig { + if len(tfList) < 1 || tfList[0] == nil { return nil } - m := l[0].(map[string]interface{}) - - openIDConnectConfig := &appsync.OpenIDConnectConfig{ - Issuer: aws.String(m[names.AttrIssuer].(string)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.OpenIDConnectConfig{ + Issuer: aws.String(tfMap[names.AttrIssuer].(string)), } - if v, ok := m["auth_ttl"].(int); ok && v != 0 { - openIDConnectConfig.AuthTTL = aws.Int64(int64(v)) + if v, ok := tfMap["auth_ttl"].(int); ok && v != 0 { + apiObject.AuthTTL = int64(v) } - if v, ok := m[names.AttrClientID].(string); ok && v != "" { - openIDConnectConfig.ClientId = aws.String(v) + if v, ok := 
tfMap[names.AttrClientID].(string); ok && v != "" { + apiObject.ClientId = aws.String(v) } - if v, ok := m["iat_ttl"].(int); ok && v != 0 { - openIDConnectConfig.IatTTL = aws.Int64(int64(v)) + if v, ok := tfMap["iat_ttl"].(int); ok && v != 0 { + apiObject.IatTTL = int64(v) } - return openIDConnectConfig + return apiObject } -func expandGraphQLAPIUserPoolConfig(l []interface{}, currentRegion string) *appsync.UserPoolConfig { - if len(l) < 1 || l[0] == nil { +func expandUserPoolConfig(tfList []interface{}, currentRegion string) *awstypes.UserPoolConfig { + if len(tfList) < 1 || tfList[0] == nil { return nil } - m := l[0].(map[string]interface{}) - - userPoolConfig := &appsync.UserPoolConfig{ + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.UserPoolConfig{ AwsRegion: aws.String(currentRegion), - DefaultAction: aws.String(m[names.AttrDefaultAction].(string)), - UserPoolId: aws.String(m[names.AttrUserPoolID].(string)), + DefaultAction: awstypes.DefaultAction(tfMap[names.AttrDefaultAction].(string)), + UserPoolId: aws.String(tfMap[names.AttrUserPoolID].(string)), } - if v, ok := m["app_id_client_regex"].(string); ok && v != "" { - userPoolConfig.AppIdClientRegex = aws.String(v) + if v, ok := tfMap["app_id_client_regex"].(string); ok && v != "" { + apiObject.AppIdClientRegex = aws.String(v) } - if v, ok := m["aws_region"].(string); ok && v != "" { - userPoolConfig.AwsRegion = aws.String(v) + if v, ok := tfMap["aws_region"].(string); ok && v != "" { + apiObject.AwsRegion = aws.String(v) } - return userPoolConfig + return apiObject } -func expandGraphQLAPILambdaAuthorizerConfig(l []interface{}) *appsync.LambdaAuthorizerConfig { - if len(l) < 1 || l[0] == nil { +func expandLambdaAuthorizerConfig(tfList []interface{}) *awstypes.LambdaAuthorizerConfig { + if len(tfList) < 1 || tfList[0] == nil { return nil } - m := l[0].(map[string]interface{}) - - lambdaAuthorizerConfig := &appsync.LambdaAuthorizerConfig{ - AuthorizerResultTtlInSeconds: 
aws.Int64(int64(m["authorizer_result_ttl_in_seconds"].(int))), - AuthorizerUri: aws.String(m["authorizer_uri"].(string)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.LambdaAuthorizerConfig{ + AuthorizerResultTtlInSeconds: int32(tfMap["authorizer_result_ttl_in_seconds"].(int)), + AuthorizerUri: aws.String(tfMap["authorizer_uri"].(string)), } - if v, ok := m["identity_validation_expression"].(string); ok && v != "" { - lambdaAuthorizerConfig.IdentityValidationExpression = aws.String(v) + if v, ok := tfMap["identity_validation_expression"].(string); ok && v != "" { + apiObject.IdentityValidationExpression = aws.String(v) } - return lambdaAuthorizerConfig + return apiObject } -func expandGraphQLAPIAdditionalAuthProviders(items []interface{}, currentRegion string) []*appsync.AdditionalAuthenticationProvider { - if len(items) < 1 { +func expandAdditionalAuthenticationProviders(tfList []interface{}, currentRegion string) []awstypes.AdditionalAuthenticationProvider { + if len(tfList) < 1 { return nil } - additionalAuthProviders := make([]*appsync.AdditionalAuthenticationProvider, 0, len(items)) - for _, l := range items { - if l == nil { + apiObjects := make([]awstypes.AdditionalAuthenticationProvider, 0) + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { continue } - m := l.(map[string]interface{}) - additionalAuthProvider := &appsync.AdditionalAuthenticationProvider{ - AuthenticationType: aws.String(m["authentication_type"].(string)), + apiObject := awstypes.AdditionalAuthenticationProvider{ + AuthenticationType: awstypes.AuthenticationType(tfMap["authentication_type"].(string)), } - if v, ok := m["openid_connect_config"]; ok { - additionalAuthProvider.OpenIDConnectConfig = expandGraphQLAPIOpenIDConnectConfig(v.([]interface{})) + if v, ok := tfMap["lambda_authorizer_config"]; ok { + apiObject.LambdaAuthorizerConfig = expandLambdaAuthorizerConfig(v.([]interface{})) } - if v, ok := 
m["user_pool_config"]; ok { - additionalAuthProvider.UserPoolConfig = expandGraphQLAPICognitoUserPoolConfig(v.([]interface{}), currentRegion) + if v, ok := tfMap["openid_connect_config"]; ok { + apiObject.OpenIDConnectConfig = expandOpenIDConnectConfig(v.([]interface{})) } - if v, ok := m["lambda_authorizer_config"]; ok { - additionalAuthProvider.LambdaAuthorizerConfig = expandGraphQLAPILambdaAuthorizerConfig(v.([]interface{})) + if v, ok := tfMap["user_pool_config"]; ok { + apiObject.UserPoolConfig = expandCognitoUserPoolConfig(v.([]interface{}), currentRegion) } - additionalAuthProviders = append(additionalAuthProviders, additionalAuthProvider) + apiObjects = append(apiObjects, apiObject) } - return additionalAuthProviders + return apiObjects } -func expandGraphQLAPICognitoUserPoolConfig(l []interface{}, currentRegion string) *appsync.CognitoUserPoolConfig { - if len(l) < 1 || l[0] == nil { +func expandCognitoUserPoolConfig(tfList []interface{}, currentRegion string) *awstypes.CognitoUserPoolConfig { + if len(tfList) < 1 || tfList[0] == nil { return nil } - m := l[0].(map[string]interface{}) - - userPoolConfig := &appsync.CognitoUserPoolConfig{ + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.CognitoUserPoolConfig{ AwsRegion: aws.String(currentRegion), - UserPoolId: aws.String(m[names.AttrUserPoolID].(string)), + UserPoolId: aws.String(tfMap[names.AttrUserPoolID].(string)), } - if v, ok := m["app_id_client_regex"].(string); ok && v != "" { - userPoolConfig.AppIdClientRegex = aws.String(v) + if v, ok := tfMap["app_id_client_regex"].(string); ok && v != "" { + apiObject.AppIdClientRegex = aws.String(v) } - if v, ok := m["aws_region"].(string); ok && v != "" { - userPoolConfig.AwsRegion = aws.String(v) + if v, ok := tfMap["aws_region"].(string); ok && v != "" { + apiObject.AwsRegion = aws.String(v) } - return userPoolConfig + return apiObject } -func flattenGraphQLAPILogConfig(logConfig *appsync.LogConfig) []interface{} { - if logConfig == nil 
{ +func flattenLogConfig(apiObject *awstypes.LogConfig) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "cloudwatch_logs_role_arn": aws.StringValue(logConfig.CloudWatchLogsRoleArn), - "field_log_level": aws.StringValue(logConfig.FieldLogLevel), - "exclude_verbose_content": aws.BoolValue(logConfig.ExcludeVerboseContent), + tfMap := map[string]interface{}{ + "cloudwatch_logs_role_arn": aws.ToString(apiObject.CloudWatchLogsRoleArn), + "exclude_verbose_content": apiObject.ExcludeVerboseContent, + "field_log_level": apiObject.FieldLogLevel, } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenGraphQLAPIOpenIDConnectConfig(openIDConnectConfig *appsync.OpenIDConnectConfig) []interface{} { - if openIDConnectConfig == nil { +func flattenOpenIDConnectConfig(apiObject *awstypes.OpenIDConnectConfig) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "auth_ttl": aws.Int64Value(openIDConnectConfig.AuthTTL), - names.AttrClientID: aws.StringValue(openIDConnectConfig.ClientId), - "iat_ttl": aws.Int64Value(openIDConnectConfig.IatTTL), - names.AttrIssuer: aws.StringValue(openIDConnectConfig.Issuer), + tfMap := map[string]interface{}{ + "auth_ttl": apiObject.AuthTTL, + names.AttrClientID: aws.ToString(apiObject.ClientId), + "iat_ttl": apiObject.IatTTL, + names.AttrIssuer: aws.ToString(apiObject.Issuer), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenGraphQLAPIUserPoolConfig(userPoolConfig *appsync.UserPoolConfig) []interface{} { - if userPoolConfig == nil { +func flattenUserPoolConfig(apiObject *awstypes.UserPoolConfig) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "aws_region": aws.StringValue(userPoolConfig.AwsRegion), - names.AttrDefaultAction: aws.StringValue(userPoolConfig.DefaultAction), - names.AttrUserPoolID: aws.StringValue(userPoolConfig.UserPoolId), + tfMap := 
map[string]interface{}{ + "aws_region": aws.ToString(apiObject.AwsRegion), + names.AttrDefaultAction: apiObject.DefaultAction, + names.AttrUserPoolID: aws.ToString(apiObject.UserPoolId), } - if userPoolConfig.AppIdClientRegex != nil { - m["app_id_client_regex"] = aws.StringValue(userPoolConfig.AppIdClientRegex) + if apiObject.AppIdClientRegex != nil { + tfMap["app_id_client_regex"] = aws.ToString(apiObject.AppIdClientRegex) } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenGraphQLAPILambdaAuthorizerConfig(lambdaAuthorizerConfig *appsync.LambdaAuthorizerConfig) []interface{} { - if lambdaAuthorizerConfig == nil { +func flattenLambdaAuthorizerConfig(apiObject *awstypes.LambdaAuthorizerConfig) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "authorizer_uri": aws.StringValue(lambdaAuthorizerConfig.AuthorizerUri), - } - - if lambdaAuthorizerConfig.AuthorizerResultTtlInSeconds != nil { - m["authorizer_result_ttl_in_seconds"] = aws.Int64Value(lambdaAuthorizerConfig.AuthorizerResultTtlInSeconds) - } else { - m["authorizer_result_ttl_in_seconds"] = DefaultAuthorizerResultTTLInSeconds + tfMap := map[string]interface{}{ + "authorizer_result_ttl_in_seconds": apiObject.AuthorizerResultTtlInSeconds, + "authorizer_uri": aws.ToString(apiObject.AuthorizerUri), } - if lambdaAuthorizerConfig.IdentityValidationExpression != nil { - m["identity_validation_expression"] = aws.StringValue(lambdaAuthorizerConfig.IdentityValidationExpression) + if apiObject.IdentityValidationExpression != nil { + tfMap["identity_validation_expression"] = aws.ToString(apiObject.IdentityValidationExpression) } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenGraphQLAPIAdditionalAuthenticationProviders(additionalAuthenticationProviders []*appsync.AdditionalAuthenticationProvider) []interface{} { - if len(additionalAuthenticationProviders) == 0 { +func flattenAdditionalAuthenticationProviders(apiObjects 
[]awstypes.AdditionalAuthenticationProvider) []interface{} { + if len(apiObjects) == 0 { return []interface{}{} } - result := make([]interface{}, len(additionalAuthenticationProviders)) - for i, provider := range additionalAuthenticationProviders { - result[i] = map[string]interface{}{ - "authentication_type": aws.StringValue(provider.AuthenticationType), - "lambda_authorizer_config": flattenGraphQLAPILambdaAuthorizerConfig(provider.LambdaAuthorizerConfig), - "openid_connect_config": flattenGraphQLAPIOpenIDConnectConfig(provider.OpenIDConnectConfig), - "user_pool_config": flattenGraphQLAPICognitoUserPoolConfig(provider.UserPoolConfig), + tfList := make([]interface{}, len(apiObjects)) + for i, apiObject := range apiObjects { + tfList[i] = map[string]interface{}{ + "authentication_type": apiObject.AuthenticationType, + "lambda_authorizer_config": flattenLambdaAuthorizerConfig(apiObject.LambdaAuthorizerConfig), + "openid_connect_config": flattenOpenIDConnectConfig(apiObject.OpenIDConnectConfig), + "user_pool_config": flattenCognitoUserPoolConfig(apiObject.UserPoolConfig), } } - return result + return tfList } -func flattenGraphQLAPICognitoUserPoolConfig(userPoolConfig *appsync.CognitoUserPoolConfig) []interface{} { - if userPoolConfig == nil { +func flattenCognitoUserPoolConfig(apiObject *awstypes.CognitoUserPoolConfig) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "aws_region": aws.StringValue(userPoolConfig.AwsRegion), - names.AttrUserPoolID: aws.StringValue(userPoolConfig.UserPoolId), + tfMap := map[string]interface{}{ + "aws_region": aws.ToString(apiObject.AwsRegion), + names.AttrUserPoolID: aws.ToString(apiObject.UserPoolId), } - if userPoolConfig.AppIdClientRegex != nil { - m["app_id_client_regex"] = aws.StringValue(userPoolConfig.AppIdClientRegex) + if apiObject.AppIdClientRegex != nil { + tfMap["app_id_client_regex"] = aws.ToString(apiObject.AppIdClientRegex) } - return []interface{}{m} + return 
[]interface{}{tfMap} } diff --git a/internal/service/appsync/graphql_api_test.go b/internal/service/appsync/graphql_api_test.go index d8fcf253307..cf2ddcd7a0a 100644 --- a/internal/service/appsync/graphql_api_test.go +++ b/internal/service/appsync/graphql_api_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -23,12 +23,12 @@ import ( func testAccGraphQLAPI_basic(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -66,12 +66,12 @@ func testAccGraphQLAPI_basic(t *testing.T) { func testAccGraphQLAPI_disappears(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -90,12 +90,12 @@ func testAccGraphQLAPI_disappears(t *testing.T) { func testAccGraphQLAPI_schema(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -137,12 +137,12 @@ func testAccGraphQLAPI_schema(t *testing.T) { func testAccGraphQLAPI_authenticationType(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -172,12 +172,12 @@ func testAccGraphQLAPI_authenticationType(t *testing.T) { func testAccGraphQLAPI_AuthenticationType_apiKey(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { 
acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -202,12 +202,12 @@ func testAccGraphQLAPI_AuthenticationType_apiKey(t *testing.T) { func testAccGraphQLAPI_AuthenticationType_iam(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -232,13 +232,13 @@ func testAccGraphQLAPI_AuthenticationType_iam(t *testing.T) { func testAccGraphQLAPI_AuthenticationType_amazonCognitoUserPools(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ 
-265,12 +265,12 @@ func testAccGraphQLAPI_AuthenticationType_amazonCognitoUserPools(t *testing.T) { func testAccGraphQLAPI_AuthenticationType_openIDConnect(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -295,13 +295,13 @@ func testAccGraphQLAPI_AuthenticationType_openIDConnect(t *testing.T) { func testAccGraphQLAPI_AuthenticationType_lambda(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -328,13 +328,13 @@ func testAccGraphQLAPI_AuthenticationType_lambda(t *testing.T) { func testAccGraphQLAPI_log(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := 
"aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -360,13 +360,13 @@ func testAccGraphQLAPI_log(t *testing.T) { func testAccGraphQLAPI_Log_fieldLogLevel(t *testing.T) { ctx := acctest.Context(t) - var api1, api2, api3 appsync.GraphqlApi + var api1, api2, api3 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -412,13 +412,13 @@ func testAccGraphQLAPI_Log_fieldLogLevel(t *testing.T) { func testAccGraphQLAPI_Log_excludeVerboseContent(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -455,12 +455,12 @@ func testAccGraphQLAPI_Log_excludeVerboseContent(t *testing.T) { func testAccGraphQLAPI_OpenIDConnect_authTTL(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -496,12 +496,12 @@ func testAccGraphQLAPI_OpenIDConnect_authTTL(t *testing.T) { func testAccGraphQLAPI_OpenIDConnect_clientID(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -537,12 +537,12 @@ func testAccGraphQLAPI_OpenIDConnect_clientID(t *testing.T) { func testAccGraphQLAPI_OpenIDConnect_iatTTL(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -578,12 +578,12 @@ func testAccGraphQLAPI_OpenIDConnect_iatTTL(t *testing.T) { func testAccGraphQLAPI_OpenIDConnect_issuer(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -617,13 +617,13 @@ func testAccGraphQLAPI_OpenIDConnect_issuer(t *testing.T) { func testAccGraphQLAPI_name(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -648,13 +648,13 @@ func testAccGraphQLAPI_name(t *testing.T) { func testAccGraphQLAPI_UserPool_region(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -692,13 +692,13 @@ func testAccGraphQLAPI_UserPool_region(t *testing.T) { func testAccGraphQLAPI_UserPool_defaultAction(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -736,13 +736,13 @@ func testAccGraphQLAPI_UserPool_defaultAction(t *testing.T) { func testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerURI(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -776,13 +776,13 @@ func testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerURI(t *testing.T) { func testAccGraphQLAPI_LambdaAuthorizerConfig_identityValidationExpression(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -818,12 +818,12 @@ func testAccGraphQLAPI_LambdaAuthorizerConfig_identityValidationExpression(t *te func testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerResultTTLInSeconds(t *testing.T) { ctx := acctest.Context(t) - var api1, api2, api3, api4 appsync.GraphqlApi + var api1, api2, api3, api4 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -875,12 +875,12 @@ func testAccGraphQLAPI_LambdaAuthorizerConfig_authorizerResultTTLInSeconds(t *te func testAccGraphQLAPI_tags(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -921,12 +921,12 @@ func testAccGraphQLAPI_tags(t *testing.T) { func testAccGraphQLAPI_AdditionalAuthentication_apiKey(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -956,12 +956,12 @@ func testAccGraphQLAPI_AdditionalAuthentication_apiKey(t *testing.T) { func 
testAccGraphQLAPI_AdditionalAuthentication_iam(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -991,13 +991,13 @@ func testAccGraphQLAPI_AdditionalAuthentication_iam(t *testing.T) { func testAccGraphQLAPI_AdditionalAuthentication_cognitoUserPools(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1028,12 +1028,12 @@ func testAccGraphQLAPI_AdditionalAuthentication_cognitoUserPools(t *testing.T) { func testAccGraphQLAPI_AdditionalAuthentication_openIDConnect(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, 
t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1064,13 +1064,13 @@ func testAccGraphQLAPI_AdditionalAuthentication_openIDConnect(t *testing.T) { func testAccGraphQLAPI_AdditionalAuthentication_lambda(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1103,14 +1103,14 @@ func testAccGraphQLAPI_AdditionalAuthentication_lambda(t *testing.T) { func testAccGraphQLAPI_AdditionalAuthentication_multiple(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) cognitoUserPoolResourceName := "aws_cognito_user_pool.test" lambdaAuthorizerResourceName := "aws_lambda_function.test" resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, 
names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1155,12 +1155,12 @@ func testAccGraphQLAPI_AdditionalAuthentication_multiple(t *testing.T) { func testAccGraphQLAPI_xrayEnabled(t *testing.T) { ctx := acctest.Context(t) - var api1, api2 appsync.GraphqlApi + var api1, api2 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1185,12 +1185,12 @@ func testAccGraphQLAPI_xrayEnabled(t *testing.T) { func testAccGraphQLAPI_visibility(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1214,12 +1214,12 @@ func testAccGraphQLAPI_visibility(t *testing.T) { func testAccGraphQLAPI_introspectionConfig(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" 
resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1242,12 +1242,12 @@ func testAccGraphQLAPI_introspectionConfig(t *testing.T) { func testAccGraphQLAPI_queryDepthLimit(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1270,12 +1270,12 @@ func testAccGraphQLAPI_queryDepthLimit(t *testing.T) { func testAccGraphQLAPI_resolverCountLimit(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi + var api1 awstypes.GraphqlApi rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_appsync_graphql_api.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckGraphQLAPIDestroy(ctx), @@ -1298,7 +1298,8 @@ func 
testAccGraphQLAPI_resolverCountLimit(t *testing.T) { func testAccCheckGraphQLAPIDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_appsync_graphql_api" { continue @@ -1320,18 +1321,14 @@ func testAccCheckGraphQLAPIDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckGraphQLAPIExists(ctx context.Context, n string, v *appsync.GraphqlApi) resource.TestCheckFunc { +func testAccCheckGraphQLAPIExists(ctx context.Context, n string, v *awstypes.GraphqlApi) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No AppSync GraphQL API ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) output, err := tfappsync.FindGraphQLAPIByID(ctx, conn, rs.Primary.ID) @@ -1352,9 +1349,9 @@ func testAccCheckGraphQLAPITypeExists(ctx context.Context, n, typeName string) r return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) - _, err := tfappsync.FindTypeByThreePartKey(ctx, conn, rs.Primary.ID, appsync.OutputTypeSdl, typeName) + _, err := tfappsync.FindTypeByThreePartKey(ctx, conn, rs.Primary.ID, awstypes.TypeDefinitionFormatSdl, typeName) return err } @@ -1576,7 +1573,7 @@ resource "aws_lambda_function" "test" { function_name = %[1]q handler = "lambdatest.handler" role = aws_iam_role.test.arn - runtime = "nodejs14.x" + runtime = "nodejs20.x" publish = true } diff --git a/internal/service/appsync/list_pages_gen.go 
b/internal/service/appsync/list_pages_gen.go new file mode 100644 index 00000000000..e91b9807591 --- /dev/null +++ b/internal/service/appsync/list_pages_gen.go @@ -0,0 +1,59 @@ +// Code generated by "internal/generate/listpages/main.go -AWSSDKVersion=2 -ListOps=ListApiKeys,ListDomainNames,ListGraphqlApis"; DO NOT EDIT. + +package appsync + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" +) + +func listAPIKeysPages(ctx context.Context, conn *appsync.Client, input *appsync.ListApiKeysInput, fn func(*appsync.ListApiKeysOutput, bool) bool) error { + for { + output, err := conn.ListApiKeys(ctx, input) + if err != nil { + return err + } + + lastPage := aws.ToString(output.NextToken) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.NextToken = output.NextToken + } + return nil +} +func listDomainNamesPages(ctx context.Context, conn *appsync.Client, input *appsync.ListDomainNamesInput, fn func(*appsync.ListDomainNamesOutput, bool) bool) error { + for { + output, err := conn.ListDomainNames(ctx, input) + if err != nil { + return err + } + + lastPage := aws.ToString(output.NextToken) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.NextToken = output.NextToken + } + return nil +} +func listGraphQLAPIsPages(ctx context.Context, conn *appsync.Client, input *appsync.ListGraphqlApisInput, fn func(*appsync.ListGraphqlApisOutput, bool) bool) error { + for { + output, err := conn.ListGraphqlApis(ctx, input) + if err != nil { + return err + } + + lastPage := aws.ToString(output.NextToken) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.NextToken = output.NextToken + } + return nil +} diff --git a/internal/service/appsync/resolver.go b/internal/service/appsync/resolver.go index 1d5fe08f8a7..772de89f585 100644 --- a/internal/service/appsync/resolver.go +++ b/internal/service/appsync/resolver.go @@ -10,13 +10,16 @@ import ( "strings" "time" - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -25,7 +28,7 @@ import ( ) // @SDKResource("aws_appsync_resolver", name="Resolver) -func ResourceResolver() *schema.Resource { +func resourceResolver() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceResolverCreate, ReadWithoutTimeout: resourceResolverRead, @@ -84,10 +87,10 @@ func ResourceResolver() *schema.Resource { ForceNew: true, }, "kind": { - Type: schema.TypeString, - Optional: true, - Default: appsync.ResolverKindUnit, - ValidateFunc: validation.StringInSlice(appsync.ResolverKind_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: awstypes.ResolverKindUnit, + ValidateDiagFunc: enum.Validate[awstypes.ResolverKind](), }, "max_batch_size": { Type: schema.TypeInt, @@ -127,9 +130,9 @@ func ResourceResolver() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrName: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.RuntimeName_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: 
enum.Validate[awstypes.RuntimeName](), }, "runtime_version": { Type: schema.TypeString, @@ -145,14 +148,14 @@ func ResourceResolver() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "conflict_detection": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(appsync.ConflictDetectionType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ConflictDetectionType](), }, "conflict_handler": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(appsync.ConflictHandlerType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ConflictHandlerType](), }, "lambda_conflict_handler_config": { Type: schema.TypeList, @@ -182,32 +185,33 @@ func ResourceResolver() *schema.Resource { func resourceResolverCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) apiID, typeName, fieldName := d.Get("api_id").(string), d.Get(names.AttrType).(string), d.Get(names.AttrField).(string) + id := resolverCreateResourceID(apiID, typeName, fieldName) input := &appsync.CreateResolverInput{ ApiId: aws.String(apiID), FieldName: aws.String(fieldName), - Kind: aws.String(d.Get("kind").(string)), + Kind: awstypes.ResolverKind(d.Get("kind").(string)), TypeName: aws.String(typeName), } - if v, ok := d.GetOk("code"); ok { - input.Code = aws.String(v.(string)) - } - - if v, ok := d.GetOkExists("max_batch_size"); ok { - input.MaxBatchSize = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("caching_config"); ok { + input.CachingConfig = expandResolverCachingConfig(v.([]interface{})) } - if v, ok := d.GetOk("sync_config"); ok && len(v.([]interface{})) > 0 { - input.SyncConfig = expandSyncConfig(v.([]interface{})) + if v, ok := d.GetOk("code"); ok { 
+ input.Code = aws.String(v.(string)) } if v, ok := d.GetOk("data_source"); ok { input.DataSourceName = aws.String(v.(string)) } + if v, ok := d.GetOk("max_batch_size"); ok { + input.MaxBatchSize = int32(v.(int)) + } + if v, ok := d.GetOk("pipeline_config"); ok && len(v.([]interface{})) > 0 { input.PipelineConfig = expandPipelineConfig(v.([]interface{})) } @@ -220,94 +224,79 @@ func resourceResolverCreate(ctx context.Context, d *schema.ResourceData, meta in input.ResponseMappingTemplate = aws.String(v.(string)) } - if v, ok := d.GetOk("caching_config"); ok { - input.CachingConfig = expandResolverCachingConfig(v.([]interface{})) - } - if v, ok := d.GetOk("runtime"); ok && len(v.([]interface{})) > 0 { input.Runtime = expandRuntime(v.([]interface{})) } - mutexKey := "appsync-schema-" + apiID - conns.GlobalMutexKV.Lock(mutexKey) - defer conns.GlobalMutexKV.Unlock(mutexKey) + if v, ok := d.GetOk("sync_config"); ok && len(v.([]interface{})) > 0 { + input.SyncConfig = expandSyncConfig(v.([]interface{})) + } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.CreateResolverWithContext(ctx, input) - }, appsync.ErrCodeConcurrentModificationException) + _, err := retryResolverOp(ctx, apiID, func() (interface{}, error) { + return conn.CreateResolver(ctx, input) + }) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating AppSync Resolver: %s", err) + return sdkdiag.AppendErrorf(diags, "creating AppSync Resolver (%s): %s", id, err) } - d.SetId(apiID + "-" + typeName + "-" + fieldName) + d.SetId(id) return append(diags, resourceResolverRead(ctx, d, meta)...) 
} func resourceResolverRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, typeName, fieldName, err := DecodeResolverID(d.Id()) + apiID, typeName, fieldName, err := resolverParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - input := &appsync.GetResolverInput{ - ApiId: aws.String(apiID), - TypeName: aws.String(typeName), - FieldName: aws.String(fieldName), - } - - resp, err := conn.GetResolverWithContext(ctx, input) + resolver, err := findResolverByThreePartKey(ctx, conn, apiID, typeName, fieldName) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) && !d.IsNewResource() { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] AppSync Resolver (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading AppSync Resolver (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Appsync Resolver (%s): %s", d.Id(), err) } - resolver := resp.Resolver d.Set("api_id", apiID) d.Set(names.AttrARN, resolver.ResolverArn) - d.Set(names.AttrType, resolver.TypeName) - d.Set(names.AttrField, resolver.FieldName) + if err := d.Set("caching_config", flattenCachingConfig(resolver.CachingConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting caching_config: %s", err) + } + d.Set("code", resolver.Code) d.Set("data_source", resolver.DataSourceName) - d.Set("request_template", resolver.RequestMappingTemplate) - d.Set("response_template", resolver.ResponseMappingTemplate) + d.Set(names.AttrField, resolver.FieldName) d.Set("kind", resolver.Kind) d.Set("max_batch_size", resolver.MaxBatchSize) - d.Set("code", resolver.Code) - - if err := d.Set("sync_config", flattenSyncConfig(resolver.SyncConfig)); err != nil { - return sdkdiag.AppendErrorf(diags, 
"setting sync_config: %s", err) - } - if err := d.Set("pipeline_config", flattenPipelineConfig(resolver.PipelineConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting pipeline_config: %s", err) } - - if err := d.Set("caching_config", flattenCachingConfig(resolver.CachingConfig)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting caching_config: %s", err) - } - + d.Set("request_template", resolver.RequestMappingTemplate) + d.Set("response_template", resolver.ResponseMappingTemplate) if err := d.Set("runtime", flattenRuntime(resolver.Runtime)); err != nil { return sdkdiag.AppendErrorf(diags, "setting runtime: %s", err) } + if err := d.Set("sync_config", flattenSyncConfig(resolver.SyncConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting sync_config: %s", err) + } + d.Set(names.AttrType, resolver.TypeName) return diags } func resourceResolverUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, typeName, fieldName, err := DecodeResolverID(d.Id()) + apiID, typeName, fieldName, err := resolverParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -315,10 +304,14 @@ func resourceResolverUpdate(ctx context.Context, d *schema.ResourceData, meta in input := &appsync.UpdateResolverInput{ ApiId: aws.String(apiID), FieldName: aws.String(fieldName), - Kind: aws.String(d.Get("kind").(string)), + Kind: awstypes.ResolverKind(d.Get("kind").(string)), TypeName: aws.String(typeName), } + if v, ok := d.GetOk("caching_config"); ok { + input.CachingConfig = expandResolverCachingConfig(v.([]interface{})) + } + if v, ok := d.GetOk("code"); ok { input.Code = aws.String(v.(string)) } @@ -327,10 +320,14 @@ func resourceResolverUpdate(ctx context.Context, d *schema.ResourceData, meta in input.DataSourceName = aws.String(v.(string)) } + if v, ok := 
d.GetOk("max_batch_size"); ok { + input.MaxBatchSize = int32(v.(int)) + } + if v, ok := d.GetOk("pipeline_config"); ok { config := v.([]interface{})[0].(map[string]interface{}) - input.PipelineConfig = &appsync.PipelineConfig{ - Functions: flex.ExpandStringList(config["functions"].([]interface{})), + input.PipelineConfig = &awstypes.PipelineConfig{ + Functions: flex.ExpandStringValueList(config["functions"].([]interface{})), } } @@ -342,29 +339,17 @@ func resourceResolverUpdate(ctx context.Context, d *schema.ResourceData, meta in input.ResponseMappingTemplate = aws.String(v.(string)) } - if v, ok := d.GetOk("caching_config"); ok { - input.CachingConfig = expandResolverCachingConfig(v.([]interface{})) - } - - if v, ok := d.GetOkExists("max_batch_size"); ok { - input.MaxBatchSize = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("runtime"); ok && len(v.([]interface{})) > 0 { + input.Runtime = expandRuntime(v.([]interface{})) } if v, ok := d.GetOk("sync_config"); ok && len(v.([]interface{})) > 0 { input.SyncConfig = expandSyncConfig(v.([]interface{})) } - if v, ok := d.GetOk("runtime"); ok && len(v.([]interface{})) > 0 { - input.Runtime = expandRuntime(v.([]interface{})) - } - - mutexKey := "appsync-schema-" + apiID - conns.GlobalMutexKV.Lock(mutexKey) - defer conns.GlobalMutexKV.Unlock(mutexKey) - - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.UpdateResolverWithContext(ctx, input) - }, appsync.ErrCodeConcurrentModificationException) + _, err = retryResolverOp(ctx, apiID, func() (interface{}, error) { + return conn.UpdateResolver(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "updating AppSync Resolver (%s): %s", d.Id(), err) @@ -375,28 +360,23 @@ func resourceResolverUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceResolverDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, typeName, fieldName, err := DecodeResolverID(d.Id()) + apiID, typeName, fieldName, err := resolverParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - input := &appsync.DeleteResolverInput{ - ApiId: aws.String(apiID), - FieldName: aws.String(fieldName), - TypeName: aws.String(typeName), - } - - mutexKey := "appsync-schema-" + apiID - conns.GlobalMutexKV.Lock(mutexKey) - defer conns.GlobalMutexKV.Unlock(mutexKey) - - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.DeleteResolverWithContext(ctx, input) - }, appsync.ErrCodeConcurrentModificationException) + log.Printf("[INFO] Deleting Appsync Resolver: %s", d.Id()) + _, err = retryResolverOp(ctx, apiID, func() (interface{}, error) { + return conn.DeleteResolver(ctx, &appsync.DeleteResolverInput{ + ApiId: aws.String(apiID), + FieldName: aws.String(fieldName), + TypeName: aws.String(typeName), + }) + }) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } @@ -407,77 +387,124 @@ func resourceResolverDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } -func DecodeResolverID(id string) (string, string, string, error) { - idParts := strings.SplitN(id, "-", 3) - if len(idParts) != 3 { - return "", "", "", fmt.Errorf("expected ID in format ApiID-TypeName-FieldName, received: %s", id) +const resolverResourceIDSeparator = "-" + +func resolverCreateResourceID(apiID, typeName, fieldName string) string { + parts := []string{apiID, typeName, fieldName} + id := strings.Join(parts, resolverResourceIDSeparator) + + return id +} + +func resolverParseResourceID(id string) (string, string, string, error) { + parts := strings.SplitN(id, resolverResourceIDSeparator, 3) + + if len(parts) != 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" 
{ + return "", "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected API-ID%[2]sTYPE-NAME%[2]sFIELD-NAME", id, resolverResourceIDSeparator) } - return idParts[0], idParts[1], idParts[2], nil + + return parts[0], parts[1], parts[2], nil } -func expandResolverCachingConfig(l []interface{}) *appsync.CachingConfig { - if len(l) < 1 || l[0] == nil { - return nil +func retryResolverOp(ctx context.Context, apiID string, f func() (interface{}, error)) (interface{}, error) { //nolint:unparam + mutexKey := "appsync-schema-" + apiID + conns.GlobalMutexKV.Lock(mutexKey) + defer conns.GlobalMutexKV.Unlock(mutexKey) + + const ( + timeout = 2 * time.Minute + ) + return tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, timeout, f) +} + +func findResolverByThreePartKey(ctx context.Context, conn *appsync.Client, apiID, typeName, fieldName string) (*awstypes.Resolver, error) { + input := &appsync.GetResolverInput{ + ApiId: aws.String(apiID), + FieldName: aws.String(fieldName), + TypeName: aws.String(typeName), } - m := l[0].(map[string]interface{}) + output, err := conn.GetResolver(ctx, input) - cachingConfig := &appsync.CachingConfig{ - CachingKeys: flex.ExpandStringSet(m["caching_keys"].(*schema.Set)), + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } } - if v, ok := m["ttl"].(int); ok && v != 0 { - cachingConfig.Ttl = aws.Int64(int64(v)) + if err != nil { + return nil, err } - return cachingConfig + if output == nil || output.Resolver == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Resolver, nil } -func expandPipelineConfig(l []interface{}) *appsync.PipelineConfig { - if len(l) < 1 || l[0] == nil { +func expandResolverCachingConfig(tfList []interface{}) *awstypes.CachingConfig { + if len(tfList) < 1 || tfList[0] == nil { return nil } - m := l[0].(map[string]interface{}) + tfMap := tfList[0].(map[string]interface{}) + apiObject := 
&awstypes.CachingConfig{ + CachingKeys: flex.ExpandStringValueSet(tfMap["caching_keys"].(*schema.Set)), + } + + if v, ok := tfMap["ttl"].(int); ok && v != 0 { + apiObject.Ttl = int64(v) + } + + return apiObject +} + +func expandPipelineConfig(tfList []interface{}) *awstypes.PipelineConfig { + if len(tfList) < 1 || tfList[0] == nil { + return nil + } - config := &appsync.PipelineConfig{} + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.PipelineConfig{} - if v, ok := m["functions"].([]interface{}); ok && len(v) > 0 { - config.Functions = flex.ExpandStringList(v) + if v, ok := tfMap["functions"].([]interface{}); ok && len(v) > 0 { + apiObject.Functions = flex.ExpandStringValueList(v) } - return config + return apiObject } -func flattenPipelineConfig(c *appsync.PipelineConfig) []interface{} { - if c == nil { +func flattenPipelineConfig(apiObject *awstypes.PipelineConfig) []interface{} { + if apiObject == nil { return nil } - if len(c.Functions) == 0 { + if len(apiObject.Functions) == 0 { return nil } - m := map[string]interface{}{ - "functions": flex.FlattenStringList(c.Functions), + tfMap := map[string]interface{}{ + "functions": apiObject.Functions, } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenCachingConfig(c *appsync.CachingConfig) []interface{} { - if c == nil { +func flattenCachingConfig(apiObject *awstypes.CachingConfig) []interface{} { + if apiObject == nil { return nil } - if len(c.CachingKeys) == 0 && aws.Int64Value(c.Ttl) == 0 { + if len(apiObject.CachingKeys) == 0 && apiObject.Ttl == 0 { return nil } - m := map[string]interface{}{ - "caching_keys": flex.FlattenStringSet(c.CachingKeys), - "ttl": int(aws.Int64Value(c.Ttl)), + tfMap := map[string]interface{}{ + "caching_keys": apiObject.CachingKeys, + "ttl": apiObject.Ttl, } - return []interface{}{m} + return []interface{}{tfMap} } diff --git a/internal/service/appsync/resolver_test.go b/internal/service/appsync/resolver_test.go index 22427f358df..300dcef8cbe 
100644 --- a/internal/service/appsync/resolver_test.go +++ b/internal/service/appsync/resolver_test.go @@ -9,26 +9,25 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfappsync "github.com/hashicorp/terraform-provider-aws/internal/service/appsync" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func testAccResolver_basic(t *testing.T) { ctx := acctest.Context(t) - var resolver1 appsync.Resolver + var resolver1 awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -56,12 +55,12 @@ func testAccResolver_basic(t *testing.T) { func testAccResolver_code(t *testing.T) { ctx := acctest.Context(t) - var resolver1 appsync.Resolver + var resolver1 awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, 
appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -86,12 +85,12 @@ func testAccResolver_code(t *testing.T) { func testAccResolver_syncConfig(t *testing.T) { ctx := acctest.Context(t) - var resolver1 appsync.Resolver + var resolver1 awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -116,14 +115,12 @@ func testAccResolver_syncConfig(t *testing.T) { func testAccResolver_disappears(t *testing.T) { ctx := acctest.Context(t) - var api1 appsync.GraphqlApi - var resolver1 appsync.Resolver + var resolver1 awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) - appsyncGraphqlApiResourceName := "aws_appsync_graphql_api.test" resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -131,7 +128,6 @@ func testAccResolver_disappears(t *testing.T) { { Config: testAccResolverConfig_basic(rName), Check: 
resource.ComposeTestCheckFunc( - testAccCheckGraphQLAPIExists(ctx, appsyncGraphqlApiResourceName, &api1), testAccCheckResolverExists(ctx, resourceName, &resolver1), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfappsync.ResourceResolver(), resourceName), ), @@ -143,12 +139,12 @@ func testAccResolver_disappears(t *testing.T) { func testAccResolver_dataSource(t *testing.T) { ctx := acctest.Context(t) - var resolver1, resolver2 appsync.Resolver + var resolver1, resolver2 awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -178,12 +174,12 @@ func testAccResolver_dataSource(t *testing.T) { func testAccResolver_DataSource_lambda(t *testing.T) { ctx := acctest.Context(t) - var resolver appsync.Resolver + var resolver awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -206,12 +202,12 @@ func testAccResolver_DataSource_lambda(t *testing.T) { func testAccResolver_requestTemplate(t *testing.T) { ctx := acctest.Context(t) - var resolver1, resolver2 appsync.Resolver + var resolver1, 
resolver2 awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -241,12 +237,12 @@ func testAccResolver_requestTemplate(t *testing.T) { func testAccResolver_responseTemplate(t *testing.T) { ctx := acctest.Context(t) - var resolver1, resolver2 appsync.Resolver + var resolver1, resolver2 awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -276,12 +272,12 @@ func testAccResolver_responseTemplate(t *testing.T) { func testAccResolver_multipleResolvers(t *testing.T) { ctx := acctest.Context(t) - var resolver appsync.Resolver + var resolver awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -307,12 +303,12 @@ func testAccResolver_multipleResolvers(t *testing.T) { func testAccResolver_pipeline(t *testing.T) { ctx := acctest.Context(t) - var resolver appsync.Resolver + var resolver awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -336,12 +332,12 @@ func testAccResolver_pipeline(t *testing.T) { func testAccResolver_caching(t *testing.T) { ctx := acctest.Context(t) - var resolver appsync.Resolver + var resolver awstypes.Resolver rName := fmt.Sprintf("tfacctest%d", sdkacctest.RandInt()) resourceName := "aws_appsync_resolver.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckResolverDestroy(ctx), @@ -365,69 +361,46 @@ func testAccResolver_caching(t *testing.T) { func testAccCheckResolverDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + for _, rs := range s.RootModule().Resources { if rs.Type != "aws_appsync_resolver" { continue } - apiID, 
typeName, fieldName, err := tfappsync.DecodeResolverID(rs.Primary.ID) - - if err != nil { - return err - } - - input := &appsync.GetResolverInput{ - ApiId: aws.String(apiID), - TypeName: aws.String(typeName), - FieldName: aws.String(fieldName), - } - - _, err = conn.GetResolverWithContext(ctx, input) + _, err := tfappsync.FindResolverByThreePartKey(ctx, conn, rs.Primary.Attributes["api_id"], rs.Primary.Attributes[names.AttrType], rs.Primary.Attributes[names.AttrField]) - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { + if tfresource.NotFound(err) { continue } if err != nil { return err } + + return fmt.Errorf("Appsync Resolver %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckResolverExists(ctx context.Context, name string, resolver *appsync.Resolver) resource.TestCheckFunc { +func testAccCheckResolverExists(ctx context.Context, n string, v *awstypes.Resolver) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", name) - } - if rs.Primary.ID == "" { - return fmt.Errorf("Resource has no ID: %s", name) - } - - apiID, typeName, fieldName, err := tfappsync.DecodeResolverID(rs.Primary.ID) - - if err != nil { - return err + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) - - input := &appsync.GetResolverInput{ - ApiId: aws.String(apiID), - TypeName: aws.String(typeName), - FieldName: aws.String(fieldName), - } + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) - output, err := conn.GetResolverWithContext(ctx, input) + output, err := tfappsync.FindResolverByThreePartKey(ctx, conn, rs.Primary.Attributes["api_id"], rs.Primary.Attributes[names.AttrType], rs.Primary.Attributes[names.AttrField]) if err != nil { return err } - *resolver = *output.Resolver + *v = *output return nil } @@ -473,7 +446,7 @@ resource 
"aws_appsync_datasource" "test" { } func testAccResolverConfig_basic(rName string) string { - return testAccResolverConfig_base(rName) + ` + return acctest.ConfigCompose(testAccResolverConfig_base(rName), ` resource "aws_appsync_resolver" "test" { api_id = aws_appsync_graphql_api.test.id field = "singlePost" @@ -499,11 +472,11 @@ EOF #end EOF } -` +`) } func testAccResolverConfig_dataSource(rName string) string { - return testAccResolverConfig_base(rName) + ` + return acctest.ConfigCompose(testAccResolverConfig_base(rName), ` resource "aws_appsync_datasource" "test2" { api_id = aws_appsync_graphql_api.test.id name = "test_ds_2" @@ -539,14 +512,14 @@ EOF #end EOF } -` +`) } func testAccResolverConfig_dataSourceLambda(rName string) string { - return testAccDatasourceConfig_baseLambda(rName) + fmt.Sprintf(` + return acctest.ConfigCompose(testAccDatasourceConfig_baseLambda(rName), fmt.Sprintf(` resource "aws_appsync_graphql_api" "test" { authentication_type = "API_KEY" - name = %q + name = %[1]q schema = < 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets appsync service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates appsync service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn appsynciface.AppSyncAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *appsync.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*appsync.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn appsynciface.AppSyncAPI, identifier st if len(removedTags) > 0 { input := &appsync.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn appsynciface.AppSyncAPI, identifier st Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn appsynciface.AppSyncAPI, identifier st // UpdateTags updates appsync service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).AppSyncConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).AppSyncClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/appsync/type.go b/internal/service/appsync/type.go index 06009b9e942..b208a850a52 100644 --- a/internal/service/appsync/type.go +++ b/internal/service/appsync/type.go @@ -9,25 +9,28 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_appsync_type") -func ResourceType() *schema.Resource { +// @SDKResource("aws_appsync_type", name="Type") +func resourceType() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTypeCreate, ReadWithoutTimeout: resourceTypeRead, UpdateWithoutTimeout: resourceTypeUpdate, DeleteWithoutTimeout: resourceTypeDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -51,9 +54,9 @@ func ResourceType() *schema.Resource { Required: true, }, 
names.AttrFormat: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(appsync.TypeDefinitionFormat_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.TypeDefinitionFormat](), }, names.AttrName: { Type: schema.TypeString, @@ -65,36 +68,37 @@ func ResourceType() *schema.Resource { func resourceTypeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) apiID := d.Get("api_id").(string) - - params := &appsync.CreateTypeInput{ + input := &appsync.CreateTypeInput{ ApiId: aws.String(apiID), Definition: aws.String(d.Get("definition").(string)), - Format: aws.String(d.Get(names.AttrFormat).(string)), + Format: awstypes.TypeDefinitionFormat(d.Get(names.AttrFormat).(string)), } - out, err := conn.CreateTypeWithContext(ctx, params) + output, err := conn.CreateType(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "creating Appsync Type: %s", err) } - d.SetId(fmt.Sprintf("%s:%s:%s", apiID, aws.StringValue(out.Type.Format), aws.StringValue(out.Type.Name))) + d.SetId(typeCreateResourceID(apiID, output.Type.Format, aws.ToString(output.Type.Name))) return append(diags, resourceTypeRead(ctx, d, meta)...) 
} func resourceTypeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - apiID, format, name, err := DecodeTypeID(d.Id()) + apiID, format, name, err := typeParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Appsync Type %q: %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - resp, err := FindTypeByThreePartKey(ctx, conn, apiID, format, name) + resp, err := findTypeByThreePartKey(ctx, conn, apiID, format, name) + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] AppSync Type (%s) not found, removing from state", d.Id()) d.SetId("") @@ -117,18 +121,24 @@ func resourceTypeRead(ctx context.Context, d *schema.ResourceData, meta interfac func resourceTypeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - params := &appsync.UpdateTypeInput{ - ApiId: aws.String(d.Get("api_id").(string)), - Format: aws.String(d.Get(names.AttrFormat).(string)), - TypeName: aws.String(d.Get(names.AttrName).(string)), + apiID, format, name, err := typeParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + input := &appsync.UpdateTypeInput{ + ApiId: aws.String(apiID), Definition: aws.String(d.Get("definition").(string)), + Format: format, + TypeName: aws.String(name), } - _, err := conn.UpdateTypeWithContext(ctx, params) + _, err = conn.UpdateType(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Appsync Type %q: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating Appsync Type (%s): %s", d.Id(), err) } return append(diags, resourceTypeRead(ctx, d, meta)...) 
@@ -136,27 +146,72 @@ func resourceTypeUpdate(ctx context.Context, d *schema.ResourceData, meta interf func resourceTypeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).AppSyncConn(ctx) + conn := meta.(*conns.AWSClient).AppSyncClient(ctx) - input := &appsync.DeleteTypeInput{ - ApiId: aws.String(d.Get("api_id").(string)), - TypeName: aws.String(d.Get(names.AttrName).(string)), + apiID, _, name, err := typeParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) } - _, err := conn.DeleteTypeWithContext(ctx, input) + + log.Printf("[INFO] Deleting Appsync Type: %s", d.Id()) + _, err = conn.DeleteType(ctx, &appsync.DeleteTypeInput{ + ApiId: aws.String(apiID), + TypeName: aws.String(name), + }) + + if errs.IsA[*awstypes.NotFoundException](err) { + return diags + } + if err != nil { - if tfawserr.ErrCodeEquals(err, appsync.ErrCodeNotFoundException) { - return diags - } - return sdkdiag.AppendErrorf(diags, "deleting Appsync Type: %s", err) + return sdkdiag.AppendErrorf(diags, "deleting Appsync Type (%s): %s", d.Id(), err) } return diags } -func DecodeTypeID(id string) (string, string, string, error) { - parts := strings.Split(id, ":") - if len(parts) != 3 { - return "", "", "", fmt.Errorf("Unexpected format of ID (%q), expected API-ID:FORMAT:TYPE-NAME", id) +const typeResourceIDSeparator = ":" + +func typeCreateResourceID(apiID string, format awstypes.TypeDefinitionFormat, name string) string { + parts := []string{apiID, string(format), name} // nosemgrep:ci.typed-enum-conversion + id := strings.Join(parts, typeResourceIDSeparator) + + return id +} + +func typeParseResourceID(id string) (string, awstypes.TypeDefinitionFormat, string, error) { + parts := strings.Split(id, typeResourceIDSeparator) + + if len(parts) != 3 || parts[0] == "" || parts[1] == "" || parts[2] == "" { + return "", "", "", fmt.Errorf("unexpected format for ID (%[1]s), 
expected API-ID%[2]sFORMAT%[2]sTYPE-NAME", id, typeResourceIDSeparator) + } + + return parts[0], awstypes.TypeDefinitionFormat(parts[1]), parts[2], nil +} + +func findTypeByThreePartKey(ctx context.Context, conn *appsync.Client, apiID string, format awstypes.TypeDefinitionFormat, name string) (*awstypes.Type, error) { + input := &appsync.GetTypeInput{ + ApiId: aws.String(apiID), + Format: format, + TypeName: aws.String(name), + } + + output, err := conn.GetType(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } } - return parts[0], parts[1], parts[2], nil + + if err != nil { + return nil, err + } + + if output == nil || output.Type == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Type, nil } diff --git a/internal/service/appsync/type_test.go b/internal/service/appsync/type_test.go index e4cf8ab4ab6..4decf95f45d 100644 --- a/internal/service/appsync/type_test.go +++ b/internal/service/appsync/type_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/appsync" + awstypes "github.com/aws/aws-sdk-go-v2/service/appsync/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,12 +22,12 @@ import ( func testAccType_basic(t *testing.T) { ctx := acctest.Context(t) - var typ appsync.Type + var typ awstypes.Type resourceName := "aws_appsync_type.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckTypeDestroy(ctx), @@ -53,12 +53,12 @@ func testAccType_basic(t *testing.T) { func testAccType_disappears(t *testing.T) { ctx := acctest.Context(t) - var typ appsync.Type + var typ awstypes.Type resourceName := "aws_appsync_type.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, appsync.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.AppSyncEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.AppSyncServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckTypeDestroy(ctx), @@ -77,51 +77,46 @@ func testAccType_disappears(t *testing.T) { func testAccCheckTypeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_appsync_type" { continue } - apiID, format, name, err := tfappsync.DecodeTypeID(rs.Primary.ID) - if err != nil { - return err + _, err := tfappsync.FindTypeByThreePartKey(ctx, conn, rs.Primary.Attributes["api_id"], awstypes.TypeDefinitionFormat(rs.Primary.Attributes[names.AttrFormat]), rs.Primary.Attributes[names.AttrName]) + + if tfresource.NotFound(err) { + continue } - _, err = tfappsync.FindTypeByThreePartKey(ctx, conn, apiID, format, name) - if err == nil { - if tfresource.NotFound(err) { - return nil - } + if err != nil { return err } - return nil + return fmt.Errorf("Appsync Type %s still exists", rs.Primary.ID) } + return nil } } -func testAccCheckTypeExists(ctx context.Context, resourceName string, typ *appsync.Type) resource.TestCheckFunc { +func testAccCheckTypeExists(ctx 
context.Context, n string, v *awstypes.Type) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Appsync Type Not found in state: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - apiID, format, name, err := tfappsync.DecodeTypeID(rs.Primary.ID) - if err != nil { - return err - } + conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncClient(ctx) + + output, err := tfappsync.FindTypeByThreePartKey(ctx, conn, rs.Primary.Attributes["api_id"], awstypes.TypeDefinitionFormat(rs.Primary.Attributes[names.AttrFormat]), rs.Primary.Attributes[names.AttrName]) - conn := acctest.Provider.Meta().(*conns.AWSClient).AppSyncConn(ctx) - out, err := tfappsync.FindTypeByThreePartKey(ctx, conn, apiID, format, name) if err != nil { return err } - *typ = *out + *v = *output return nil } diff --git a/internal/service/appsync/wait.go b/internal/service/appsync/wait.go deleted file mode 100644 index 59bd7cb9c48..00000000000 --- a/internal/service/appsync/wait.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package appsync - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go/service/appsync" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -const ( - apiCacheAvailableTimeout = 60 * time.Minute - apiCacheDeletedTimeout = 60 * time.Minute - domainNameAPIAssociationTimeout = 60 * time.Minute - domainNameAPIDisassociationTimeout = 60 * time.Minute -) - -func waitAPICacheAvailable(ctx context.Context, conn *appsync.AppSync, id string) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{appsync.ApiCacheStatusCreating, appsync.ApiCacheStatusModifying}, - Target: []string{appsync.ApiCacheStatusAvailable}, - Refresh: StatusAPICache(ctx, conn, id), - Timeout: apiCacheAvailableTimeout, - } - - _, err := stateConf.WaitForStateContext(ctx) - - return err -} - -func waitAPICacheDeleted(ctx context.Context, conn *appsync.AppSync, id string) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{appsync.ApiCacheStatusDeleting}, - Target: []string{}, - Refresh: StatusAPICache(ctx, conn, id), - Timeout: apiCacheDeletedTimeout, - } - - _, err := stateConf.WaitForStateContext(ctx) - - return err -} - -func waitDomainNameAPIAssociation(ctx context.Context, conn *appsync.AppSync, id string) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{appsync.AssociationStatusProcessing}, - Target: []string{appsync.AssociationStatusSuccess}, - Refresh: statusDomainNameAPIAssociation(ctx, conn, id), - Timeout: domainNameAPIAssociationTimeout, - } - - _, err := stateConf.WaitForStateContext(ctx) - - return err -} - -func waitDomainNameAPIDisassociation(ctx context.Context, conn *appsync.AppSync, id string) error { - stateConf := &retry.StateChangeConf{ - Pending: []string{appsync.AssociationStatusProcessing}, - Target: []string{}, - Refresh: statusDomainNameAPIAssociation(ctx, conn, id), - Timeout: domainNameAPIDisassociationTimeout, - } - - _, err := stateConf.WaitForStateContext(ctx) - - 
return err -} diff --git a/internal/service/athena/service_endpoint_resolver_gen.go b/internal/service/athena/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..6bb495fdaae --- /dev/null +++ b/internal/service/athena/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package athena + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + athena_sdkv2 "github.com/aws/aws-sdk-go-v2/service/athena" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ athena_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver athena_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: athena_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params athena_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up athena endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*athena_sdkv2.Options) { + return func(o *athena_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/athena/service_endpoints_gen_test.go b/internal/service/athena/service_endpoints_gen_test.go index 620d6674300..7777c256879 100644 --- a/internal/service/athena/service_endpoints_gen_test.go +++ b/internal/service/athena/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := athena_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
athena_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := athena_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), athena_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: 
endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/athena/service_package_gen.go b/internal/service/athena/service_package_gen.go index 34edfc2f4ae..c35211a04fa 100644 --- a/internal/service/athena/service_package_gen.go +++ b/internal/service/athena/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package athena @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" athena_sdkv2 "github.com/aws/aws-sdk-go-v2/service/athena" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -74,19 +73,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*athena_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return athena_sdkv2.NewFromConfig(cfg, func(o *athena_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return athena_sdkv2.NewFromConfig(cfg, + athena_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/auditmanager/service_endpoint_resolver_gen.go b/internal/service/auditmanager/service_endpoint_resolver_gen.go new 
file mode 100644 index 00000000000..5694d8436b9 --- /dev/null +++ b/internal/service/auditmanager/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package auditmanager + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + auditmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/auditmanager" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ auditmanager_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver auditmanager_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: auditmanager_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params auditmanager_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling 
FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up auditmanager endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*auditmanager_sdkv2.Options) { + return func(o *auditmanager_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/auditmanager/service_endpoints_gen_test.go b/internal/service/auditmanager/service_endpoints_gen_test.go index 6d2d605dd65..18afec6a538 100644 --- a/internal/service/auditmanager/service_endpoints_gen_test.go +++ b/internal/service/auditmanager/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := auditmanager_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), auditmanager_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - 
return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := auditmanager_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), auditmanager_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git 
a/internal/service/auditmanager/service_package_gen.go b/internal/service/auditmanager/service_package_gen.go index 3e80724c8fb..99ead68abd5 100644 --- a/internal/service/auditmanager/service_package_gen.go +++ b/internal/service/auditmanager/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package auditmanager @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" auditmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/auditmanager" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -83,19 +82,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*auditmanager_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return auditmanager_sdkv2.NewFromConfig(cfg, func(o *auditmanager_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return auditmanager_sdkv2.NewFromConfig(cfg, + auditmanager_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/autoscaling/consts.go b/internal/service/autoscaling/consts.go index 3edf0cf99c7..c61a28eb52e 100644 --- 
a/internal/service/autoscaling/consts.go +++ b/internal/service/autoscaling/consts.go @@ -121,3 +121,7 @@ func (lifecycleHookLifecycleTransition) Values() []lifecycleHookLifecycleTransit lifecycleHookLifecycleTransitionInstanceTerminating, } } + +const ( + elbInstanceStateInService = "InService" +) diff --git a/internal/service/autoscaling/group.go b/internal/service/autoscaling/group.go index f209abfecc9..bc5712684f6 100644 --- a/internal/service/autoscaling/group.go +++ b/internal/service/autoscaling/group.go @@ -16,8 +16,9 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/autoscaling" awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + elasticloadbalancingv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -33,7 +34,6 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" - tfelb "github.com/hashicorp/terraform-provider-aws/internal/service/elb" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -614,6 +614,11 @@ func resourceGroup() *schema.Resource { ValidateDiagFunc: enum.Validate[awstypes.LocalStorageType](), }, }, + "max_spot_price_as_percentage_of_optimal_on_demand_price": { + Type: schema.TypeInt, + Optional: true, + 
ValidateFunc: validation.IntAtLeast(1), + }, "memory_gib_per_vcpu": { Type: schema.TypeList, Optional: true, @@ -918,14 +923,16 @@ func resourceGroup() *schema.Resource { }, }, "max_group_prepared_capacity": { - Type: schema.TypeInt, - Optional: true, - Default: defaultWarmPoolMaxGroupPreparedCapacity, + Type: schema.TypeInt, + Optional: true, + Default: defaultWarmPoolMaxGroupPreparedCapacity, + ValidateFunc: validation.IntAtLeast(defaultWarmPoolMaxGroupPreparedCapacity), }, "min_size": { - Type: schema.TypeInt, - Optional: true, - Default: 0, + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.IntAtLeast(0), }, "pool_state": { Type: schema.TypeString, @@ -1204,7 +1211,7 @@ func resourceGroupCreate(ctx context.Context, d *schema.ResourceData, meta inter return nil } - if err := waitGroupCapacitySatisfied(ctx, conn, meta.(*conns.AWSClient).ELBConn(ctx), meta.(*conns.AWSClient).ELBV2Conn(ctx), d.Id(), f, startTime, d.Get("ignore_failed_scaling_activities").(bool), timeout); err != nil { + if err := waitGroupCapacitySatisfied(ctx, conn, meta.(*conns.AWSClient).ELBClient(ctx), meta.(*conns.AWSClient).ELBV2Client(ctx), d.Id(), f, startTime, d.Get("ignore_failed_scaling_activities").(bool), timeout); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Auto Scaling Group (%s) capacity satisfied: %s", d.Id(), err) } } @@ -1701,7 +1708,7 @@ func resourceGroupUpdate(ctx context.Context, d *schema.ResourceData, meta inter return nil } - if err := waitGroupCapacitySatisfied(ctx, conn, meta.(*conns.AWSClient).ELBConn(ctx), meta.(*conns.AWSClient).ELBV2Conn(ctx), d.Id(), f, startTime, d.Get("ignore_failed_scaling_activities").(bool), timeout); err != nil { + if err := waitGroupCapacitySatisfied(ctx, conn, meta.(*conns.AWSClient).ELBClient(ctx), meta.(*conns.AWSClient).ELBV2Client(ctx), d.Id(), f, startTime, d.Get("ignore_failed_scaling_activities").(bool), timeout); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Auto Scaling 
Group (%s) capacity satisfied: %s", d.Id(), err) } } @@ -1958,16 +1965,16 @@ func drainWarmPool(ctx context.Context, conn *autoscaling.Client, name string, t return nil } -func findELBInstanceStates(ctx context.Context, conn *elb.ELB, g *awstypes.AutoScalingGroup) (map[string]map[string]string, error) { +func findELBInstanceStates(ctx context.Context, conn *elasticloadbalancing.Client, g *awstypes.AutoScalingGroup) (map[string]map[string]string, error) { instanceStates := make(map[string]map[string]string) for _, v := range g.LoadBalancerNames { lbName := v - input := &elb.DescribeInstanceHealthInput{ + input := &elasticloadbalancing.DescribeInstanceHealthInput{ LoadBalancerName: aws.String(lbName), } - output, err := conn.DescribeInstanceHealthWithContext(ctx, input) + output, err := conn.DescribeInstanceHealth(ctx, input) if err != nil { return nil, fmt.Errorf("reading load balancer (%s) instance health: %w", lbName, err) @@ -1992,16 +1999,16 @@ func findELBInstanceStates(ctx context.Context, conn *elb.ELB, g *awstypes.AutoS return instanceStates, nil } -func findELBV2InstanceStates(ctx context.Context, conn *elbv2.ELBV2, g *awstypes.AutoScalingGroup) (map[string]map[string]string, error) { +func findELBV2InstanceStates(ctx context.Context, conn *elasticloadbalancingv2.Client, g *awstypes.AutoScalingGroup) (map[string]map[string]string, error) { instanceStates := make(map[string]map[string]string) for _, v := range g.TargetGroupARNs { targetGroupARN := v - input := &elbv2.DescribeTargetHealthInput{ + input := &elasticloadbalancingv2.DescribeTargetHealthInput{ TargetGroupArn: aws.String(targetGroupARN), } - output, err := conn.DescribeTargetHealthWithContext(ctx, input) + output, err := conn.DescribeTargetHealth(ctx, input) if err != nil { return nil, fmt.Errorf("reading target group (%s) instance health: %w", targetGroupARN, err) @@ -2018,7 +2025,7 @@ func findELBV2InstanceStates(ctx context.Context, conn *elbv2.ELBV2, g *awstypes if instanceID == "" { continue } 
- state := aws.ToString(v.TargetHealth.State) + state := string(v.TargetHealth.State) if state == "" { continue } @@ -2277,7 +2284,7 @@ func findWarmPoolByName(ctx context.Context, conn *autoscaling.Client, name stri return findWarmPool(ctx, conn, input) } -func statusGroupCapacity(ctx context.Context, conn *autoscaling.Client, elbconn *elb.ELB, elbv2conn *elbv2.ELBV2, name string, cb func(int, int) error, startTime time.Time, ignoreFailedScalingActivities bool) retry.StateRefreshFunc { +func statusGroupCapacity(ctx context.Context, conn *autoscaling.Client, elbconn *elasticloadbalancing.Client, elbv2conn *elasticloadbalancingv2.Client, name string, cb func(int, int) error, startTime time.Time, ignoreFailedScalingActivities bool) retry.StateRefreshFunc { return func() (interface{}, string, error) { if !ignoreFailedScalingActivities { // Check for fatal error in activity logs. @@ -2352,7 +2359,7 @@ func statusGroupCapacity(ctx context.Context, conn *autoscaling.Client, elbconn inAll := true for _, v := range lbInstanceStates { - if state, ok := v[instanceID]; ok && state != tfelb.InstanceStateInService { + if state, ok := v[instanceID]; ok && state != elbInstanceStateInService { inAll = false break } @@ -2360,7 +2367,7 @@ func statusGroupCapacity(ctx context.Context, conn *autoscaling.Client, elbconn if inAll { for _, v := range targetGroupInstanceStates { - if state, ok := v[instanceID]; ok && state != elbv2.TargetHealthStateEnumHealthy { + if state, ok := v[instanceID]; ok && state != string(elasticloadbalancingv2types.TargetHealthStateEnumHealthy) { inAll = false break } @@ -2532,7 +2539,7 @@ func statusWarmPoolInstanceCount(ctx context.Context, conn *autoscaling.Client, } } -func waitGroupCapacitySatisfied(ctx context.Context, conn *autoscaling.Client, elbconn *elb.ELB, elbv2conn *elbv2.ELBV2, name string, cb func(int, int) error, startTime time.Time, ignoreFailedScalingActivities bool, timeout time.Duration) error { +func waitGroupCapacitySatisfied(ctx 
context.Context, conn *autoscaling.Client, elbconn *elasticloadbalancing.Client, elbv2conn *elasticloadbalancingv2.Client, name string, cb func(int, int) error, startTime time.Time, ignoreFailedScalingActivities bool, timeout time.Duration) error { stateConf := &retry.StateChangeConf{ Target: []string{"ok"}, Refresh: statusGroupCapacity(ctx, conn, elbconn, elbv2conn, name, cb, startTime, ignoreFailedScalingActivities), @@ -2886,6 +2893,10 @@ func expandInstanceRequirements(tfMap map[string]interface{}) *awstypes.Instance apiObject.LocalStorageTypes = flex.ExpandStringyValueSet[awstypes.LocalStorageType](v) } + if v, ok := tfMap["max_spot_price_as_percentage_of_optimal_on_demand_price"].(int); ok && v != 0 { + apiObject.MaxSpotPriceAsPercentageOfOptimalOnDemandPrice = aws.Int32(int32(v)) + } + if v, ok := tfMap["memory_gib_per_vcpu"].([]interface{}); ok && len(v) > 0 { apiObject.MemoryGiBPerVCpu = expandMemoryGiBPerVCPURequest(v[0].(map[string]interface{})) } @@ -3255,7 +3266,7 @@ func expandPutWarmPoolInput(name string, tfMap map[string]interface{}) *autoscal apiObject.InstanceReusePolicy = expandInstanceReusePolicy(v[0].(map[string]interface{})) } - if v, ok := tfMap["max_group_prepared_capacity"].(int); ok && v != 0 { + if v, ok := tfMap["max_group_prepared_capacity"].(int); ok { apiObject.MaxGroupPreparedCapacity = aws.Int32(int32(v)) } @@ -3674,6 +3685,10 @@ func flattenInstanceRequirements(apiObject *awstypes.InstanceRequirements) map[s tfMap["local_storage_types"] = apiObject.LocalStorageTypes } + if v := apiObject.MaxSpotPriceAsPercentageOfOptimalOnDemandPrice; v != nil { + tfMap["max_spot_price_as_percentage_of_optimal_on_demand_price"] = aws.ToInt32(v) + } + if v := apiObject.MemoryGiBPerVCpu; v != nil { tfMap["memory_gib_per_vcpu"] = []interface{}{flattenMemoryGiBPerVCPU(v)} } diff --git a/internal/service/autoscaling/group_data_source.go b/internal/service/autoscaling/group_data_source.go index 74f7fe0d87f..fdf8754b2a9 100644 --- 
a/internal/service/autoscaling/group_data_source.go +++ b/internal/service/autoscaling/group_data_source.go @@ -289,6 +289,10 @@ func dataSourceGroup() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "max_spot_price_as_percentage_of_optimal_on_demand_price": { + Type: schema.TypeInt, + Computed: true, + }, "memory_gib_per_vcpu": { Type: schema.TypeList, Computed: true, diff --git a/internal/service/autoscaling/group_test.go b/internal/service/autoscaling/group_test.go index c0a2a04918f..1ddc98410a1 100644 --- a/internal/service/autoscaling/group_test.go +++ b/internal/service/autoscaling/group_test.go @@ -13,7 +13,8 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/autoscaling" awstypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" - "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + elasticloadbalancingv2types "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" @@ -21,7 +22,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfautoscaling "github.com/hashicorp/terraform-provider-aws/internal/service/autoscaling" - tfelbv2 "github.com/hashicorp/terraform-provider-aws/internal/service/elbv2" + tfelasticloadbalancingv2 "github.com/hashicorp/terraform-provider-aws/internal/service/elbv2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -1810,7 +1811,7 @@ func TestAccAutoScalingGroup_ALBTargetGroups_elbCapacity(t *testing.T) { resourceName := "aws_autoscaling_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) subnetCount := 2 - var tg 
elbv2.TargetGroup + var tg elasticloadbalancingv2types.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -1874,6 +1875,18 @@ func TestAccAutoScalingGroup_warmPool(t *testing.T) { resource.TestCheckNoResourceAttr(resourceName, "warm_pool.#"), ), }, + { + Config: testAccGroupConfig_warmPoolZero(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckGroupExists(ctx, resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "warm_pool.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "warm_pool.0.instance_reuse_policy.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "warm_pool.0.instance_reuse_policy.0.reuse_on_scale_in", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "warm_pool.0.max_group_prepared_capacity", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "warm_pool.0.min_size", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "warm_pool.0.pool_state", "Stopped"), + ), + }, }, }) } @@ -3486,6 +3499,42 @@ func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instance }) } +func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_maxSpotPriceAsPercentageOfOptimalOnDemandPrice(t *testing.T) { + ctx := acctest.Context(t) + var group awstypes.AutoScalingGroup + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_autoscaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.AutoScalingServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGroupConfig_mixedInstancesPolicyLaunchTemplateOverrideInstanceRequirements(rName, + `max_spot_price_as_percentage_of_optimal_on_demand_price = 75 + memory_mib { + min = 500 + } + 
vcpu_count { + min = 1 + }`), + Check: resource.ComposeTestCheckFunc( + testAccCheckGroupExists(ctx, resourceName, &group), + resource.TestCheckResourceAttr(resourceName, "mixed_instances_policy.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "mixed_instances_policy.0.launch_template.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "mixed_instances_policy.0.launch_template.0.override.#", acctest.Ct1), + + resource.TestCheckResourceAttr(resourceName, "mixed_instances_policy.0.launch_template.0.override.0.instance_requirements.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "mixed_instances_policy.0.launch_template.0.override.0.instance_requirements.0.max_spot_price_as_percentage_of_optimal_on_demand_price", "75"), + ), + }, + testAccGroupImportStep(resourceName), + }, + }) +} + func TestAccAutoScalingGroup_MixedInstancesPolicyLaunchTemplateOverride_instanceRequirements_memoryGiBPerVCPU(t *testing.T) { ctx := acctest.Context(t) var group awstypes.AutoScalingGroup @@ -4135,16 +4184,16 @@ func testAccCheckInstanceRefreshStatus(ctx context.Context, v *awstypes.AutoScal } } -func testAccCheckLBTargetGroupExists(ctx context.Context, n string, v *elbv2.TargetGroup) resource.TestCheckFunc { +func testAccCheckLBTargetGroupExists(ctx context.Context, n string, v *elasticloadbalancingv2types.TargetGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) - output, err := tfelbv2.FindTargetGroupByARN(ctx, conn, rs.Primary.ID) + output, err := tfelasticloadbalancingv2.FindTargetGroupByARN(ctx, conn, rs.Primary.ID) if err != nil { return err @@ -4156,13 +4205,13 @@ func testAccCheckLBTargetGroupExists(ctx context.Context, n string, v *elbv2.Tar } } -// testAccCheckALBTargetGroupHealthy checks an 
*elbv2.TargetGroup to make +// testAccCheckALBTargetGroupHealthy checks an *awstypes.TargetGroup to make // sure that all instances in it are healthy. -func testAccCheckALBTargetGroupHealthy(ctx context.Context, v *elbv2.TargetGroup) resource.TestCheckFunc { +func testAccCheckALBTargetGroupHealthy(ctx context.Context, v *elasticloadbalancingv2types.TargetGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) - output, err := conn.DescribeTargetHealthWithContext(ctx, &elbv2.DescribeTargetHealthInput{ + output, err := conn.DescribeTargetHealth(ctx, &elasticloadbalancingv2.DescribeTargetHealthInput{ TargetGroupArn: v.TargetGroupArn, }) @@ -4171,7 +4220,7 @@ func testAccCheckALBTargetGroupHealthy(ctx context.Context, v *elbv2.TargetGroup } for _, v := range output.TargetHealthDescriptions { - if v.TargetHealth == nil || aws.ToString(v.TargetHealth.State) != elbv2.TargetHealthStateEnumHealthy { + if v.TargetHealth == nil || v.TargetHealth.State != elasticloadbalancingv2types.TargetHealthStateEnumHealthy { return errors.New("Not all instances in target group are healthy yet, but should be") } } @@ -4697,7 +4746,7 @@ resource "aws_autoscaling_group" "test" { traffic_source { identifier = aws_lb_target_group.test.arn - type = "elbv2" + type = "elasticloadbalancingv2" } tag { @@ -4743,7 +4792,7 @@ resource "aws_autoscaling_group" "test" { for_each = aws_lb_target_group.test[*] content { identifier = traffic_source.value.arn - type = "elbv2" + type = "elasticloadbalancingv2" } } } @@ -5742,6 +5791,34 @@ resource "aws_autoscaling_group" "test" { `, rName)) } +func testAccGroupConfig_warmPoolZero(rName string) string { + return acctest.ConfigCompose(testAccGroupConfig_launchConfigurationBase(rName, "t3.nano"), fmt.Sprintf(` +resource "aws_autoscaling_group" "test" { + availability_zones = 
[data.aws_availability_zones.available.names[0]] + max_size = 5 + min_size = 1 + desired_capacity = 1 + name = %[1]q + launch_configuration = aws_launch_configuration.test.name + + warm_pool { + pool_state = "Stopped" + min_size = 0 + max_group_prepared_capacity = 0 + instance_reuse_policy { + reuse_on_scale_in = true + } + } + + tag { + key = "Name" + value = %[1]q + propagate_at_launch = true + } +} +`, rName)) +} + func testAccGroupConfig_warmPoolNone(rName string) string { return acctest.ConfigCompose(testAccGroupConfig_launchConfigurationBase(rName, "t3.nano"), fmt.Sprintf(` resource "aws_autoscaling_group" "test" { diff --git a/internal/service/autoscaling/service_endpoint_resolver_gen.go b/internal/service/autoscaling/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..7957c8ce8dd --- /dev/null +++ b/internal/service/autoscaling/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package autoscaling + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + autoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscaling" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ autoscaling_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver autoscaling_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: autoscaling_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params autoscaling_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", 
map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up autoscaling endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*autoscaling_sdkv2.Options) { + return func(o *autoscaling_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/autoscaling/service_endpoints_gen_test.go b/internal/service/autoscaling/service_endpoints_gen_test.go index c6ebc02ddd1..790a7bc829b 100644 --- a/internal/service/autoscaling/service_endpoints_gen_test.go +++ b/internal/service/autoscaling/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package 
name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := autoscaling_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), autoscaling_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := autoscaling_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), autoscaling_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: 
expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/autoscaling/service_package_gen.go b/internal/service/autoscaling/service_package_gen.go index 86dc639da63..3eca5059adb 100644 --- a/internal/service/autoscaling/service_package_gen.go +++ b/internal/service/autoscaling/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package autoscaling @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" autoscaling_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscaling" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -101,19 +100,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*autoscaling_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return autoscaling_sdkv2.NewFromConfig(cfg, func(o *autoscaling_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return autoscaling_sdkv2.NewFromConfig(cfg, + autoscaling_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/autoscalingplans/service_endpoint_resolver_gen.go b/internal/service/autoscalingplans/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..8c35b996615 --- /dev/null +++ b/internal/service/autoscalingplans/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package autoscalingplans + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + autoscalingplans_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscalingplans" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ autoscalingplans_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver autoscalingplans_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: autoscalingplans_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params autoscalingplans_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up autoscalingplans endpoint %q: %s", hostname, err) + 
return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*autoscalingplans_sdkv2.Options) { + return func(o *autoscalingplans_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/autoscalingplans/service_endpoints_gen_test.go b/internal/service/autoscalingplans/service_endpoints_gen_test.go index 136e923b352..31e88ea87c1 100644 --- a/internal/service/autoscalingplans/service_endpoints_gen_test.go +++ b/internal/service/autoscalingplans/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := autoscalingplans_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), autoscalingplans_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func 
defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := autoscalingplans_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), autoscalingplans_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/autoscalingplans/service_package_gen.go b/internal/service/autoscalingplans/service_package_gen.go index aa09439d7c0..f7153419f7c 
100644 --- a/internal/service/autoscalingplans/service_package_gen.go +++ b/internal/service/autoscalingplans/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package autoscalingplans @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" autoscalingplans_sdkv2 "github.com/aws/aws-sdk-go-v2/service/autoscalingplans" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -44,19 +43,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*autoscalingplans_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return autoscalingplans_sdkv2.NewFromConfig(cfg, func(o *autoscalingplans_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return autoscalingplans_sdkv2.NewFromConfig(cfg, + autoscalingplans_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/backup/exports_test.go b/internal/service/backup/exports_test.go new file mode 100644 index 00000000000..4466e1cf367 --- /dev/null +++ b/internal/service/backup/exports_test.go @@ -0,0 +1,10 @@ +// Copyright 
(c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package backup + +// Exports for use in tests only. +var ( + FindVaultAccessPolicyByName = findVaultAccessPolicyByName + FindVaultByName = findVaultByName +) diff --git a/internal/service/backup/find.go b/internal/service/backup/find.go index 81a2aa0304a..cdee7f09329 100644 --- a/internal/service/backup/find.go +++ b/internal/service/backup/find.go @@ -6,21 +6,23 @@ package backup import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func FindJobByID(ctx context.Context, conn *backup.Backup, id string) (*backup.DescribeBackupJobOutput, error) { +func findJobByID(ctx context.Context, conn *backup.Client, id string) (*backup.DescribeBackupJobOutput, error) { input := &backup.DescribeBackupJobInput{ BackupJobId: aws.String(id), } - output, err := conn.DescribeBackupJobWithContext(ctx, input) + output, err := conn.DescribeBackupJob(ctx, input) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -38,15 +40,15 @@ func FindJobByID(ctx context.Context, conn *backup.Backup, id string) (*backup.D return output, nil } -func FindRecoveryPointByTwoPartKey(ctx context.Context, conn *backup.Backup, backupVaultName, recoveryPointARN string) (*backup.DescribeRecoveryPointOutput, error) { +func findRecoveryPointByTwoPartKey(ctx context.Context, conn *backup.Client, 
backupVaultName, recoveryPointARN string) (*backup.DescribeRecoveryPointOutput, error) { input := &backup.DescribeRecoveryPointInput{ BackupVaultName: aws.String(backupVaultName), RecoveryPointArn: aws.String(recoveryPointARN), } - output, err := conn.DescribeRecoveryPointWithContext(ctx, input) + output, err := conn.DescribeRecoveryPoint(ctx, input) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException, errCodeAccessDeniedException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -64,14 +66,14 @@ func FindRecoveryPointByTwoPartKey(ctx context.Context, conn *backup.Backup, bac return output, nil } -func FindVaultAccessPolicyByName(ctx context.Context, conn *backup.Backup, name string) (*backup.GetBackupVaultAccessPolicyOutput, error) { +func findVaultAccessPolicyByName(ctx context.Context, conn *backup.Client, name string) (*backup.GetBackupVaultAccessPolicyOutput, error) { input := &backup.GetBackupVaultAccessPolicyInput{ BackupVaultName: aws.String(name), } - output, err := conn.GetBackupVaultAccessPolicyWithContext(ctx, input) + output, err := conn.GetBackupVaultAccessPolicy(ctx, input) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException, errCodeAccessDeniedException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) || tfawserr.ErrCodeEquals(err, errCodeAccessDeniedException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -89,14 +91,14 @@ func FindVaultAccessPolicyByName(ctx context.Context, conn *backup.Backup, name return output, nil } -func FindVaultByName(ctx context.Context, conn *backup.Backup, name string) (*backup.DescribeBackupVaultOutput, error) { +func findVaultByName(ctx context.Context, conn *backup.Client, name string) (*backup.DescribeBackupVaultOutput, error) { input := &backup.DescribeBackupVaultInput{ BackupVaultName: aws.String(name), } - output, err := 
conn.DescribeBackupVaultWithContext(ctx, input) + output, err := conn.DescribeBackupVault(ctx, input) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException, errCodeAccessDeniedException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) || tfawserr.ErrCodeEquals(err, errCodeAccessDeniedException) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -113,3 +115,24 @@ func FindVaultByName(ctx context.Context, conn *backup.Backup, name string) (*ba return output, nil } + +func findFrameworkByName(ctx context.Context, conn *backup.Client, name string) (*backup.DescribeFrameworkOutput, error) { + input := &backup.DescribeFrameworkInput{ + FrameworkName: aws.String(name), + } + + output, err := conn.DescribeFramework(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} diff --git a/internal/service/backup/framework.go b/internal/service/backup/framework.go index 6bb3f2e7545..c9b69a059e9 100644 --- a/internal/service/backup/framework.go +++ b/internal/service/backup/framework.go @@ -8,14 +8,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" 
"github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -36,9 +37,9 @@ func ResourceFramework() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(2 * time.Minute), - Update: schema.DefaultTimeout(2 * time.Minute), - Delete: schema.DefaultTimeout(2 * time.Minute), + Create: schema.DefaultTimeout(3 * time.Minute), + Update: schema.DefaultTimeout(3 * time.Minute), + Delete: schema.DefaultTimeout(3 * time.Minute), }, Schema: map[string]*schema.Schema{ names.AttrARN: { @@ -85,6 +86,7 @@ func ResourceFramework() *schema.Resource { "compliance_resource_ids": { Type: schema.TypeSet, Optional: true, + Computed: true, MinItems: 1, MaxItems: 100, Elem: &schema.Schema{ @@ -94,6 +96,7 @@ func ResourceFramework() *schema.Resource { "compliance_resource_types": { Type: schema.TypeSet, Optional: true, + Computed: true, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -139,7 +142,7 @@ func ResourceFramework() *schema.Resource { func resourceFrameworkCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) name := d.Get(names.AttrName).(string) input := &backup.CreateFrameworkInput{ @@ -153,13 +156,13 @@ func resourceFrameworkCreate(ctx context.Context, d *schema.ResourceData, meta i input.FrameworkDescription = aws.String(v.(string)) } - resp, err := conn.CreateFrameworkWithContext(ctx, input) + resp, err := conn.CreateFramework(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Backup Framework: %s", err) } // Set ID with the name since the name is unique for the framework - d.SetId(aws.StringValue(resp.FrameworkName)) + d.SetId(aws.ToString(resp.FrameworkName)) // waiter since the status changes from CREATE_IN_PROGRESS to either 
COMPLETED or FAILED if _, err := waitFrameworkCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -171,13 +174,11 @@ func resourceFrameworkCreate(ctx context.Context, d *schema.ResourceData, meta i func resourceFrameworkRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) - resp, err := conn.DescribeFrameworkWithContext(ctx, &backup.DescribeFrameworkInput{ - FrameworkName: aws.String(d.Id()), - }) + resp, err := findFrameworkByName(ctx, conn, d.Id()) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Backup Framework (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -205,7 +206,7 @@ func resourceFrameworkRead(ctx context.Context, d *schema.ResourceData, meta int func resourceFrameworkUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) if d.HasChanges(names.AttrDescription, "control") { input := &backup.UpdateFrameworkInput{ @@ -217,9 +218,9 @@ func resourceFrameworkUpdate(ctx context.Context, d *schema.ResourceData, meta i log.Printf("[DEBUG] Updating Backup Framework: %#v", input) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.UpdateFrameworkWithContext(ctx, input) - }, backup.ErrCodeConflictException) + _, err := tfresource.RetryWhenIsA[*awstypes.ConflictException](ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.UpdateFramework(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Backup Framework (%s): %s", d.Id(), err) @@ -235,15 +236,19 @@ func 
resourceFrameworkUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceFrameworkDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.DeleteFrameworkInput{ FrameworkName: aws.String(d.Id()), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutDelete), func() (interface{}, error) { - return conn.DeleteFrameworkWithContext(ctx, input) - }, backup.ErrCodeConflictException) + _, err := tfresource.RetryWhenIsA[*awstypes.ConflictException](ctx, d.Timeout(schema.TimeoutDelete), func() (interface{}, error) { + return conn.DeleteFramework(ctx, input) + }) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return diags + } if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Backup Framework (%s): %s", d.Id(), err) @@ -256,12 +261,12 @@ func resourceFrameworkDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func expandFrameworkControls(ctx context.Context, controls []interface{}) []*backup.FrameworkControl { +func expandFrameworkControls(ctx context.Context, controls []interface{}) []awstypes.FrameworkControl { if len(controls) == 0 { return nil } - frameworkControls := []*backup.FrameworkControl{} + frameworkControls := []awstypes.FrameworkControl{} for _, control := range controls { tfMap := control.(map[string]interface{}) @@ -273,13 +278,13 @@ func expandFrameworkControls(ctx context.Context, controls []interface{}) []*bac continue } - frameworkControl := &backup.FrameworkControl{ + frameworkControl := awstypes.FrameworkControl{ ControlName: aws.String(tfMap[names.AttrName].(string)), ControlScope: expandControlScope(ctx, tfMap[names.AttrScope].([]interface{})), } if v, ok := tfMap["input_parameter"]; ok && v.(*schema.Set).Len() > 0 { - frameworkControl.ControlInputParameters = 
expandInputParmaeters(tfMap["input_parameter"].(*schema.Set).List()) + frameworkControl.ControlInputParameters = expandInputParameters(tfMap["input_parameter"].(*schema.Set).List()) } frameworkControls = append(frameworkControls, frameworkControl) @@ -288,16 +293,16 @@ func expandFrameworkControls(ctx context.Context, controls []interface{}) []*bac return frameworkControls } -func expandInputParmaeters(inputParams []interface{}) []*backup.ControlInputParameter { +func expandInputParameters(inputParams []interface{}) []awstypes.ControlInputParameter { if len(inputParams) == 0 { return nil } - controlInputParameters := []*backup.ControlInputParameter{} + controlInputParameters := []awstypes.ControlInputParameter{} for _, inputParam := range inputParams { tfMap := inputParam.(map[string]interface{}) - controlInputParameter := &backup.ControlInputParameter{} + controlInputParameter := awstypes.ControlInputParameter{} if v, ok := tfMap[names.AttrName].(string); ok && v != "" { controlInputParameter.ParameterName = aws.String(v) @@ -313,7 +318,7 @@ func expandInputParmaeters(inputParams []interface{}) []*backup.ControlInputPara return controlInputParameters } -func expandControlScope(ctx context.Context, scope []interface{}) *backup.ControlScope { +func expandControlScope(ctx context.Context, scope []interface{}) *awstypes.ControlScope { if len(scope) == 0 || scope[0] == nil { return nil } @@ -323,14 +328,14 @@ func expandControlScope(ctx context.Context, scope []interface{}) *backup.Contro return nil } - controlScope := &backup.ControlScope{} + controlScope := &awstypes.ControlScope{} if v, ok := tfMap["compliance_resource_ids"]; ok && v.(*schema.Set).Len() > 0 { - controlScope.ComplianceResourceIds = flex.ExpandStringSet(v.(*schema.Set)) + controlScope.ComplianceResourceIds = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := tfMap["compliance_resource_types"]; ok && v.(*schema.Set).Len() > 0 { - controlScope.ComplianceResourceTypes = 
flex.ExpandStringSet(v.(*schema.Set)) + controlScope.ComplianceResourceTypes = flex.ExpandStringValueSet(v.(*schema.Set)) } // A maximum of one key-value pair can be provided. @@ -342,7 +347,7 @@ func expandControlScope(ctx context.Context, scope []interface{}) *backup.Contro return controlScope } -func flattenFrameworkControls(ctx context.Context, controls []*backup.FrameworkControl) []interface{} { +func flattenFrameworkControls(ctx context.Context, controls []awstypes.FrameworkControl) []interface{} { if controls == nil { return []interface{}{} } @@ -351,14 +356,14 @@ func flattenFrameworkControls(ctx context.Context, controls []*backup.FrameworkC for _, control := range controls { values := map[string]interface{}{} values["input_parameter"] = flattenInputParameters(control.ControlInputParameters) - values[names.AttrName] = aws.StringValue(control.ControlName) + values[names.AttrName] = aws.ToString(control.ControlName) values[names.AttrScope] = flattenScope(ctx, control.ControlScope) frameworkControls = append(frameworkControls, values) } return frameworkControls } -func flattenInputParameters(inputParams []*backup.ControlInputParameter) []interface{} { +func flattenInputParameters(inputParams []awstypes.ControlInputParameter) []interface{} { if inputParams == nil { return []interface{}{} } @@ -366,21 +371,21 @@ func flattenInputParameters(inputParams []*backup.ControlInputParameter) []inter controlInputParameters := []interface{}{} for _, inputParam := range inputParams { values := map[string]interface{}{} - values[names.AttrName] = aws.StringValue(inputParam.ParameterName) - values[names.AttrValue] = aws.StringValue(inputParam.ParameterValue) + values[names.AttrName] = aws.ToString(inputParam.ParameterName) + values[names.AttrValue] = aws.ToString(inputParam.ParameterValue) controlInputParameters = append(controlInputParameters, values) } return controlInputParameters } -func flattenScope(ctx context.Context, scope *backup.ControlScope) []interface{} { +func 
flattenScope(ctx context.Context, scope *awstypes.ControlScope) []interface{} { if scope == nil { return []interface{}{} } controlScope := map[string]interface{}{ - "compliance_resource_ids": flex.FlattenStringList(scope.ComplianceResourceIds), - "compliance_resource_types": flex.FlattenStringList(scope.ComplianceResourceTypes), + "compliance_resource_ids": flex.FlattenStringValueList(scope.ComplianceResourceIds), + "compliance_resource_types": flex.FlattenStringValueList(scope.ComplianceResourceTypes), } if v := scope.Tags; v != nil { diff --git a/internal/service/backup/framework_data_source.go b/internal/service/backup/framework_data_source.go index 7ffebf3eac9..3e59769d175 100644 --- a/internal/service/backup/framework_data_source.go +++ b/internal/service/backup/framework_data_source.go @@ -7,8 +7,8 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -28,12 +28,12 @@ func DataSourceFramework() *schema.Resource { Computed: true, }, "control": { - Type: schema.TypeSet, + Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "input_parameter": { - Type: schema.TypeSet, + Type: schema.TypeList, Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -58,14 +58,14 @@ func DataSourceFramework() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "compliance_resource_ids": { - Type: schema.TypeSet, + Type: schema.TypeList, Computed: true, Elem: &schema.Schema{ Type: schema.TypeString, }, }, "compliance_resource_types": { - Type: schema.TypeSet, + Type: schema.TypeList, Computed: true, Elem: &schema.Schema{ Type: schema.TypeString, @@ -105,19 +105,19 @@ func 
DataSourceFramework() *schema.Resource { func dataSourceFrameworkRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig name := d.Get(names.AttrName).(string) - resp, err := conn.DescribeFrameworkWithContext(ctx, &backup.DescribeFrameworkInput{ + resp, err := conn.DescribeFramework(ctx, &backup.DescribeFrameworkInput{ FrameworkName: aws.String(name), }) if err != nil { return sdkdiag.AppendErrorf(diags, "getting Backup Framework: %s", err) } - d.SetId(aws.StringValue(resp.FrameworkName)) + d.SetId(aws.ToString(resp.FrameworkName)) d.Set(names.AttrARN, resp.FrameworkArn) d.Set("deployment_status", resp.DeploymentStatus) @@ -133,7 +133,7 @@ func dataSourceFrameworkRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "setting control: %s", err) } - tags, err := listTags(ctx, conn, aws.StringValue(resp.FrameworkArn)) + tags, err := listTags(ctx, conn, aws.ToString(resp.FrameworkArn)) if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for Backup Framework (%s): %s", d.Id(), err) diff --git a/internal/service/backup/framework_test.go b/internal/service/backup/framework_test.go index c357320b3c4..1e2d92ebaee 100644 --- a/internal/service/backup/framework_test.go +++ b/internal/service/backup/framework_test.go @@ -8,8 +8,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -491,9 +491,9 @@ func testAccFramework_disappears(t *testing.T) { } func 
testAccFrameworkPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) - _, err := conn.ListFrameworksWithContext(ctx, &backup.ListFrameworksInput{}) + _, err := conn.ListFrameworks(ctx, &backup.ListFrameworksInput{}) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) @@ -506,7 +506,7 @@ func testAccFrameworkPreCheck(ctx context.Context, t *testing.T) { func testAccCheckFrameworkDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_backup_framework" { continue @@ -516,10 +516,10 @@ func testAccCheckFrameworkDestroy(ctx context.Context) resource.TestCheckFunc { FrameworkName: aws.String(rs.Primary.ID), } - resp, err := conn.DescribeFrameworkWithContext(ctx, input) + resp, err := conn.DescribeFramework(ctx, input) if err == nil { - if aws.StringValue(resp.FrameworkName) == rs.Primary.ID { + if aws.ToString(resp.FrameworkName) == rs.Primary.ID { return fmt.Errorf("Backup Framework '%s' was not deleted properly", rs.Primary.ID) } } @@ -537,11 +537,11 @@ func testAccCheckFrameworkExists(ctx context.Context, name string, framework *ba return fmt.Errorf("Not found: %s", name) } - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) input := &backup.DescribeFrameworkInput{ FrameworkName: aws.String(rs.Primary.ID), } - resp, err := conn.DescribeFrameworkWithContext(ctx, input) + resp, err := conn.DescribeFramework(ctx, input) if err != nil { return err diff --git a/internal/service/backup/generate.go b/internal/service/backup/generate.go index 22d253292fd..323b51ecb6f 100644 --- 
a/internal/service/backup/generate.go +++ b/internal/service/backup/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=ListTags -ListTagsOpPaginated -ServiceTagsMap -UntagInTagsElem=TagKeyList -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -ListTags -ListTagsOp=ListTags -ServiceTagsMap -SkipTypesImp -UntagInTagsElem=TagKeyList -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/backup/global_settings.go b/internal/service/backup/global_settings.go index 4b62e526d86..377ee82adac 100644 --- a/internal/service/backup/global_settings.go +++ b/internal/service/backup/global_settings.go @@ -6,8 +6,7 @@ package backup import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -38,13 +37,13 @@ func ResourceGlobalSettings() *schema.Resource { func resourceGlobalSettingsUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.UpdateGlobalSettingsInput{ - GlobalSettings: flex.ExpandStringMap(d.Get("global_settings").(map[string]interface{})), + GlobalSettings: flex.ExpandStringValueMap(d.Get("global_settings").(map[string]interface{})), } - _, err := conn.UpdateGlobalSettingsWithContext(ctx, input) + _, err := conn.UpdateGlobalSettings(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting Backup Global Settings (%s): %s", 
meta.(*conns.AWSClient).AccountID, err) } @@ -56,14 +55,14 @@ func resourceGlobalSettingsUpdate(ctx context.Context, d *schema.ResourceData, m func resourceGlobalSettingsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) - resp, err := conn.DescribeGlobalSettingsWithContext(ctx, &backup.DescribeGlobalSettingsInput{}) + resp, err := conn.DescribeGlobalSettings(ctx, &backup.DescribeGlobalSettingsInput{}) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Backup Global Settings (%s): %s", d.Id(), err) } - if err := d.Set("global_settings", aws.StringValueMap(resp.GlobalSettings)); err != nil { + if err := d.Set("global_settings", resp.GlobalSettings); err != nil { return sdkdiag.AppendErrorf(diags, "setting global_settings: %s", err) } diff --git a/internal/service/backup/global_settings_test.go b/internal/service/backup/global_settings_test.go index bd8c66ea330..c4defa1cc27 100644 --- a/internal/service/backup/global_settings_test.go +++ b/internal/service/backup/global_settings_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -66,8 +66,8 @@ func TestAccBackupGlobalSettings_basic(t *testing.T) { func testAccCheckGlobalSettingsExists(ctx context.Context, settings *backup.DescribeGlobalSettingsOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) - resp, err := conn.DescribeGlobalSettingsWithContext(ctx, &backup.DescribeGlobalSettingsInput{}) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) + resp, err := 
conn.DescribeGlobalSettings(ctx, &backup.DescribeGlobalSettingsInput{}) if err != nil { return err } diff --git a/internal/service/backup/plan.go b/internal/service/backup/plan.go index f99e9da8882..1bf7d74e37f 100644 --- a/internal/service/backup/plan.go +++ b/internal/service/backup/plan.go @@ -11,15 +11,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -187,11 +188,11 @@ func ResourcePlan() *schema.Resource { func resourcePlanCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) name := d.Get(names.AttrName).(string) input := &backup.CreateBackupPlanInput{ - BackupPlan: &backup.PlanInput{ + BackupPlan: &awstypes.BackupPlanInput{ AdvancedBackupSettings: expandPlanAdvancedSettings(d.Get("advanced_backup_setting").(*schema.Set)), BackupPlanName: aws.String(name), Rules: expandPlanRules(ctx, d.Get(names.AttrRule).(*schema.Set)), @@ -199,20 +200,20 @@ func resourcePlanCreate(ctx context.Context, d 
*schema.ResourceData, meta interf BackupPlanTags: getTagsIn(ctx), } - output, err := conn.CreateBackupPlanWithContext(ctx, input) + output, err := conn.CreateBackupPlan(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Backup Plan (%s): %s", name, err) } - d.SetId(aws.StringValue(output.BackupPlanId)) + d.SetId(aws.ToString(output.BackupPlanId)) return append(diags, resourcePlanRead(ctx, d, meta)...) } func resourcePlanRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) output, err := FindPlanByID(ctx, conn, d.Id()) @@ -243,19 +244,19 @@ func resourcePlanRead(ctx context.Context, d *schema.ResourceData, meta interfac func resourcePlanUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) if d.HasChanges(names.AttrRule, "advanced_backup_setting") { input := &backup.UpdateBackupPlanInput{ BackupPlanId: aws.String(d.Id()), - BackupPlan: &backup.PlanInput{ + BackupPlan: &awstypes.BackupPlanInput{ AdvancedBackupSettings: expandPlanAdvancedSettings(d.Get("advanced_backup_setting").(*schema.Set)), BackupPlanName: aws.String(d.Get(names.AttrName).(string)), Rules: expandPlanRules(ctx, d.Get(names.AttrRule).(*schema.Set)), }, } - _, err := conn.UpdateBackupPlanWithContext(ctx, input) + _, err := conn.UpdateBackupPlan(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Backup Plan (%s): %s", d.Id(), err) @@ -267,19 +268,19 @@ func resourcePlanUpdate(ctx context.Context, d *schema.ResourceData, meta interf func resourcePlanDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := 
meta.(*conns.AWSClient).BackupClient(ctx) log.Printf("[DEBUG] Deleting Backup Plan: %s", d.Id()) const ( timeout = 2 * time.Minute ) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func() (interface{}, error) { - return conn.DeleteBackupPlanWithContext(ctx, &backup.DeleteBackupPlanInput{ + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidRequestException](ctx, timeout, func() (interface{}, error) { + return conn.DeleteBackupPlan(ctx, &backup.DeleteBackupPlanInput{ BackupPlanId: aws.String(d.Id()), }) - }, backup.ErrCodeInvalidRequestException, "Related backup plan selections must be deleted prior to backup") + }, "Related backup plan selections must be deleted prior to backup") - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -290,14 +291,14 @@ func resourcePlanDelete(ctx context.Context, d *schema.ResourceData, meta interf return diags } -func FindPlanByID(ctx context.Context, conn *backup.Backup, id string) (*backup.GetBackupPlanOutput, error) { +func FindPlanByID(ctx context.Context, conn *backup.Client, id string) (*backup.GetBackupPlanOutput, error) { input := &backup.GetBackupPlanInput{ BackupPlanId: aws.String(id), } - output, err := conn.GetBackupPlanWithContext(ctx, input) + output, err := conn.GetBackupPlan(ctx, input) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -315,11 +316,11 @@ func FindPlanByID(ctx context.Context, conn *backup.Backup, id string) (*backup. 
return output, nil } -func expandPlanRules(ctx context.Context, vRules *schema.Set) []*backup.RuleInput { - rules := []*backup.RuleInput{} +func expandPlanRules(ctx context.Context, vRules *schema.Set) []awstypes.BackupRuleInput { + rules := []awstypes.BackupRuleInput{} for _, vRule := range vRules.List() { - rule := &backup.RuleInput{} + rule := awstypes.BackupRuleInput{} mRule := vRule.(map[string]interface{}) @@ -362,16 +363,16 @@ func expandPlanRules(ctx context.Context, vRules *schema.Set) []*backup.RuleInpu return rules } -func expandPlanAdvancedSettings(vAdvancedBackupSettings *schema.Set) []*backup.AdvancedBackupSetting { - advancedBackupSettings := []*backup.AdvancedBackupSetting{} +func expandPlanAdvancedSettings(vAdvancedBackupSettings *schema.Set) []awstypes.AdvancedBackupSetting { + advancedBackupSettings := []awstypes.AdvancedBackupSetting{} for _, vAdvancedBackupSetting := range vAdvancedBackupSettings.List() { - advancedBackupSetting := &backup.AdvancedBackupSetting{} + advancedBackupSetting := awstypes.AdvancedBackupSetting{} mAdvancedBackupSetting := vAdvancedBackupSetting.(map[string]interface{}) if v, ok := mAdvancedBackupSetting["backup_options"].(map[string]interface{}); ok && v != nil { - advancedBackupSetting.BackupOptions = flex.ExpandStringMap(v) + advancedBackupSetting.BackupOptions = flex.ExpandStringValueMap(v) } if v, ok := mAdvancedBackupSetting[names.AttrResourceType].(string); ok && v != "" { advancedBackupSetting.ResourceType = aws.String(v) @@ -389,12 +390,12 @@ func expandPlanAdvancedSettings(vAdvancedBackupSettings *schema.Set) []*backup.A return advancedBackupSettings } -func expandPlanCopyActions(actionList []interface{}) []*backup.CopyAction { - actions := []*backup.CopyAction{} +func expandPlanCopyActions(actionList []interface{}) []awstypes.CopyAction { + actions := []awstypes.CopyAction{} for _, i := range actionList { item := i.(map[string]interface{}) - action := &backup.CopyAction{} + action := awstypes.CopyAction{} 
action.DestinationBackupVaultArn = aws.String(item["destination_vault_arn"].(string)) @@ -408,12 +409,12 @@ func expandPlanCopyActions(actionList []interface{}) []*backup.CopyAction { return actions } -func expandPlanLifecycle(tfMap map[string]interface{}) *backup.Lifecycle { +func expandPlanLifecycle(tfMap map[string]interface{}) *awstypes.Lifecycle { if tfMap == nil { return nil } - apiObject := &backup.Lifecycle{} + apiObject := &awstypes.Lifecycle{} if v, ok := tfMap["delete_after"].(int); ok && v != 0 { apiObject.DeleteAfterDays = aws.Int64(int64(v)) @@ -430,17 +431,17 @@ func expandPlanLifecycle(tfMap map[string]interface{}) *backup.Lifecycle { return apiObject } -func flattenPlanRules(ctx context.Context, rules []*backup.Rule) *schema.Set { +func flattenPlanRules(ctx context.Context, rules []awstypes.BackupRule) *schema.Set { vRules := []interface{}{} for _, rule := range rules { mRule := map[string]interface{}{ - "rule_name": aws.StringValue(rule.RuleName), - "target_vault_name": aws.StringValue(rule.TargetBackupVaultName), - names.AttrSchedule: aws.StringValue(rule.ScheduleExpression), - "enable_continuous_backup": aws.BoolValue(rule.EnableContinuousBackup), - "start_window": int(aws.Int64Value(rule.StartWindowMinutes)), - "completion_window": int(aws.Int64Value(rule.CompletionWindowMinutes)), + "rule_name": aws.ToString(rule.RuleName), + "target_vault_name": aws.ToString(rule.TargetBackupVaultName), + names.AttrSchedule: aws.ToString(rule.ScheduleExpression), + "enable_continuous_backup": aws.ToBool(rule.EnableContinuousBackup), + "start_window": int(aws.ToInt64(rule.StartWindowMinutes)), + "completion_window": int(aws.ToInt64(rule.CompletionWindowMinutes)), "recovery_point_tags": KeyValueTags(ctx, rule.RecoveryPointTags).IgnoreAWS().Map(), } @@ -456,13 +457,13 @@ func flattenPlanRules(ctx context.Context, rules []*backup.Rule) *schema.Set { return schema.NewSet(planHash, vRules) } -func flattenPlanAdvancedSettings(advancedBackupSettings 
[]*backup.AdvancedBackupSetting) *schema.Set { +func flattenPlanAdvancedSettings(advancedBackupSettings []awstypes.AdvancedBackupSetting) *schema.Set { vAdvancedBackupSettings := []interface{}{} for _, advancedBackupSetting := range advancedBackupSettings { mAdvancedBackupSetting := map[string]interface{}{ - "backup_options": aws.StringValueMap(advancedBackupSetting.BackupOptions), - names.AttrResourceType: aws.StringValue(advancedBackupSetting.ResourceType), + "backup_options": advancedBackupSetting.BackupOptions, + names.AttrResourceType: aws.ToString(advancedBackupSetting.ResourceType), } vAdvancedBackupSettings = append(vAdvancedBackupSettings, mAdvancedBackupSetting) @@ -471,7 +472,7 @@ func flattenPlanAdvancedSettings(advancedBackupSettings []*backup.AdvancedBackup return schema.NewSet(planHash, vAdvancedBackupSettings) } -func flattenPlanCopyActions(copyActions []*backup.CopyAction) []interface{} { +func flattenPlanCopyActions(copyActions []awstypes.CopyAction) []interface{} { if len(copyActions) == 0 { return nil } @@ -479,12 +480,8 @@ func flattenPlanCopyActions(copyActions []*backup.CopyAction) []interface{} { var tfList []interface{} for _, copyAction := range copyActions { - if copyAction == nil { - continue - } - tfMap := map[string]interface{}{ - "destination_vault_arn": aws.StringValue(copyAction.DestinationBackupVaultArn), + "destination_vault_arn": aws.ToString(copyAction.DestinationBackupVaultArn), } if copyAction.Lifecycle != nil { @@ -497,15 +494,15 @@ func flattenPlanCopyActions(copyActions []*backup.CopyAction) []interface{} { return tfList } -func flattenPlanCopyActionLifecycle(copyActionLifecycle *backup.Lifecycle) []interface{} { +func flattenPlanCopyActionLifecycle(copyActionLifecycle *awstypes.Lifecycle) []interface{} { if copyActionLifecycle == nil { return []interface{}{} } m := map[string]interface{}{ - "delete_after": aws.Int64Value(copyActionLifecycle.DeleteAfterDays), - "cold_storage_after": 
aws.Int64Value(copyActionLifecycle.MoveToColdStorageAfterDays), - "opt_in_to_archive_for_supported_resources": aws.BoolValue(copyActionLifecycle.OptInToArchiveForSupportedResources), + "delete_after": aws.ToInt64(copyActionLifecycle.DeleteAfterDays), + "cold_storage_after": aws.ToInt64(copyActionLifecycle.MoveToColdStorageAfterDays), + "opt_in_to_archive_for_supported_resources": aws.ToBool(copyActionLifecycle.OptInToArchiveForSupportedResources), } return []interface{}{m} diff --git a/internal/service/backup/plan_data_source.go b/internal/service/backup/plan_data_source.go index 2bda79b24ad..f90be0ef188 100644 --- a/internal/service/backup/plan_data_source.go +++ b/internal/service/backup/plan_data_source.go @@ -6,8 +6,8 @@ package backup import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -34,6 +34,92 @@ func DataSourcePlan() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrRule: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "completion_window": { + Type: schema.TypeInt, + Computed: true, + }, + "copy_action": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_vault_arn": { + Type: schema.TypeString, + Computed: true, + }, + "lifecycle": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cold_storage_after": { + Type: schema.TypeInt, + Computed: true, + }, + "delete_after": { + Type: schema.TypeInt, + Computed: true, + }, + "opt_in_to_archive_for_supported_resources": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + 
}, + "enable_continuous_backup": { + Type: schema.TypeBool, + Computed: true, + }, + "lifecycle": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cold_storage_after": { + Type: schema.TypeInt, + Computed: true, + }, + "delete_after": { + Type: schema.TypeInt, + Computed: true, + }, + "opt_in_to_archive_for_supported_resources": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "recovery_point_tags": tftags.TagsSchema(), + "rule_name": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrSchedule: { + Type: schema.TypeString, + Computed: true, + }, + "start_window": { + Type: schema.TypeInt, + Computed: true, + }, + "target_vault_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: planHash, + }, names.AttrTags: tftags.TagsSchemaComputed(), names.AttrVersion: { Type: schema.TypeString, @@ -45,24 +131,27 @@ func DataSourcePlan() *schema.Resource { func dataSourcePlanRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig id := d.Get("plan_id").(string) - resp, err := conn.GetBackupPlanWithContext(ctx, &backup.GetBackupPlanInput{ + resp, err := conn.GetBackupPlan(ctx, &backup.GetBackupPlanInput{ BackupPlanId: aws.String(id), }) if err != nil { return sdkdiag.AppendErrorf(diags, "getting Backup Plan: %s", err) } - d.SetId(aws.StringValue(resp.BackupPlanId)) + d.SetId(aws.ToString(resp.BackupPlanId)) d.Set(names.AttrARN, resp.BackupPlanArn) d.Set(names.AttrName, resp.BackupPlan.BackupPlanName) d.Set(names.AttrVersion, resp.VersionId) + if err := d.Set(names.AttrRule, flattenPlanRules(ctx, resp.BackupPlan.Rules)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting rule: %s", err) + } - tags, err := listTags(ctx, conn, 
aws.StringValue(resp.BackupPlanArn)) + tags, err := listTags(ctx, conn, aws.ToString(resp.BackupPlanArn)) if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for Backup Plan (%s): %s", id, err) } diff --git a/internal/service/backup/plan_data_source_test.go b/internal/service/backup/plan_data_source_test.go index c1de262d647..6279b7cbd1b 100644 --- a/internal/service/backup/plan_data_source_test.go +++ b/internal/service/backup/plan_data_source_test.go @@ -31,6 +31,8 @@ func TestAccBackupPlanDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(datasourceName, names.AttrVersion, resourceName, names.AttrVersion), resource.TestCheckResourceAttrPair(datasourceName, acctest.CtTagsPercent, resourceName, acctest.CtTagsPercent), + resource.TestCheckResourceAttrPair(datasourceName, acctest.CtRulePound, resourceName, acctest.CtRulePound), + resource.TestCheckResourceAttrPair(datasourceName, names.AttrRule, resourceName, names.AttrRule), ), }, }, diff --git a/internal/service/backup/plan_test.go b/internal/service/backup/plan_test.go index cb7dbc401f4..192d0a2e17b 100644 --- a/internal/service/backup/plan_test.go +++ b/internal/service/backup/plan_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/service/backup" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -677,7 +677,7 @@ func TestAccBackupPlan_disappears(t *testing.T) { func testAccCheckPlanDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) for _, rs := range 
s.RootModule().Resources { if rs.Type != "aws_backup_plan" { continue @@ -702,7 +702,7 @@ func testAccCheckPlanDestroy(ctx context.Context) resource.TestCheckFunc { func testAccCheckPlanExists(ctx context.Context, n string, v *backup.GetBackupPlanOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) rs, ok := s.RootModule().Resources[n] if !ok { diff --git a/internal/service/backup/region_settings.go b/internal/service/backup/region_settings.go index 2fd8549896c..8c2ef75f7b9 100644 --- a/internal/service/backup/region_settings.go +++ b/internal/service/backup/region_settings.go @@ -6,8 +6,7 @@ package backup import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -44,19 +43,19 @@ func ResourceRegionSettings() *schema.Resource { func resourceRegionSettingsUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.UpdateRegionSettingsInput{} if v, ok := d.GetOk("resource_type_management_preference"); ok && len(v.(map[string]interface{})) > 0 { - input.ResourceTypeManagementPreference = flex.ExpandBoolMap(v.(map[string]interface{})) + input.ResourceTypeManagementPreference = flex.ExpandBoolValueMap(v.(map[string]interface{})) } if v, ok := d.GetOk("resource_type_opt_in_preference"); ok && len(v.(map[string]interface{})) > 0 { - input.ResourceTypeOptInPreference = flex.ExpandBoolMap(v.(map[string]interface{})) + input.ResourceTypeOptInPreference = 
flex.ExpandBoolValueMap(v.(map[string]interface{})) } - _, err := conn.UpdateRegionSettingsWithContext(ctx, input) + _, err := conn.UpdateRegionSettings(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Backup Region Settings (%s): %s", d.Id(), err) @@ -69,16 +68,16 @@ func resourceRegionSettingsUpdate(ctx context.Context, d *schema.ResourceData, m func resourceRegionSettingsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) - output, err := conn.DescribeRegionSettingsWithContext(ctx, &backup.DescribeRegionSettingsInput{}) + output, err := conn.DescribeRegionSettings(ctx, &backup.DescribeRegionSettingsInput{}) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Backup Region Settings (%s): %s", d.Id(), err) } - d.Set("resource_type_opt_in_preference", aws.BoolValueMap(output.ResourceTypeOptInPreference)) - d.Set("resource_type_management_preference", aws.BoolValueMap(output.ResourceTypeManagementPreference)) + d.Set("resource_type_opt_in_preference", output.ResourceTypeOptInPreference) + d.Set("resource_type_management_preference", output.ResourceTypeManagementPreference) return diags } diff --git a/internal/service/backup/region_settings_test.go b/internal/service/backup/region_settings_test.go index 1ae46860c01..8fc763fdf44 100644 --- a/internal/service/backup/region_settings_test.go +++ b/internal/service/backup/region_settings_test.go @@ -7,7 +7,7 @@ import ( "context" "testing" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/aws/aws-sdk-go/service/fsx" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -35,7 +35,7 @@ func TestAccBackupRegionSettings_basic(t *testing.T) { Config: testAccRegionSettingsConfig_1(), Check: 
resource.ComposeAggregateTestCheckFunc( testAccCheckRegionSettingsExists(ctx, &settings), - resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.%", "12"), + resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.%", "16"), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.Aurora", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.DocumentDB", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.DynamoDB", acctest.CtTrue), @@ -62,7 +62,7 @@ func TestAccBackupRegionSettings_basic(t *testing.T) { Config: testAccRegionSettingsConfig_2(), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRegionSettingsExists(ctx, &settings), - resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.%", "12"), + resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.%", "16"), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.Aurora", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.DocumentDB", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.DynamoDB", acctest.CtTrue), @@ -84,7 +84,7 @@ func TestAccBackupRegionSettings_basic(t *testing.T) { Config: testAccRegionSettingsConfig_3(), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckRegionSettingsExists(ctx, &settings), - resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.%", "12"), + resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.%", "16"), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.Aurora", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.DocumentDB", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "resource_type_opt_in_preference.DynamoDB", 
acctest.CtTrue), @@ -108,9 +108,9 @@ func TestAccBackupRegionSettings_basic(t *testing.T) { func testAccCheckRegionSettingsExists(ctx context.Context, v *backup.DescribeRegionSettingsOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) - output, err := conn.DescribeRegionSettingsWithContext(ctx, &backup.DescribeRegionSettingsInput{}) + output, err := conn.DescribeRegionSettings(ctx, &backup.DescribeRegionSettingsInput{}) if err != nil { return err @@ -126,18 +126,22 @@ func testAccRegionSettingsConfig_1() string { return ` resource "aws_backup_region_settings" "test" { resource_type_opt_in_preference = { - "Aurora" = true - "DocumentDB" = true - "DynamoDB" = true - "EBS" = true - "EC2" = true - "EFS" = true - "FSx" = true - "Neptune" = true - "RDS" = true - "S3" = true - "Storage Gateway" = true - "VirtualMachine" = true + "Aurora" = true + "CloudFormation" = true + "DocumentDB" = true + "DynamoDB" = true + "EBS" = true + "EC2" = true + "EFS" = true + "FSx" = true + "Neptune" = true + "RDS" = true + "Redshift" = true + "S3" = true + "SAP HANA on Amazon EC2" = true + "Storage Gateway" = true + "Timestream" = true + "VirtualMachine" = true } } ` @@ -147,18 +151,22 @@ func testAccRegionSettingsConfig_2() string { return ` resource "aws_backup_region_settings" "test" { resource_type_opt_in_preference = { - "Aurora" = false - "DocumentDB" = true - "DynamoDB" = true - "EBS" = true - "EC2" = true - "EFS" = true - "FSx" = true - "Neptune" = true - "RDS" = true - "S3" = true - "Storage Gateway" = true - "VirtualMachine" = true + "Aurora" = false + "CloudFormation" = true + "DocumentDB" = true + "DynamoDB" = true + "EBS" = true + "EC2" = true + "EFS" = true + "FSx" = true + "Neptune" = true + "RDS" = true + "Redshift" = true + "S3" = true + "SAP HANA on Amazon EC2" = true + "Storage Gateway" = true + "Timestream" = 
true + "VirtualMachine" = true } resource_type_management_preference = { @@ -173,18 +181,22 @@ func testAccRegionSettingsConfig_3() string { return ` resource "aws_backup_region_settings" "test" { resource_type_opt_in_preference = { - "Aurora" = false - "DocumentDB" = true - "DynamoDB" = true - "EBS" = true - "EC2" = true - "EFS" = true - "FSx" = true - "Neptune" = true - "RDS" = true - "S3" = true - "Storage Gateway" = true - "VirtualMachine" = false + "Aurora" = false + "CloudFormation" = true + "DocumentDB" = true + "DynamoDB" = true + "EBS" = true + "EC2" = true + "EFS" = true + "FSx" = true + "Neptune" = true + "RDS" = true + "Redshift" = true + "S3" = true + "SAP HANA on Amazon EC2" = true + "Storage Gateway" = true + "Timestream" = true + "VirtualMachine" = false } resource_type_management_preference = { diff --git a/internal/service/backup/report_plan.go b/internal/service/backup/report_plan.go index 00915c1342b..8dca727d151 100644 --- a/internal/service/backup/report_plan.go +++ b/internal/service/backup/report_plan.go @@ -8,15 +8,16 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -145,7 
+146,7 @@ func ResourceReportPlan() *schema.Resource { func resourceReportPlanCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) name := d.Get(names.AttrName).(string) input := &backup.CreateReportPlanInput{ @@ -160,14 +161,14 @@ func resourceReportPlanCreate(ctx context.Context, d *schema.ResourceData, meta input.ReportPlanDescription = aws.String(v.(string)) } - output, err := conn.CreateReportPlanWithContext(ctx, input) + output, err := conn.CreateReportPlan(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Backup Report Plan (%s): %s", name, err) } // Set ID with the name since the name is unique for the report plan. - d.SetId(aws.StringValue(output.ReportPlanName)) + d.SetId(aws.ToString(output.ReportPlanName)) if _, err := waitReportPlanCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Backup Report Plan (%s) create: %s", d.Id(), err) @@ -178,7 +179,7 @@ func resourceReportPlanCreate(ctx context.Context, d *schema.ResourceData, meta func resourceReportPlanRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) reportPlan, err := FindReportPlanByName(ctx, conn, d.Id()) @@ -211,7 +212,7 @@ func resourceReportPlanRead(ctx context.Context, d *schema.ResourceData, meta in func resourceReportPlanUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &backup.UpdateReportPlanInput{ @@ -222,8 +223,8 @@ func 
resourceReportPlanUpdate(ctx context.Context, d *schema.ResourceData, meta ReportSetting: expandReportSetting(d.Get("report_setting").([]interface{})), } - log.Printf("[DEBUG] Updating Backup Report Plan: %s", input) - _, err := conn.UpdateReportPlanWithContext(ctx, input) + log.Printf("[DEBUG] Updating Backup Report Plan: %+v", input) + _, err := conn.UpdateReportPlan(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Backup Report Plan (%s): %s", d.Id(), err) @@ -239,13 +240,17 @@ func resourceReportPlanUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceReportPlanDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) log.Printf("[DEBUG] Deleting Backup Report Plan: %s", d.Id()) - _, err := conn.DeleteReportPlanWithContext(ctx, &backup.DeleteReportPlanInput{ + _, err := conn.DeleteReportPlan(ctx, &backup.DeleteReportPlanInput{ ReportPlanName: aws.String(d.Id()), }) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Backup Report Plan (%s): %s", d.Id(), err) } @@ -257,7 +262,7 @@ func resourceReportPlanDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func expandReportDeliveryChannel(reportDeliveryChannel []interface{}) *backup.ReportDeliveryChannel { +func expandReportDeliveryChannel(reportDeliveryChannel []interface{}) *awstypes.ReportDeliveryChannel { if len(reportDeliveryChannel) == 0 || reportDeliveryChannel[0] == nil { return nil } @@ -267,12 +272,12 @@ func expandReportDeliveryChannel(reportDeliveryChannel []interface{}) *backup.Re return nil } - result := &backup.ReportDeliveryChannel{ + result := &awstypes.ReportDeliveryChannel{ S3BucketName: aws.String(tfMap[names.AttrS3BucketName].(string)), } if v, ok := tfMap["formats"]; ok && 
v.(*schema.Set).Len() > 0 { - result.Formats = flex.ExpandStringSet(v.(*schema.Set)) + result.Formats = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := tfMap[names.AttrS3KeyPrefix].(string); ok && v != "" { @@ -282,7 +287,7 @@ func expandReportDeliveryChannel(reportDeliveryChannel []interface{}) *backup.Re return result } -func expandReportSetting(reportSetting []interface{}) *backup.ReportSetting { +func expandReportSetting(reportSetting []interface{}) *awstypes.ReportSetting { if len(reportSetting) == 0 || reportSetting[0] == nil { return nil } @@ -292,93 +297,91 @@ func expandReportSetting(reportSetting []interface{}) *backup.ReportSetting { return nil } - result := &backup.ReportSetting{ + result := &awstypes.ReportSetting{ ReportTemplate: aws.String(tfMap["report_template"].(string)), } if v, ok := tfMap["accounts"]; ok && v.(*schema.Set).Len() > 0 { - result.Accounts = flex.ExpandStringSet(v.(*schema.Set)) + result.Accounts = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := tfMap["framework_arns"]; ok && v.(*schema.Set).Len() > 0 { - result.FrameworkArns = flex.ExpandStringSet(v.(*schema.Set)) + result.FrameworkArns = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := tfMap["number_of_frameworks"].(int); ok && v > 0 { - result.NumberOfFrameworks = aws.Int64(int64(v)) + result.NumberOfFrameworks = int32(v) } if v, ok := tfMap["organization_units"]; ok && v.(*schema.Set).Len() > 0 { - result.OrganizationUnits = flex.ExpandStringSet(v.(*schema.Set)) + result.OrganizationUnits = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := tfMap["regions"]; ok && v.(*schema.Set).Len() > 0 { - result.Regions = flex.ExpandStringSet(v.(*schema.Set)) + result.Regions = flex.ExpandStringValueSet(v.(*schema.Set)) } return result } -func flattenReportDeliveryChannel(reportDeliveryChannel *backup.ReportDeliveryChannel) []interface{} { +func flattenReportDeliveryChannel(reportDeliveryChannel *awstypes.ReportDeliveryChannel) []interface{} { if 
reportDeliveryChannel == nil { return []interface{}{} } values := map[string]interface{}{ - names.AttrS3BucketName: aws.StringValue(reportDeliveryChannel.S3BucketName), + names.AttrS3BucketName: aws.ToString(reportDeliveryChannel.S3BucketName), } if reportDeliveryChannel.Formats != nil && len(reportDeliveryChannel.Formats) > 0 { - values["formats"] = flex.FlattenStringSet(reportDeliveryChannel.Formats) + values["formats"] = flex.FlattenStringValueSet(reportDeliveryChannel.Formats) } if v := reportDeliveryChannel.S3KeyPrefix; v != nil { - values[names.AttrS3KeyPrefix] = aws.StringValue(v) + values[names.AttrS3KeyPrefix] = aws.ToString(v) } return []interface{}{values} } -func flattenReportSetting(reportSetting *backup.ReportSetting) []interface{} { +func flattenReportSetting(reportSetting *awstypes.ReportSetting) []interface{} { if reportSetting == nil { return []interface{}{} } values := map[string]interface{}{ - "report_template": aws.StringValue(reportSetting.ReportTemplate), + "report_template": aws.ToString(reportSetting.ReportTemplate), } if reportSetting.Accounts != nil && len(reportSetting.Accounts) > 0 { - values["accounts"] = flex.FlattenStringSet(reportSetting.Accounts) + values["accounts"] = flex.FlattenStringValueSet(reportSetting.Accounts) } if reportSetting.FrameworkArns != nil && len(reportSetting.FrameworkArns) > 0 { - values["framework_arns"] = flex.FlattenStringSet(reportSetting.FrameworkArns) + values["framework_arns"] = flex.FlattenStringValueSet(reportSetting.FrameworkArns) } - if reportSetting.NumberOfFrameworks != nil { - values["number_of_frameworks"] = aws.Int64Value(reportSetting.NumberOfFrameworks) - } + values["number_of_frameworks"] = reportSetting.NumberOfFrameworks if reportSetting.OrganizationUnits != nil && len(reportSetting.OrganizationUnits) > 0 { - values["organization_units"] = flex.FlattenStringSet(reportSetting.OrganizationUnits) + values["organization_units"] = flex.FlattenStringValueSet(reportSetting.OrganizationUnits) } if 
reportSetting.Regions != nil && len(reportSetting.Regions) > 0 { - values["regions"] = flex.FlattenStringSet(reportSetting.Regions) + values["regions"] = flex.FlattenStringValueSet(reportSetting.Regions) } return []interface{}{values} } -func FindReportPlanByName(ctx context.Context, conn *backup.Backup, name string) (*backup.ReportPlan, error) { +func FindReportPlanByName(ctx context.Context, conn *backup.Client, name string) (*awstypes.ReportPlan, error) { input := &backup.DescribeReportPlanInput{ ReportPlanName: aws.String(name), } - output, err := conn.DescribeReportPlanWithContext(ctx, input) + output, err := conn.DescribeReportPlan(ctx, input) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -396,7 +399,7 @@ func FindReportPlanByName(ctx context.Context, conn *backup.Backup, name string) return output.ReportPlan, nil } -func statusReportPlanDeployment(ctx context.Context, conn *backup.Backup, name string) retry.StateRefreshFunc { +func statusReportPlanDeployment(ctx context.Context, conn *backup.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindReportPlanByName(ctx, conn, name) @@ -408,11 +411,11 @@ func statusReportPlanDeployment(ctx context.Context, conn *backup.Backup, name s return nil, "", err } - return output, aws.StringValue(output.DeploymentStatus), nil + return output, aws.ToString(output.DeploymentStatus), nil } } -func waitReportPlanCreated(ctx context.Context, conn *backup.Backup, name string, timeout time.Duration) (*backup.ReportPlan, error) { +func waitReportPlanCreated(ctx context.Context, conn *backup.Client, name string, timeout time.Duration) (*awstypes.ReportPlan, error) { stateConf := &retry.StateChangeConf{ Pending: []string{reportPlanDeploymentStatusCreateInProgress}, Target: []string{reportPlanDeploymentStatusCompleted}, 
@@ -422,14 +425,14 @@ func waitReportPlanCreated(ctx context.Context, conn *backup.Backup, name string outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*backup.ReportPlan); ok { + if output, ok := outputRaw.(*awstypes.ReportPlan); ok { return output, err } return nil, err } -func waitReportPlanDeleted(ctx context.Context, conn *backup.Backup, name string, timeout time.Duration) (*backup.ReportPlan, error) { +func waitReportPlanDeleted(ctx context.Context, conn *backup.Client, name string, timeout time.Duration) (*awstypes.ReportPlan, error) { stateConf := &retry.StateChangeConf{ Pending: []string{reportPlanDeploymentStatusDeleteInProgress}, Target: []string{}, @@ -439,14 +442,14 @@ func waitReportPlanDeleted(ctx context.Context, conn *backup.Backup, name string outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*backup.ReportPlan); ok { + if output, ok := outputRaw.(*awstypes.ReportPlan); ok { return output, err } return nil, err } -func waitReportPlanUpdated(ctx context.Context, conn *backup.Backup, name string, timeout time.Duration) (*backup.ReportPlan, error) { +func waitReportPlanUpdated(ctx context.Context, conn *backup.Client, name string, timeout time.Duration) (*awstypes.ReportPlan, error) { stateConf := &retry.StateChangeConf{ Pending: []string{reportPlanDeploymentStatusUpdateInProgress}, Target: []string{reportPlanDeploymentStatusCompleted}, @@ -456,7 +459,7 @@ func waitReportPlanUpdated(ctx context.Context, conn *backup.Backup, name string outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*backup.ReportPlan); ok { + if output, ok := outputRaw.(*awstypes.ReportPlan); ok { return output, err } diff --git a/internal/service/backup/report_plan_data_source.go b/internal/service/backup/report_plan_data_source.go index 515140c4b53..3bd9d71e8ef 100644 --- a/internal/service/backup/report_plan_data_source.go +++ b/internal/service/backup/report_plan_data_source.go 
@@ -7,7 +7,7 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -116,7 +116,7 @@ func DataSourceReportPlan() *schema.Resource { func dataSourceReportPlanRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig name := d.Get(names.AttrName).(string) @@ -126,7 +126,7 @@ func dataSourceReportPlanRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "reading Backup Report Plan (%s): %s", name, err) } - d.SetId(aws.StringValue(reportPlan.ReportPlanName)) + d.SetId(aws.ToString(reportPlan.ReportPlanName)) d.Set(names.AttrARN, reportPlan.ReportPlanArn) d.Set(names.AttrCreationTime, reportPlan.CreationTime.Format(time.RFC3339)) @@ -142,7 +142,7 @@ func dataSourceReportPlanRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "setting report_setting: %s", err) } - tags, err := listTags(ctx, conn, aws.StringValue(reportPlan.ReportPlanArn)) + tags, err := listTags(ctx, conn, aws.ToString(reportPlan.ReportPlanArn)) if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for Backup Report Plan (%s): %s", d.Id(), err) diff --git a/internal/service/backup/report_plan_test.go b/internal/service/backup/report_plan_test.go index aa76530cbd4..c67bb9e4de5 100644 --- a/internal/service/backup/report_plan_test.go +++ b/internal/service/backup/report_plan_test.go @@ -8,7 +8,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +22,7 @@ import ( func TestAccBackupReportPlan_basic(t *testing.T) { ctx := acctest.Context(t) - var reportPlan backup.ReportPlan + var reportPlan awstypes.ReportPlan rName := sdkacctest.RandomWithPrefix("tf-test-bucket") rName2 := fmt.Sprintf("tf_acc_test_%s", sdkacctest.RandString(7)) originalDescription := "original description" @@ -81,7 +82,7 @@ func TestAccBackupReportPlan_basic(t *testing.T) { func TestAccBackupReportPlan_updateTags(t *testing.T) { ctx := acctest.Context(t) - var reportPlan backup.ReportPlan + var reportPlan awstypes.ReportPlan rName := sdkacctest.RandomWithPrefix("tf-test-bucket") rName2 := fmt.Sprintf("tf_acc_test_%s", sdkacctest.RandString(7)) description := "example description" @@ -166,7 +167,7 @@ func TestAccBackupReportPlan_updateTags(t *testing.T) { func TestAccBackupReportPlan_updateReportDeliveryChannel(t *testing.T) { ctx := acctest.Context(t) - var reportPlan backup.ReportPlan + var reportPlan awstypes.ReportPlan rName := sdkacctest.RandomWithPrefix("tf-test-bucket") rName2 := fmt.Sprintf("tf_acc_test_%s", sdkacctest.RandString(7)) description := "example description" @@ -226,7 +227,7 @@ func TestAccBackupReportPlan_updateReportDeliveryChannel(t *testing.T) { func TestAccBackupReportPlan_updateReportSettings(t *testing.T) { ctx := acctest.Context(t) - var reportPlan backup.ReportPlan + var reportPlan awstypes.ReportPlan rName := sdkacctest.RandomWithPrefix("tf-test-bucket") rName2 := fmt.Sprintf("tf_acc_test_%s", sdkacctest.RandString(7)) description := "example description" @@ -291,7 +292,7 @@ func TestAccBackupReportPlan_updateReportSettings(t *testing.T) { func TestAccBackupReportPlan_disappears(t *testing.T) { ctx := acctest.Context(t) - var reportPlan backup.ReportPlan + var reportPlan awstypes.ReportPlan rName := 
sdkacctest.RandomWithPrefix("tf-test-bucket") rName2 := fmt.Sprintf("tf_acc_test_%s", sdkacctest.RandString(7)) resourceName := "aws_backup_report_plan.test" @@ -315,9 +316,9 @@ func TestAccBackupReportPlan_disappears(t *testing.T) { } func testAccReportPlanPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) - _, err := conn.ListReportPlansWithContext(ctx, &backup.ListReportPlansInput{}) + _, err := conn.ListReportPlans(ctx, &backup.ListReportPlansInput{}) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) @@ -330,7 +331,7 @@ func testAccReportPlanPreCheck(ctx context.Context, t *testing.T) { func testAccCheckReportPlanDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_backup_report_plan" { @@ -354,7 +355,7 @@ func testAccCheckReportPlanDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckReportPlanExists(ctx context.Context, n string, v *backup.ReportPlan) resource.TestCheckFunc { +func testAccCheckReportPlanExists(ctx context.Context, n string, v *awstypes.ReportPlan) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -365,7 +366,7 @@ func testAccCheckReportPlanExists(ctx context.Context, n string, v *backup.Repor return fmt.Errorf("No Backup Report Plan ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) output, err := tfbackup.FindReportPlanByName(ctx, conn, rs.Primary.ID) diff --git a/internal/service/backup/selection.go b/internal/service/backup/selection.go index 
4130bd35dad..08cb3a5218d 100644 --- a/internal/service/backup/selection.go +++ b/internal/service/backup/selection.go @@ -11,15 +11,17 @@ import ( "strings" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -151,12 +153,10 @@ func ResourceSelection() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrType: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - backup.ConditionTypeStringequals, - }, false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.ConditionType](), }, names.AttrKey: { Type: schema.TypeString, @@ -190,14 +190,14 @@ func ResourceSelection() *schema.Resource { func resourceSelectionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) - selection := &backup.Selection{ + selection := 
&awstypes.BackupSelection{ Conditions: expandConditions(d.Get(names.AttrCondition).(*schema.Set).List()), IamRoleArn: aws.String(d.Get(names.AttrIAMRoleARN).(string)), ListOfTags: expandConditionTags(d.Get("selection_tag").(*schema.Set).List()), - NotResources: flex.ExpandStringSet(d.Get("not_resources").(*schema.Set)), - Resources: flex.ExpandStringSet(d.Get(names.AttrResources).(*schema.Set)), + NotResources: flex.ExpandStringValueSet(d.Get("not_resources").(*schema.Set)), + Resources: flex.ExpandStringValueSet(d.Get(names.AttrResources).(*schema.Set)), SelectionName: aws.String(d.Get(names.AttrName).(string)), } @@ -210,18 +210,18 @@ func resourceSelectionCreate(ctx context.Context, d *schema.ResourceData, meta i var output *backup.CreateBackupSelectionOutput err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { var err error - output, err = conn.CreateBackupSelectionWithContext(ctx, input) + output, err = conn.CreateBackupSelection(ctx, input) // Retry on the following error: // InvalidParameterValueException: IAM Role arn:aws:iam::123456789012:role/XXX cannot be assumed by AWS Backup - if tfawserr.ErrMessageContains(err, backup.ErrCodeInvalidParameterValueException, "cannot be assumed") { + if errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "cannot be assumed") { log.Printf("[DEBUG] Received %s, retrying create backup selection.", err) return retry.RetryableError(err) } // Retry on the following error: // InvalidParameterValueException: IAM Role arn:aws:iam::123456789012:role/XXX is not authorized to call tag:GetResources - if tfawserr.ErrMessageContains(err, backup.ErrCodeInvalidParameterValueException, "is not authorized to call") { + if errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "is not authorized to call") { log.Printf("[DEBUG] Received %s, retrying create backup selection.", err) return retry.RetryableError(err) } @@ -234,21 +234,21 @@ func resourceSelectionCreate(ctx 
context.Context, d *schema.ResourceData, meta i }) if tfresource.TimedOut(err) { - output, err = conn.CreateBackupSelectionWithContext(ctx, input) + output, err = conn.CreateBackupSelection(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating Backup Selection: %s", err) } - d.SetId(aws.StringValue(output.SelectionId)) + d.SetId(aws.ToString(output.SelectionId)) return append(diags, resourceSelectionRead(ctx, d, meta)...) } func resourceSelectionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.GetBackupSelectionInput{ BackupPlanId: aws.String(d.Get("plan_id").(string)), @@ -260,13 +260,13 @@ func resourceSelectionRead(ctx context.Context, d *schema.ResourceData, meta int err := retry.RetryContext(ctx, propagationTimeout, func() *retry.RetryError { var err error - resp, err = conn.GetBackupSelectionWithContext(ctx, input) + resp, err = conn.GetBackupSelection(ctx, input) - if d.IsNewResource() && tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if d.IsNewResource() && errs.IsA[*awstypes.ResourceNotFoundException](err) { return retry.RetryableError(err) } - if d.IsNewResource() && tfawserr.ErrMessageContains(err, backup.ErrCodeInvalidParameterValueException, "Cannot find Backup plan") { + if d.IsNewResource() && errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "Cannot find Backup plan") { return retry.RetryableError(err) } @@ -278,16 +278,16 @@ func resourceSelectionRead(ctx context.Context, d *schema.ResourceData, meta int }) if tfresource.TimedOut(err) { - resp, err = conn.GetBackupSelectionWithContext(ctx, input) + resp, err = conn.GetBackupSelection(ctx, input) } - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if !d.IsNewResource() && 
errs.IsA[*awstypes.ResourceNotFoundException](err) { log.Printf("[WARN] Backup Selection (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if !d.IsNewResource() && tfawserr.ErrMessageContains(err, backup.ErrCodeInvalidParameterValueException, "Cannot find Backup plan") { + if !d.IsNewResource() && errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "Cannot find Backup plan") { log.Printf("[WARN] Backup Selection (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -317,9 +317,9 @@ func resourceSelectionRead(ctx context.Context, d *schema.ResourceData, meta int for _, r := range resp.BackupSelection.ListOfTags { m := make(map[string]interface{}) - m[names.AttrType] = aws.StringValue(r.ConditionType) - m[names.AttrKey] = aws.StringValue(r.ConditionKey) - m[names.AttrValue] = aws.StringValue(r.ConditionValue) + m[names.AttrType] = string(r.ConditionType) + m[names.AttrKey] = aws.ToString(r.ConditionKey) + m[names.AttrValue] = aws.ToString(r.ConditionValue) tags = append(tags, m) } @@ -329,16 +329,12 @@ func resourceSelectionRead(ctx context.Context, d *schema.ResourceData, meta int } } - if resp.BackupSelection.Resources != nil { - if err := d.Set(names.AttrResources, aws.StringValueSlice(resp.BackupSelection.Resources)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting resources: %s", err) - } + if err := d.Set(names.AttrResources, resp.BackupSelection.Resources); err != nil { + return sdkdiag.AppendErrorf(diags, "setting resources: %s", err) } - if resp.BackupSelection.NotResources != nil { - if err := d.Set("not_resources", aws.StringValueSlice(resp.BackupSelection.NotResources)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting not resources: %s", err) - } + if err := d.Set("not_resources", resp.BackupSelection.NotResources); err != nil { + return sdkdiag.AppendErrorf(diags, "setting not resources: %s", err) } return diags @@ -346,14 +342,19 @@ func resourceSelectionRead(ctx 
context.Context, d *schema.ResourceData, meta int func resourceSelectionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.DeleteBackupSelectionInput{ BackupPlanId: aws.String(d.Get("plan_id").(string)), SelectionId: aws.String(d.Id()), } - _, err := conn.DeleteBackupSelectionWithContext(ctx, input) + _, err := conn.DeleteBackupSelection(ctx, input) + + if errs.IsA[*awstypes.InvalidParameterValueException](err) { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Backup Selection: %s", err) } @@ -376,14 +377,14 @@ func resourceSelectionImportState(ctx context.Context, d *schema.ResourceData, m return []*schema.ResourceData{d}, nil } -func expandConditionTags(tagList []interface{}) []*backup.Condition { - conditions := []*backup.Condition{} +func expandConditionTags(tagList []interface{}) []awstypes.Condition { + conditions := []awstypes.Condition{} for _, i := range tagList { item := i.(map[string]interface{}) - tag := &backup.Condition{} + tag := awstypes.Condition{} - tag.ConditionType = aws.String(item[names.AttrType].(string)) + tag.ConditionType = awstypes.ConditionType(item[names.AttrType].(string)) tag.ConditionKey = aws.String(item[names.AttrKey].(string)) tag.ConditionValue = aws.String(item[names.AttrValue].(string)) @@ -393,8 +394,8 @@ func expandConditionTags(tagList []interface{}) []*backup.Condition { return conditions } -func expandConditions(conditionsList []interface{}) *backup.Conditions { - conditions := &backup.Conditions{} +func expandConditions(conditionsList []interface{}) *awstypes.Conditions { + conditions := &awstypes.Conditions{} for _, condition := range conditionsList { mCondition := condition.(map[string]interface{}) @@ -416,12 +417,12 @@ func expandConditions(conditionsList []interface{}) *backup.Conditions { return 
conditions } -func expandConditionParameters(conditionParametersList []interface{}) []*backup.ConditionParameter { - conditionParameters := []*backup.ConditionParameter{} +func expandConditionParameters(conditionParametersList []interface{}) []awstypes.ConditionParameter { + conditionParameters := []awstypes.ConditionParameter{} for _, i := range conditionParametersList { item := i.(map[string]interface{}) - conditionParameter := &backup.ConditionParameter{} + conditionParameter := awstypes.ConditionParameter{} conditionParameter.ConditionKey = aws.String(item[names.AttrKey].(string)) conditionParameter.ConditionValue = aws.String(item[names.AttrValue].(string)) @@ -432,7 +433,7 @@ func expandConditionParameters(conditionParametersList []interface{}) []*backup. return conditionParameters } -func flattenConditions(conditions *backup.Conditions) *schema.Set { +func flattenConditions(conditions *awstypes.Conditions) *schema.Set { var vConditions []interface{} mCondition := map[string]interface{}{} @@ -471,7 +472,7 @@ func conditionsHash(vCondition interface{}) int { return create.StringHashcode(buf.String()) } -func flattenConditionParameters(conditionParameters []*backup.ConditionParameter) []interface{} { +func flattenConditionParameters(conditionParameters []awstypes.ConditionParameter) []interface{} { if len(conditionParameters) == 0 { return nil } @@ -479,13 +480,9 @@ func flattenConditionParameters(conditionParameters []*backup.ConditionParameter var tfList []interface{} for _, conditionParameter := range conditionParameters { - if conditionParameter == nil { - continue - } - tfMap := map[string]interface{}{ - names.AttrKey: aws.StringValue(conditionParameter.ConditionKey), - names.AttrValue: aws.StringValue(conditionParameter.ConditionValue), + names.AttrKey: aws.ToString(conditionParameter.ConditionKey), + names.AttrValue: aws.ToString(conditionParameter.ConditionValue), } tfList = append(tfList, tfMap) diff --git 
a/internal/service/backup/selection_data_source.go b/internal/service/backup/selection_data_source.go index 7d29a58dff2..401bec6c275 100644 --- a/internal/service/backup/selection_data_source.go +++ b/internal/service/backup/selection_data_source.go @@ -6,8 +6,8 @@ package backup import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -48,26 +48,24 @@ func DataSourceSelection() *schema.Resource { func dataSourceSelectionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.GetBackupSelectionInput{ BackupPlanId: aws.String(d.Get("plan_id").(string)), SelectionId: aws.String(d.Get("selection_id").(string)), } - resp, err := conn.GetBackupSelectionWithContext(ctx, input) + resp, err := conn.GetBackupSelection(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "getting Backup Selection: %s", err) } - d.SetId(aws.StringValue(resp.SelectionId)) + d.SetId(aws.ToString(resp.SelectionId)) d.Set(names.AttrIAMRoleARN, resp.BackupSelection.IamRoleArn) d.Set(names.AttrName, resp.BackupSelection.SelectionName) - if resp.BackupSelection.Resources != nil { - if err := d.Set(names.AttrResources, aws.StringValueSlice(resp.BackupSelection.Resources)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting resources: %s", err) - } + if err := d.Set(names.AttrResources, resp.BackupSelection.Resources); err != nil { + return sdkdiag.AppendErrorf(diags, "setting resources: %s", err) } return diags diff --git a/internal/service/backup/selection_test.go b/internal/service/backup/selection_test.go 
index 60f4f74df87..1602d18256b 100644 --- a/internal/service/backup/selection_test.go +++ b/internal/service/backup/selection_test.go @@ -8,8 +8,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -254,7 +254,7 @@ func TestAccBackupSelection_updateTag(t *testing.T) { func testAccCheckSelectionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_backup_selection" { continue @@ -265,7 +265,7 @@ func testAccCheckSelectionDestroy(ctx context.Context) resource.TestCheckFunc { SelectionId: aws.String(rs.Primary.ID), } - resp, err := conn.GetBackupSelectionWithContext(ctx, input) + resp, err := conn.GetBackupSelection(ctx, input) if err == nil { if *resp.SelectionId == rs.Primary.ID { @@ -285,14 +285,14 @@ func testAccCheckSelectionExists(ctx context.Context, name string, selection *ba return fmt.Errorf("not found: %s, %v", name, s.RootModule().Resources) } - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) input := &backup.GetBackupSelectionInput{ BackupPlanId: aws.String(rs.Primary.Attributes["plan_id"]), SelectionId: aws.String(rs.Primary.ID), } - output, err := conn.GetBackupSelectionWithContext(ctx, input) + output, err := conn.GetBackupSelection(ctx, input) if err != nil { return err diff --git a/internal/service/backup/service_endpoint_resolver_gen.go 
b/internal/service/backup/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f858b73d3da --- /dev/null +++ b/internal/service/backup/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package backup + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + backup_sdkv2 "github.com/aws/aws-sdk-go-v2/service/backup" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ backup_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver backup_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: backup_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params backup_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not 
found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up backup endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*backup_sdkv2.Options) { + return func(o *backup_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/backup/service_endpoints_gen_test.go b/internal/service/backup/service_endpoints_gen_test.go index fd84c0b5b77..28c210b2512 100644 --- a/internal/service/backup/service_endpoints_gen_test.go +++ b/internal/service/backup/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package backup_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - backup_sdkv1 "github.com/aws/aws-sdk-go/service/backup" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + backup_sdkv2 "github.com/aws/aws-sdk-go-v2/service/backup" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: 
expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := backup_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(backup_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), backup_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := backup_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(backup_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), backup_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.BackupConn(ctx) - - req, _ := client.ListBackupPlansRequest(&backup_sdkv1.ListBackupPlansInput{}) + client := meta.BackupClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - 
region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListBackupPlans(ctx, &backup_sdkv2.ListBackupPlansInput{}, + func(opts *backup_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) 
error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in 
middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/backup/service_package_gen.go b/internal/service/backup/service_package_gen.go index f2897dd2b76..4cdedfc56f4 100644 --- a/internal/service/backup/service_package_gen.go +++ b/internal/service/backup/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package backup import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - backup_sdkv1 "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + backup_sdkv2 "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -115,25 +112,14 @@ func (p *servicePackage) ServicePackageName() string { return names.Backup } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*backup_sdkv1.Backup, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*backup_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return backup_sdkv1.New(sess.Copy(&cfg)), nil + return backup_sdkv2.NewFromConfig(cfg, + backup_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/backup/status.go b/internal/service/backup/status.go index 8fb4acdec96..0cb4f27e4f1 100644 --- a/internal/service/backup/status.go +++ b/internal/service/backup/status.go @@ -6,16 +6,15 @@ package backup import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func statusJobState(ctx context.Context, conn *backup.Backup, id string) retry.StateRefreshFunc { +func statusJobState(ctx context.Context, conn *backup.Client, id string) retry.StateRefreshFunc { return 
func() (interface{}, string, error) { - output, err := FindJobByID(ctx, conn, id) + output, err := findJobByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -25,33 +24,29 @@ func statusJobState(ctx context.Context, conn *backup.Backup, id string) retry.S return nil, "", err } - return output, aws.StringValue(output.State), nil + return output, string(output.State), nil } } -func statusFramework(ctx context.Context, conn *backup.Backup, id string) retry.StateRefreshFunc { +func statusFramework(ctx context.Context, conn *backup.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - input := &backup.DescribeFrameworkInput{ - FrameworkName: aws.String(id), - } - - output, err := conn.DescribeFrameworkWithContext(ctx, input) + output, err := findFrameworkByName(ctx, conn, id) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { - return output, backup.ErrCodeResourceNotFoundException, nil + if tfresource.NotFound(err) { + return nil, "", nil } if err != nil { return nil, "", err } - return output, aws.StringValue(output.DeploymentStatus), nil + return output, aws.ToString(output.DeploymentStatus), nil } } -func statusRecoveryPoint(ctx context.Context, conn *backup.Backup, backupVaultName, recoveryPointARN string) retry.StateRefreshFunc { +func statusRecoveryPoint(ctx context.Context, conn *backup.Client, backupVaultName, recoveryPointARN string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindRecoveryPointByTwoPartKey(ctx, conn, backupVaultName, recoveryPointARN) + output, err := findRecoveryPointByTwoPartKey(ctx, conn, backupVaultName, recoveryPointARN) if tfresource.NotFound(err) { return nil, "", nil @@ -61,6 +56,6 @@ func statusRecoveryPoint(ctx context.Context, conn *backup.Backup, backupVaultNa return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } diff --git 
a/internal/service/backup/sweep.go b/internal/service/backup/sweep.go index fbf0073e99b..df876b82307 100644 --- a/internal/service/backup/sweep.go +++ b/internal/service/backup/sweep.go @@ -7,12 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/go-multierror" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -57,86 +56,80 @@ func sweepFramework(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("Error getting client: %w", err) + return fmt.Errorf("error getting client: %w", err) } - conn := client.BackupConn(ctx) + conn := client.BackupClient(ctx) input := &backup.ListFrameworksInput{} - var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListFrameworksPagesWithContext(ctx, input, func(page *backup.ListFrameworksOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := backup.NewListFrameworksPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Backup Framework sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Backup Frameworks (%s): %w", region, err) } for _, framework := range page.Frameworks { r := ResourceFramework() d := r.Data(nil) - d.SetId(aws.StringValue(framework.FrameworkName)) + d.SetId(aws.ToString(framework.FrameworkName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return 
!lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Backup Framework sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing Backup Frameworks for %s: %w", region, err)) } if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping Backup Frameworks for %s: %w", region, err)) + return fmt.Errorf("error sweeping Backup Frameworks (%s): %w", region, err) } - return sweeperErrs.ErrorOrNil() + return nil } func sweepReportPlan(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("Error getting client: %w", err) + return fmt.Errorf("error getting client: %w", err) } - conn := client.BackupConn(ctx) + conn := client.BackupClient(ctx) input := &backup.ListReportPlansInput{} - var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListReportPlansPagesWithContext(ctx, input, func(page *backup.ListReportPlansOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := backup.NewListReportPlansPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Backup Report Plans sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Backup Report Plans for %s: %w", region, err) } for _, reportPlan := range page.ReportPlans { r := ResourceReportPlan() d := r.Data(nil) - d.SetId(aws.StringValue(reportPlan.ReportPlanName)) + d.SetId(aws.ToString(reportPlan.ReportPlanName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Backup Report Plans sweep for %s: %s", 
region, err) - return sweeperErrs.ErrorOrNil() - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing Backup Report Plans for %s: %w", region, err)) } if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping Backup Report Plans for %s: %w", region, err)) + return fmt.Errorf("error sweeping Backup Report Plans for %s: %w", region, err) } - return sweeperErrs.ErrorOrNil() + return nil } func sweepVaultLockConfiguration(region string) error { @@ -144,49 +137,41 @@ func sweepVaultLockConfiguration(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("Error getting client: %w", err) + return fmt.Errorf("error getting client: %w", err) } - conn := client.BackupConn(ctx) + conn := client.BackupClient(ctx) sweepResources := make([]sweep.Sweepable, 0) - var errs *multierror.Error - input := &backup.ListBackupVaultsInput{} - err = conn.ListBackupVaultsPagesWithContext(ctx, input, func(page *backup.ListBackupVaultsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := backup.NewListBackupVaultsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Backup Vault Lock Configuration sweep for %s: %s", region, err) + return nil } - for _, vault := range page.BackupVaultList { - if vault == nil { - continue - } + if err != nil { + return fmt.Errorf("error listing Backup Vaults for %s: %w", region, err) + } + for _, vault := range page.BackupVaultList { r := ResourceVaultLockConfiguration() d := r.Data(nil) - d.SetId(aws.StringValue(vault.BackupVaultName)) + d.SetId(aws.ToString(vault.BackupVaultName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, 
fmt.Errorf("error listing Backup Vaults for %s: %w", region, err)) } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - errs = multierror.Append(errs, fmt.Errorf("error sweeping Backup Vault Lock Configuration for %s: %w", region, err)) - } - - if awsv1.SkipSweepError(errs.ErrorOrNil()) { - log.Printf("[WARN] Skipping Backup Vault Lock Configuration sweep for %s: %s", region, errs) - return nil + return fmt.Errorf("error sweeping Backup Vault Lock Configuration for %s: %w", region, err) } - return errs.ErrorOrNil() + return nil } func sweepVaultNotifications(region string) error { @@ -194,49 +179,41 @@ func sweepVaultNotifications(region string) error { client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("Error getting client: %w", err) + return fmt.Errorf("error getting client: %w", err) } - conn := client.BackupConn(ctx) + conn := client.BackupClient(ctx) sweepResources := make([]sweep.Sweepable, 0) - var errs *multierror.Error - input := &backup.ListBackupVaultsInput{} - err = conn.ListBackupVaultsPagesWithContext(ctx, input, func(page *backup.ListBackupVaultsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := backup.NewListBackupVaultsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Backup Vault Notifications sweep for %s: %s", region, err) + return nil } - for _, vault := range page.BackupVaultList { - if vault == nil { - continue - } + if err != nil { + return fmt.Errorf("error listing Backup Vaults for %s: %w", region, err) + } + for _, vault := range page.BackupVaultList { r := ResourceVaultNotifications() d := r.Data(nil) - d.SetId(aws.StringValue(vault.BackupVaultName)) + d.SetId(aws.ToString(vault.BackupVaultName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = 
multierror.Append(errs, fmt.Errorf("error listing Backup Vaults for %s: %w", region, err)) } if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - errs = multierror.Append(errs, fmt.Errorf("error sweeping Backup Vault Notifications for %s: %w", region, err)) + return fmt.Errorf("error sweeping Backup Vault Notifications for %s: %w", region, err) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { - log.Printf("[WARN] Skipping Backup Vault Notifications sweep for %s: %s", region, errs) - return nil - } - - return errs.ErrorOrNil() + return nil } func sweepVaultPolicies(region string) error { @@ -245,41 +222,38 @@ func sweepVaultPolicies(region string) error { if err != nil { return fmt.Errorf("Error getting client: %w", err) } - conn := client.BackupConn(ctx) + conn := client.BackupClient(ctx) input := &backup.ListBackupVaultsInput{} - var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListBackupVaultsPagesWithContext(ctx, input, func(page *backup.ListBackupVaultsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := backup.NewListBackupVaultsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Backup Vault Policies sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Backup Vaults for %s: %w", region, err) } for _, vault := range page.BackupVaultList { r := ResourceVaultPolicy() d := r.Data(nil) - d.SetId(aws.StringValue(vault.BackupVaultName)) + d.SetId(aws.ToString(vault.BackupVaultName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Backup Vault Policies sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing 
Backup Vaults for %s: %w", region, err)) } if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping Backup Vault Policies for %s: %w", region, err)) + return fmt.Errorf("error sweeping Backup Vault Policies for %s: %w", region, err) } - return sweeperErrs.ErrorOrNil() + return nil } func sweepVaults(region string) error { @@ -289,18 +263,26 @@ func sweepVaults(region string) error { if err != nil { return fmt.Errorf("Error getting client: %w", err) } - conn := client.BackupConn(ctx) + conn := client.BackupClient(ctx) input := &backup.ListBackupVaultsInput{} - var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListBackupVaultsPagesWithContext(ctx, input, func(page *backup.ListBackupVaultsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := backup.NewListBackupVaultsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Backup Vaults sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Backup Vaults for %s: %w", region, err) } for _, vault := range page.BackupVaultList { - name := aws.StringValue(vault.BackupVaultName) + name := aws.ToString(vault.BackupVaultName) // Ignore Default and Automatic EFS Backup Vaults in region (cannot be deleted) if name == "Default" || name == "aws/efs/automatic-backup-vault" { @@ -315,22 +297,11 @@ func sweepVaults(region string) error { sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Backup Vaults sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing Backup Vaults for %s: %w", region, err)) } if err := 
sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping Backup Vaults for %s: %w", region, err)) + return fmt.Errorf("error sweeping Backup Vaults for %s: %w", region, err) } - return sweeperErrs.ErrorOrNil() + return nil } diff --git a/internal/service/backup/tags_gen.go b/internal/service/backup/tags_gen.go index a68b3ddb9a3..f169bf5c75d 100644 --- a/internal/service/backup/tags_gen.go +++ b/internal/service/backup/tags_gen.go @@ -4,11 +4,9 @@ package backup import ( "context" "fmt" - "maps" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/aws/aws-sdk-go/service/backup/backupiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -20,33 +18,24 @@ import ( // listTags lists backup service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn backupiface.BackupAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *backup.Client, identifier string, optFns ...func(*backup.Options)) (tftags.KeyValueTags, error) { input := &backup.ListTagsInput{ ResourceArn: aws.String(identifier), } - output := make(map[string]*string) - err := conn.ListTagsPagesWithContext(ctx, input, func(page *backup.ListTagsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - maps.Copy(output, page.Tags) - - return !lastPage - }) + output, err := conn.ListTags(ctx, input, optFns...) 
if err != nil { return tftags.New(ctx, nil), err } - return KeyValueTags(ctx, output), nil + return KeyValueTags(ctx, output.Tags), nil } // ListTags lists backup service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).BackupConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).BackupClient(ctx), identifier) if err != nil { return err @@ -59,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns backup service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from backup service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns backup service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -84,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets backup service tags in Context. 
-func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -93,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates backup service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn backupiface.BackupAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *backup.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*backup.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -104,10 +93,10 @@ func updateTags(ctx context.Context, conn backupiface.BackupAPI, identifier stri if len(removedTags) > 0 { input := &backup.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeyList: aws.StringSlice(removedTags.Keys()), + TagKeyList: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -122,7 +111,7 @@ func updateTags(ctx context.Context, conn backupiface.BackupAPI, identifier stri Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -135,5 +124,5 @@ func updateTags(ctx context.Context, conn backupiface.BackupAPI, identifier stri // UpdateTags updates backup service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).BackupConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).BackupClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/backup/vault.go b/internal/service/backup/vault.go index 15aa619c3df..4c104c552fa 100644 --- a/internal/service/backup/vault.go +++ b/internal/service/backup/vault.go @@ -11,13 +11,15 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -82,7 +84,7 @@ func ResourceVault() *schema.Resource { func resourceVaultCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) name := d.Get(names.AttrName).(string) input := &backup.CreateBackupVaultInput{ @@ -94,7 +96,7 @@ func resourceVaultCreate(ctx context.Context, d *schema.ResourceData, meta inter input.EncryptionKeyArn = aws.String(v.(string)) } - _, err := conn.CreateBackupVaultWithContext(ctx, 
input) + _, err := conn.CreateBackupVault(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Backup Vault (%s): %s", name, err) @@ -107,9 +109,9 @@ func resourceVaultCreate(ctx context.Context, d *schema.ResourceData, meta inter func resourceVaultRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) - output, err := FindVaultByName(ctx, conn, d.Id()) + output, err := findVaultByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Backup Vault (%s) not found, removing from state", d.Id()) @@ -139,7 +141,7 @@ func resourceVaultUpdate(ctx context.Context, d *schema.ResourceData, meta inter func resourceVaultDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) if d.Get(names.AttrForceDestroy).(bool) { input := &backup.ListRecoveryPointsByBackupVaultInput{ @@ -147,51 +149,47 @@ func resourceVaultDelete(ctx context.Context, d *schema.ResourceData, meta inter } var errs []error - err := conn.ListRecoveryPointsByBackupVaultPagesWithContext(ctx, input, func(page *backup.ListRecoveryPointsByBackupVaultOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := backup.NewListRecoveryPointsByBackupVaultPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "listing Backup Vault (%s) recovery points: %s", d.Id(), err) + } + + if err := errors.Join(errs...); err != nil { + return sdkdiag.AppendErrorf(diags, "deleting Backup Vault (%s): %s", d.Id(), err) } for _, v := range page.RecoveryPoints { - recoveryPointARN := aws.StringValue(v.RecoveryPointArn) + recoveryPointARN := 
aws.ToString(v.RecoveryPointArn) log.Printf("[DEBUG] Deleting Backup Vault recovery point: %s", recoveryPointARN) - _, err := conn.DeleteRecoveryPointWithContext(ctx, &backup.DeleteRecoveryPointInput{ + _, err := conn.DeleteRecoveryPoint(ctx, &backup.DeleteRecoveryPointInput{ BackupVaultName: aws.String(d.Id()), RecoveryPointArn: aws.String(recoveryPointARN), }) if err != nil { errs = append(errs, fmt.Errorf("deleting recovery point (%s): %w", recoveryPointARN, err)) - continue } if _, err := waitRecoveryPointDeleted(ctx, conn, d.Id(), recoveryPointARN, d.Timeout(schema.TimeoutDelete)); err != nil { errs = append(errs, fmt.Errorf("waiting for recovery point (%s) delete: %w", recoveryPointARN, err)) - continue } } - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing Backup Vault (%s) recovery points: %s", d.Id(), err) - } - - if err := errors.Join(errs...); err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Backup Vault (%s): %s", d.Id(), err) } } log.Printf("[DEBUG] Deleting Backup Vault: %s", d.Id()) - _, err := conn.DeleteBackupVaultWithContext(ctx, &backup.DeleteBackupVaultInput{ + _, err := conn.DeleteBackupVault(ctx, &backup.DeleteBackupVaultInput{ BackupVaultName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) || tfawserr.ErrCodeEquals(err, errCodeAccessDeniedException) { return diags } diff --git a/internal/service/backup/vault_data_source.go b/internal/service/backup/vault_data_source.go index 931d569ecfa..fca00796f5c 100644 --- a/internal/service/backup/vault_data_source.go +++ b/internal/service/backup/vault_data_source.go @@ -6,8 +6,8 @@ package backup import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -45,7 +45,7 @@ func DataSourceVault() *schema.Resource { func dataSourceVaultRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig name := d.Get(names.AttrName).(string) @@ -53,18 +53,18 @@ func dataSourceVaultRead(ctx context.Context, d *schema.ResourceData, meta inter BackupVaultName: aws.String(name), } - resp, err := conn.DescribeBackupVaultWithContext(ctx, input) + resp, err := conn.DescribeBackupVault(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "getting Backup Vault: %s", err) } - d.SetId(aws.StringValue(resp.BackupVaultName)) + d.SetId(aws.ToString(resp.BackupVaultName)) d.Set(names.AttrARN, resp.BackupVaultArn) d.Set(names.AttrKMSKeyARN, resp.EncryptionKeyArn) d.Set(names.AttrName, resp.BackupVaultName) d.Set("recovery_points", resp.NumberOfRecoveryPoints) - tags, err := listTags(ctx, conn, aws.StringValue(resp.BackupVaultArn)) + tags, err := listTags(ctx, conn, aws.ToString(resp.BackupVaultArn)) if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for Backup Vault (%s): %s", name, err) } diff --git a/internal/service/backup/vault_lock_configuration.go b/internal/service/backup/vault_lock_configuration.go index 3db045432b0..3a042a7d340 100644 --- a/internal/service/backup/vault_lock_configuration.go +++ b/internal/service/backup/vault_lock_configuration.go @@ -8,13 +8,14 @@ import ( "log" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/backup/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -62,7 +63,7 @@ func ResourceVaultLockConfiguration() *schema.Resource { func resourceVaultLockConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) name := d.Get("backup_vault_name").(string) input := &backup.PutBackupVaultLockConfigurationInput{ @@ -81,7 +82,7 @@ func resourceVaultLockConfigurationCreate(ctx context.Context, d *schema.Resourc input.MinRetentionDays = aws.Int64(int64(v.(int))) } - _, err := conn.PutBackupVaultLockConfigurationWithContext(ctx, input) + _, err := conn.PutBackupVaultLockConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Backup Vault Lock Configuration (%s): %s", name, err) @@ -94,9 +95,9 @@ func resourceVaultLockConfigurationCreate(ctx context.Context, d *schema.Resourc func resourceVaultLockConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) - output, err := FindVaultByName(ctx, conn, d.Id()) + output, err := findVaultByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Backup Vault Lock Configuration (%s) not found, removing from state", d.Id()) @@ -118,14 +119,14 @@ func resourceVaultLockConfigurationRead(ctx 
context.Context, d *schema.ResourceD func resourceVaultLockConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) log.Printf("[DEBUG] Deleting Backup Vault Lock Configuration: %s", d.Id()) - _, err := conn.DeleteBackupVaultLockConfigurationWithContext(ctx, &backup.DeleteBackupVaultLockConfigurationInput{ + _, err := conn.DeleteBackupVaultLockConfiguration(ctx, &backup.DeleteBackupVaultLockConfigurationInput{ BackupVaultName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } diff --git a/internal/service/backup/vault_lock_configuration_test.go b/internal/service/backup/vault_lock_configuration_test.go index 76d2f1c6429..31369fb900f 100644 --- a/internal/service/backup/vault_lock_configuration_test.go +++ b/internal/service/backup/vault_lock_configuration_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/service/backup" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -77,7 +77,7 @@ func TestAccBackupVaultLockConfiguration_disappears(t *testing.T) { func testAccCheckVaultLockConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_backup_vault_lock_configuration" { continue @@ -111,7 +111,7 @@ func testAccCheckVaultLockConfigurationExists(ctx context.Context, name string, return fmt.Errorf("No Backup 
Vault Lock Configuration ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) output, err := tfbackup.FindVaultByName(ctx, conn, rs.Primary.ID) diff --git a/internal/service/backup/vault_notifications.go b/internal/service/backup/vault_notifications.go index d21d3603519..8e4912ecad1 100644 --- a/internal/service/backup/vault_notifications.go +++ b/internal/service/backup/vault_notifications.go @@ -8,13 +8,15 @@ import ( "log" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -49,8 +51,8 @@ func ResourceVaultNotifications() *schema.Resource { Required: true, ForceNew: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(backup.VaultEvent_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.BackupVaultEvent](), }, }, "backup_vault_arn": { @@ -63,15 +65,15 @@ func ResourceVaultNotifications() *schema.Resource { func resourceVaultNotificationsCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.PutBackupVaultNotificationsInput{ BackupVaultName: aws.String(d.Get("backup_vault_name").(string)), SNSTopicArn: aws.String(d.Get(names.AttrSNSTopicARN).(string)), - BackupVaultEvents: flex.ExpandStringSet(d.Get("backup_vault_events").(*schema.Set)), + BackupVaultEvents: flex.ExpandStringyValueSet[awstypes.BackupVaultEvent](d.Get("backup_vault_events").(*schema.Set)), } - _, err := conn.PutBackupVaultNotificationsWithContext(ctx, input) + _, err := conn.PutBackupVaultNotifications(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Backup Vault Notifications (%s): %s", d.Id(), err) } @@ -83,14 +85,14 @@ func resourceVaultNotificationsCreate(ctx context.Context, d *schema.ResourceDat func resourceVaultNotificationsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.GetBackupVaultNotificationsInput{ BackupVaultName: aws.String(d.Id()), } - resp, err := conn.GetBackupVaultNotificationsWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + resp, err := conn.GetBackupVaultNotifications(ctx, input) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { log.Printf("[WARN] Backup Vault Notifcations %s not found, removing from state", d.Id()) d.SetId("") return diags @@ -102,7 +104,7 @@ func resourceVaultNotificationsRead(ctx context.Context, d *schema.ResourceData, d.Set("backup_vault_name", resp.BackupVaultName) d.Set(names.AttrSNSTopicARN, resp.SNSTopicArn) d.Set("backup_vault_arn", resp.BackupVaultArn) - if err := d.Set("backup_vault_events", flex.FlattenStringSet(resp.BackupVaultEvents)); err != nil { + if err := d.Set("backup_vault_events", flex.FlattenStringyValueSet(resp.BackupVaultEvents)); err != nil { return 
sdkdiag.AppendErrorf(diags, "setting backup_vault_events: %s", err) } @@ -111,15 +113,15 @@ func resourceVaultNotificationsRead(ctx context.Context, d *schema.ResourceData, func resourceVaultNotificationsDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) input := &backup.DeleteBackupVaultNotificationsInput{ BackupVaultName: aws.String(d.Id()), } - _, err := conn.DeleteBackupVaultNotificationsWithContext(ctx, input) + _, err := conn.DeleteBackupVaultNotifications(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } return sdkdiag.AppendErrorf(diags, "deleting Backup Vault Notifications (%s): %s", d.Id(), err) diff --git a/internal/service/backup/vault_notifications_test.go b/internal/service/backup/vault_notifications_test.go index 79059f9485b..867ac62a829 100644 --- a/internal/service/backup/vault_notifications_test.go +++ b/internal/service/backup/vault_notifications_test.go @@ -8,8 +8,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -73,7 +73,7 @@ func TestAccBackupVaultNotification_disappears(t *testing.T) { func testAccCheckVaultNotificationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != 
"aws_backup_vault_notifications" { continue @@ -83,10 +83,10 @@ func testAccCheckVaultNotificationDestroy(ctx context.Context) resource.TestChec BackupVaultName: aws.String(rs.Primary.ID), } - resp, err := conn.GetBackupVaultNotificationsWithContext(ctx, input) + resp, err := conn.GetBackupVaultNotifications(ctx, input) if err == nil { - if aws.StringValue(resp.BackupVaultName) == rs.Primary.ID { + if aws.ToString(resp.BackupVaultName) == rs.Primary.ID { return fmt.Errorf("Backup Plan notifications '%s' was not deleted properly", rs.Primary.ID) } } @@ -103,11 +103,11 @@ func testAccCheckVaultNotificationExists(ctx context.Context, name string, vault return fmt.Errorf("Not found: %s", name) } - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) params := &backup.GetBackupVaultNotificationsInput{ BackupVaultName: aws.String(rs.Primary.ID), } - resp, err := conn.GetBackupVaultNotificationsWithContext(ctx, params) + resp, err := conn.GetBackupVaultNotifications(ctx, params) if err != nil { return err } diff --git a/internal/service/backup/vault_policy.go b/internal/service/backup/vault_policy.go index e62a0f96ab3..59bc4b69471 100644 --- a/internal/service/backup/vault_policy.go +++ b/internal/service/backup/vault_policy.go @@ -7,14 +7,16 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes "github.com/aws/aws-sdk-go-v2/service/backup/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -59,7 +61,7 @@ func ResourceVaultPolicy() *schema.Resource { func resourceVaultPolicyPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) policy, err := structure.NormalizeJsonString(d.Get(names.AttrPolicy).(string)) @@ -75,7 +77,7 @@ func resourceVaultPolicyPut(ctx context.Context, d *schema.ResourceData, meta in _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, iamPropagationTimeout, func() (interface{}, error) { - return conn.PutBackupVaultAccessPolicyWithContext(ctx, input) + return conn.PutBackupVaultAccessPolicy(ctx, input) }, errCodeInvalidParameterValueException, "Provided principal is not valid", ) @@ -91,9 +93,9 @@ func resourceVaultPolicyPut(ctx context.Context, d *schema.ResourceData, meta in func resourceVaultPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) - output, err := FindVaultAccessPolicyByName(ctx, conn, d.Id()) + output, err := findVaultAccessPolicyByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Backup Vault Policy (%s) not found, removing from state", d.Id()) @@ -108,7 +110,7 @@ func resourceVaultPolicyRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("backup_vault_arn", output.BackupVaultArn) d.Set("backup_vault_name", output.BackupVaultName) - policyToSet, err := verify.SecondJSONUnlessEquivalent(d.Get(names.AttrPolicy).(string), 
aws.StringValue(output.Policy)) + policyToSet, err := verify.SecondJSONUnlessEquivalent(d.Get(names.AttrPolicy).(string), aws.ToString(output.Policy)) if err != nil { return sdkdiag.AppendErrorf(diags, "while setting policy (%s), encountered: %s", policyToSet, err) @@ -127,14 +129,14 @@ func resourceVaultPolicyRead(ctx context.Context, d *schema.ResourceData, meta i func resourceVaultPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BackupConn(ctx) + conn := meta.(*conns.AWSClient).BackupClient(ctx) log.Printf("[DEBUG] Deleting Backup Vault Policy (%s)", d.Id()) - _, err := conn.DeleteBackupVaultAccessPolicyWithContext(ctx, &backup.DeleteBackupVaultAccessPolicyInput{ + _, err := conn.DeleteBackupVaultAccessPolicy(ctx, &backup.DeleteBackupVaultAccessPolicyInput{ BackupVaultName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, backup.ErrCodeResourceNotFoundException) || tfawserr.ErrCodeEquals(err, errCodeAccessDeniedException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) || tfawserr.ErrCodeEquals(err, errCodeAccessDeniedException) { return diags } diff --git a/internal/service/backup/vault_policy_test.go b/internal/service/backup/vault_policy_test.go index 015cd747594..d17b6c0403f 100644 --- a/internal/service/backup/vault_policy_test.go +++ b/internal/service/backup/vault_policy_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/service/backup" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -154,7 +154,7 @@ func TestAccBackupVaultPolicy_ignoreEquivalent(t *testing.T) { func testAccCheckVaultPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - 
conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_backup_vault_policy" { @@ -189,7 +189,7 @@ func testAccCheckVaultPolicyExists(ctx context.Context, name string, vault *back return fmt.Errorf("No Backup Vault Policy ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) output, err := tfbackup.FindVaultAccessPolicyByName(ctx, conn, rs.Primary.ID) @@ -205,6 +205,10 @@ func testAccCheckVaultPolicyExists(ctx context.Context, name string, vault *back func testAccVaultPolicyConfig_basic(rName string) string { return fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +data "aws_partition" "current" {} + resource "aws_backup_vault" "test" { name = %[1]q } @@ -219,7 +223,7 @@ resource "aws_backup_vault_policy" "test" { Sid = "default" Effect = "Allow" Principal = { - AWS = "*" + AWS = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root" } Action = [ "backup:DescribeBackupVault", @@ -240,6 +244,10 @@ resource "aws_backup_vault_policy" "test" { func testAccVaultPolicyConfig_updated(rName string) string { return fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +data "aws_partition" "current" {} + resource "aws_backup_vault" "test" { name = %[1]q } @@ -254,7 +262,7 @@ resource "aws_backup_vault_policy" "test" { Sid = "default" Effect = "Allow" Principal = { - AWS = "*" + AWS = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root" } Action = [ "backup:DescribeBackupVault", @@ -276,6 +284,10 @@ resource "aws_backup_vault_policy" "test" { func testAccVaultPolicyConfig_newOrder(rName string) string { return fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +data "aws_partition" "current" {} + resource 
"aws_backup_vault" "test" { name = %[1]q } @@ -290,7 +302,7 @@ resource "aws_backup_vault_policy" "test" { Sid = "default" Effect = "Allow" Principal = { - AWS = "*" + AWS = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root" } Action = [ "backup:DeleteBackupVault", diff --git a/internal/service/backup/vault_test.go b/internal/service/backup/vault_test.go index 29bffed27f4..b8b05490eb0 100644 --- a/internal/service/backup/vault_test.go +++ b/internal/service/backup/vault_test.go @@ -9,9 +9,9 @@ import ( "testing" "time" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/backup" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -223,7 +223,7 @@ func TestAccBackupVault_forceDestroyWithRecoveryPoint(t *testing.T) { func testAccCheckVaultDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_backup_vault" { continue @@ -257,7 +257,7 @@ func testAccCheckVaultExists(ctx context.Context, name string, v *backup.Describ return fmt.Errorf("No Backup Vault ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) output, err := tfbackup.FindVaultByName(ctx, conn, rs.Primary.ID) @@ -274,7 +274,7 @@ func testAccCheckVaultExists(ctx context.Context, name string, v *backup.Describ func testAccCheckRunDynamoDBTableBackupJob(ctx context.Context, rName string) resource.TestCheckFunc { // 
nosemgrep:ci.backup-in-func-name return func(s *terraform.State) error { client := acctest.Provider.Meta().(*conns.AWSClient) - conn := client.BackupConn(ctx) + conn := client.BackupClient(ctx) iamRoleARN := arn.ARN{ Partition: client.Partition, @@ -289,7 +289,7 @@ func testAccCheckRunDynamoDBTableBackupJob(ctx context.Context, rName string) re AccountID: client.AccountID, Resource: fmt.Sprintf("table/%s", rName), }.String() - output, err := conn.StartBackupJobWithContext(ctx, &backup.StartBackupJobInput{ + output, err := conn.StartBackupJob(ctx, &backup.StartBackupJobInput{ BackupVaultName: aws.String(rName), IamRoleArn: aws.String(iamRoleARN), ResourceArn: aws.String(resourceARN), @@ -299,7 +299,7 @@ func testAccCheckRunDynamoDBTableBackupJob(ctx context.Context, rName string) re return fmt.Errorf("error starting Backup Job: %w", err) } - jobID := aws.StringValue(output.BackupJobId) + jobID := aws.ToString(output.BackupJobId) _, err = tfbackup.WaitJobCompleted(ctx, conn, jobID, 10*time.Minute) @@ -312,11 +312,11 @@ func testAccCheckRunDynamoDBTableBackupJob(ctx context.Context, rName string) re } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).BackupConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BackupClient(ctx) input := &backup.ListBackupVaultsInput{} - _, err := conn.ListBackupVaultsWithContext(ctx, input) + _, err := conn.ListBackupVaults(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/backup/wait.go b/internal/service/backup/wait.go index 4a98129440d..79d7652ec28 100644 --- a/internal/service/backup/wait.go +++ b/internal/service/backup/wait.go @@ -8,9 +8,11 @@ import ( "errors" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/backup" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/backup" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/backup/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -19,10 +21,10 @@ const ( propagationTimeout = 2 * time.Minute ) -func WaitJobCompleted(ctx context.Context, conn *backup.Backup, id string, timeout time.Duration) (*backup.DescribeBackupJobOutput, error) { +func WaitJobCompleted(ctx context.Context, conn *backup.Client, id string, timeout time.Duration) (*backup.DescribeBackupJobOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{backup.JobStateCreated, backup.JobStatePending, backup.JobStateRunning, backup.JobStateAborting}, - Target: []string{backup.JobStateCompleted}, + Pending: enum.Slice(awstypes.BackupJobStateCreated, awstypes.BackupJobStatePending, awstypes.BackupJobStateRunning, awstypes.BackupJobStateAborting), + Target: enum.Slice(awstypes.BackupJobStateCompleted), Refresh: statusJobState(ctx, conn, id), Timeout: timeout, } @@ -30,7 +32,7 @@ func WaitJobCompleted(ctx context.Context, conn *backup.Backup, id string, timeo outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*backup.DescribeBackupJobOutput); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusMessage))) + tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusMessage))) return output, err } @@ -38,7 +40,7 @@ func WaitJobCompleted(ctx context.Context, conn *backup.Backup, id string, timeo return nil, err } -func waitFrameworkCreated(ctx context.Context, conn *backup.Backup, id string, timeout time.Duration) (*backup.DescribeFrameworkOutput, error) { +func waitFrameworkCreated(ctx context.Context, conn *backup.Client, id string, timeout time.Duration) (*backup.DescribeFrameworkOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{frameworkStatusCreationInProgress}, Target: 
[]string{frameworkStatusCompleted, frameworkStatusFailed}, @@ -55,7 +57,7 @@ func waitFrameworkCreated(ctx context.Context, conn *backup.Backup, id string, t return nil, err } -func waitFrameworkUpdated(ctx context.Context, conn *backup.Backup, id string, timeout time.Duration) (*backup.DescribeFrameworkOutput, error) { +func waitFrameworkUpdated(ctx context.Context, conn *backup.Client, id string, timeout time.Duration) (*backup.DescribeFrameworkOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{frameworkStatusUpdateInProgress}, Target: []string{frameworkStatusCompleted, frameworkStatusFailed}, @@ -72,10 +74,10 @@ func waitFrameworkUpdated(ctx context.Context, conn *backup.Backup, id string, t return nil, err } -func waitFrameworkDeleted(ctx context.Context, conn *backup.Backup, id string, timeout time.Duration) (*backup.DescribeFrameworkOutput, error) { +func waitFrameworkDeleted(ctx context.Context, conn *backup.Client, id string, timeout time.Duration) (*backup.DescribeFrameworkOutput, error) { stateConf := &retry.StateChangeConf{ Pending: []string{frameworkStatusDeletionInProgress}, - Target: []string{backup.ErrCodeResourceNotFoundException}, + Target: []string{}, Refresh: statusFramework(ctx, conn, id), Timeout: timeout, } @@ -89,9 +91,9 @@ func waitFrameworkDeleted(ctx context.Context, conn *backup.Backup, id string, t return nil, err } -func waitRecoveryPointDeleted(ctx context.Context, conn *backup.Backup, backupVaultName, recoveryPointARN string, timeout time.Duration) (*backup.DescribeRecoveryPointOutput, error) { +func waitRecoveryPointDeleted(ctx context.Context, conn *backup.Client, backupVaultName, recoveryPointARN string, timeout time.Duration) (*backup.DescribeRecoveryPointOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{backup.RecoveryPointStatusDeleting}, + Pending: enum.Slice(awstypes.RecoveryPointStatusDeleting), Target: []string{}, Refresh: statusRecoveryPoint(ctx, conn, backupVaultName, 
recoveryPointARN), Timeout: timeout, @@ -100,7 +102,7 @@ func waitRecoveryPointDeleted(ctx context.Context, conn *backup.Backup, backupVa outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*backup.DescribeRecoveryPointOutput); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusMessage))) + tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusMessage))) return output, err } diff --git a/internal/service/batch/service_endpoint_resolver_gen.go b/internal/service/batch/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..86b3db123bd --- /dev/null +++ b/internal/service/batch/service_endpoint_resolver_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package batch + +import ( + "context" + "fmt" + "net" + "net/url" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} + +var _ batch_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver batch_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: batch_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params batch_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + 
"tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up batch endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*batch_sdkv2.Options) { + return func(o *batch_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/batch/service_endpoints_gen_test.go b/internal/service/batch/service_endpoints_gen_test.go index cea6fe288c9..fb4d05255e8 100644 --- a/internal/service/batch/service_endpoints_gen_test.go +++ b/internal/service/batch/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -255,24 +257,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }) } -func defaultEndpoint(region string) string { +func 
defaultEndpoint(region string) (url.URL, error) { r := batch_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), batch_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := batch_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), batch_sdkv2.EndpointParameters{ @@ -280,14 +282,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -364,16 +366,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + 
t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/batch/service_package_gen.go b/internal/service/batch/service_package_gen.go index 2a317f3a7e9..438ef0d5a39 100644 --- a/internal/service/batch/service_package_gen.go +++ b/internal/service/batch/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package batch @@ -8,7 +8,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" batch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/batch" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" batch_sdkv1 "github.com/aws/aws-sdk-go/service/batch" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -107,11 +106,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*b "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return batch_sdkv1.New(sess.Copy(&cfg)), nil @@ -121,19 +117,10 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*b func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*batch_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return batch_sdkv2.NewFromConfig(cfg, func(o *batch_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", 
map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return batch_sdkv2.NewFromConfig(cfg, + batch_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/bcmdataexports/service_endpoint_resolver_gen.go b/internal/service/bcmdataexports/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f50188abbe5 --- /dev/null +++ b/internal/service/bcmdataexports/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package bcmdataexports + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + bcmdataexports_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bcmdataexports" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ bcmdataexports_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver bcmdataexports_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: bcmdataexports_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params bcmdataexports_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + 
"tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up bcmdataexports endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*bcmdataexports_sdkv2.Options) { + return func(o *bcmdataexports_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/bcmdataexports/service_endpoints_gen_test.go b/internal/service/bcmdataexports/service_endpoints_gen_test.go index c461a4ae456..261364a9a92 100644 --- a/internal/service/bcmdataexports/service_endpoints_gen_test.go +++ b/internal/service/bcmdataexports/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package 
name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := bcmdataexports_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), bcmdataexports_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := bcmdataexports_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), bcmdataexports_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: 
endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/bcmdataexports/service_package_gen.go b/internal/service/bcmdataexports/service_package_gen.go index fabdc192f60..8d4ce2ea1e1 100644 --- a/internal/service/bcmdataexports/service_package_gen.go +++ b/internal/service/bcmdataexports/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package bcmdataexports @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" bcmdataexports_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bcmdataexports" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -47,19 +46,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*bcmdataexports_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return bcmdataexports_sdkv2.NewFromConfig(cfg, func(o *bcmdataexports_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return bcmdataexports_sdkv2.NewFromConfig(cfg, + bcmdataexports_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/bedrock/bedrock_test.go b/internal/service/bedrock/bedrock_test.go index d3a68829748..2812ea7a35c 100644 --- a/internal/service/bedrock/bedrock_test.go +++ b/internal/service/bedrock/bedrock_test.go @@ -13,6 +13,17 @@ func TestAccBedrock_serial(t *testing.T) { t.Parallel() testCases := map[string]map[string]func(t *testing.T){ + // Model customization has a non-adjustable maximum concurrency of 2 + "CustomModel": { + acctest.CtBasic: testAccBedrockCustomModel_basic, + acctest.CtDisappears: 
testAccBedrockCustomModel_disappears, + "tags": testAccBedrockCustomModel_tags, + "kmsKey": testAccBedrockCustomModel_kmsKey, + "validationDataConfig": testAccBedrockCustomModel_validationDataConfig, + "validationDataConfigWaitForCompletion": testAccBedrockCustomModel_validationDataConfigWaitForCompletion, + "vpcConfig": testAccBedrockCustomModel_vpcConfig, + "dataSourceBasic": testAccBedrockCustomModelDataSource_basic, + }, "ModelInvocationLoggingConfiguration": { acctest.CtBasic: testAccModelInvocationLoggingConfiguration_basic, acctest.CtDisappears: testAccModelInvocationLoggingConfiguration_disappears, diff --git a/internal/service/bedrock/custom_model_data_source_test.go b/internal/service/bedrock/custom_model_data_source_test.go index c4a51f2cbce..e4c6276d73e 100644 --- a/internal/service/bedrock/custom_model_data_source_test.go +++ b/internal/service/bedrock/custom_model_data_source_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccBedrockCustomModelDataSource_basic(t *testing.T) { +func testAccBedrockCustomModelDataSource_basic(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -24,7 +24,7 @@ func TestAccBedrockCustomModelDataSource_basic(t *testing.T) { datasourceName := "data.aws_bedrock_custom_model.test" var v bedrock.GetModelCustomizationJobOutput - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/bedrock/custom_model_test.go b/internal/service/bedrock/custom_model_test.go index c95f41682e5..85164183c58 100644 --- a/internal/service/bedrock/custom_model_test.go +++ b/internal/service/bedrock/custom_model_test.go @@ -21,13 +21,13 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccBedrockCustomModel_basic(t *testing.T) { +func testAccBedrockCustomModel_basic(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrock_custom_model.test" var v bedrock.GetModelCustomizationJobOutput - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -72,13 +72,13 @@ func TestAccBedrockCustomModel_basic(t *testing.T) { }) } -func TestAccBedrockCustomModel_disappears(t *testing.T) { +func testAccBedrockCustomModel_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrock_custom_model.test" var v bedrock.GetModelCustomizationJobOutput - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -96,13 +96,13 @@ func TestAccBedrockCustomModel_disappears(t *testing.T) { }) } -func TestAccBedrockCustomModel_tags(t *testing.T) { +func testAccBedrockCustomModel_tags(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrock_custom_model.test" var v bedrock.GetModelCustomizationJobOutput - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, @@ -143,13 +143,13 @@ func TestAccBedrockCustomModel_tags(t *testing.T) { }) } -func TestAccBedrockCustomModel_kmsKey(t *testing.T) { +func testAccBedrockCustomModel_kmsKey(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrock_custom_model.test" var v bedrock.GetModelCustomizationJobOutput - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -172,13 +172,13 @@ func TestAccBedrockCustomModel_kmsKey(t *testing.T) { }) } -func TestAccBedrockCustomModel_validationDataConfig(t *testing.T) { +func testAccBedrockCustomModel_validationDataConfig(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrock_custom_model.test" var v bedrock.GetModelCustomizationJobOutput - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -203,7 +203,7 @@ func TestAccBedrockCustomModel_validationDataConfig(t *testing.T) { }) } -func TestAccBedrockCustomModel_validationDataConfigWaitForCompletion(t *testing.T) { +func testAccBedrockCustomModel_validationDataConfigWaitForCompletion(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -213,7 +213,7 @@ func TestAccBedrockCustomModel_validationDataConfigWaitForCompletion(t *testing. 
resourceName := "aws_bedrock_custom_model.test" var v bedrock.GetModelCustomizationJobOutput - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -251,13 +251,13 @@ func TestAccBedrockCustomModel_validationDataConfigWaitForCompletion(t *testing. }) } -func TestAccBedrockCustomModel_vpcConfig(t *testing.T) { +func testAccBedrockCustomModel_vpcConfig(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_bedrock_custom_model.test" var v bedrock.GetModelCustomizationJobOutput - resource.ParallelTest(t, resource.TestCase{ + resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.BedrockEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.BedrockServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/bedrock/service_endpoint_resolver_gen.go b/internal/service/bedrock/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..d6fea1353f1 --- /dev/null +++ b/internal/service/bedrock/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package bedrock + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + bedrock_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrock" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ bedrock_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver bedrock_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: bedrock_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params bedrock_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up bedrock endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*bedrock_sdkv2.Options) { + return func(o *bedrock_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/bedrock/service_endpoints_gen_test.go b/internal/service/bedrock/service_endpoints_gen_test.go index f6f2d64678b..a41897029ca 100644 --- a/internal/service/bedrock/service_endpoints_gen_test.go +++ b/internal/service/bedrock/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := bedrock_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), bedrock_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
bedrock_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), bedrock_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/bedrock/service_package_gen.go b/internal/service/bedrock/service_package_gen.go index 3a1e99db950..1ee0b19ee2b 100644 --- a/internal/service/bedrock/service_package_gen.go +++ b/internal/service/bedrock/service_package_gen.go @@ -1,4 +1,4 @@ -// Code 
generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package bedrock @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" bedrock_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrock" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -75,19 +74,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*bedrock_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return bedrock_sdkv2.NewFromConfig(cfg, func(o *bedrock_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return bedrock_sdkv2.NewFromConfig(cfg, + bedrock_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/bedrockagent/agent_action_group.go b/internal/service/bedrockagent/agent_action_group.go index 62a40042e55..cb86b3aaf7e 100644 --- a/internal/service/bedrockagent/agent_action_group.go +++ b/internal/service/bedrockagent/agent_action_group.go @@ -14,6 +14,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/bedrockagent/types" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" 
"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -169,27 +170,6 @@ func (r *agentActionGroupResource) Create(ctx context.Context, request resource. return } - // AutoFlEx doesn't yet handle union types. - if !data.ActionGroupExecutor.IsNull() { - actionGroupExecutorData, diags := data.ActionGroupExecutor.ToPtr(ctx) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return - } - - input.ActionGroupExecutor = expandActionGroupExecutor(ctx, actionGroupExecutorData) - } - - if !data.APISchema.IsNull() { - apiSchemaData, diags := data.APISchema.ToPtr(ctx) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return - } - - input.ApiSchema = expandAPISchema(ctx, apiSchemaData) - } - output, err := conn.CreateAgentActionGroup(ctx, input) if err != nil { @@ -273,27 +253,6 @@ func (r *agentActionGroupResource) Update(ctx context.Context, request resource. return } - // AutoFlEx doesn't yet handle union types. - if !new.ActionGroupExecutor.IsNull() { - actionGroupExecutorData, diags := new.ActionGroupExecutor.ToPtr(ctx) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return - } - - input.ActionGroupExecutor = expandActionGroupExecutor(ctx, actionGroupExecutorData) - } - - if !new.APISchema.IsNull() { - apiSchemaData, diags := new.APISchema.ToPtr(ctx) - response.Diagnostics.Append(diags...) 
- if response.Diagnostics.HasError() { - return - } - - input.ApiSchema = expandAPISchema(ctx, apiSchemaData) - } - _, err := conn.UpdateAgentActionGroup(ctx, input) if err != nil { @@ -410,24 +369,54 @@ type actionGroupExecutorModel struct { Lambda fwtypes.ARN `tfsdk:"lambda"` } +var ( + _ fwflex.Expander = actionGroupExecutorModel{} +) + +func (m actionGroupExecutorModel) Expand(ctx context.Context) (result any, diags diag.Diagnostics) { + switch { + case !m.Lambda.IsNull(): + return &awstypes.ActionGroupExecutorMemberLambda{ + Value: m.Lambda.ValueString(), + }, diags + } + + return nil, diags +} + type apiSchemaModel struct { Payload types.String `tfsdk:"payload"` S3 fwtypes.ListNestedObjectValueOf[s3IdentifierModel] `tfsdk:"s3"` } -type s3IdentifierModel struct { - S3BucketName types.String `tfsdk:"s3_bucket_name"` - S3ObjectKey types.String `tfsdk:"s3_object_key"` -} +var ( + _ fwflex.Expander = apiSchemaModel{} +) -func expandActionGroupExecutor(_ context.Context, actionGroupExecutorData *actionGroupExecutorModel) awstypes.ActionGroupExecutor { - if !actionGroupExecutorData.Lambda.IsNull() { - return &awstypes.ActionGroupExecutorMemberLambda{ - Value: actionGroupExecutorData.Lambda.ValueString(), - } +func (m apiSchemaModel) Expand(ctx context.Context) (result any, diags diag.Diagnostics) { + switch { + case !m.Payload.IsNull(): + return &awstypes.APISchemaMemberPayload{ + Value: m.Payload.ValueString(), + }, diags + + case !m.S3.IsNull(): + s3IdentifierModel := fwdiag.Must(m.S3.ToPtr(ctx)) + + return &awstypes.APISchemaMemberS3{ + Value: awstypes.S3Identifier{ + S3BucketName: fwflex.StringFromFramework(ctx, s3IdentifierModel.S3BucketName), + S3ObjectKey: fwflex.StringFromFramework(ctx, s3IdentifierModel.S3ObjectKey), + }, + }, diags } - return nil + return nil, diags +} + +type s3IdentifierModel struct { + S3BucketName types.String `tfsdk:"s3_bucket_name"` + S3ObjectKey types.String `tfsdk:"s3_object_key"` } func flattenActionGroupExecutor(ctx 
context.Context, apiObject awstypes.ActionGroupExecutor) fwtypes.ListNestedObjectValueOf[actionGroupExecutorModel] { @@ -445,27 +434,6 @@ func flattenActionGroupExecutor(ctx context.Context, apiObject awstypes.ActionGr return fwtypes.NewListNestedObjectValueOfPtrMust(ctx, &actionGroupExecutorData) } -func expandAPISchema(ctx context.Context, apiSchemaData *apiSchemaModel) awstypes.APISchema { - if !apiSchemaData.Payload.IsNull() { - return &awstypes.APISchemaMemberPayload{ - Value: apiSchemaData.Payload.ValueString(), - } - } - - if !apiSchemaData.S3.IsNull() { - s3IdentifierModel := fwdiag.Must(apiSchemaData.S3.ToPtr(ctx)) - - return &awstypes.APISchemaMemberS3{ - Value: awstypes.S3Identifier{ - S3BucketName: fwflex.StringFromFramework(ctx, s3IdentifierModel.S3BucketName), - S3ObjectKey: fwflex.StringFromFramework(ctx, s3IdentifierModel.S3ObjectKey), - }, - } - } - - return nil -} - func flattenAPISchema(ctx context.Context, apiObject awstypes.APISchema) fwtypes.ListNestedObjectValueOf[apiSchemaModel] { if apiObject == nil { return fwtypes.NewListNestedObjectValueOfNull[apiSchemaModel](ctx) diff --git a/internal/service/bedrockagent/service_endpoint_resolver_gen.go b/internal/service/bedrockagent/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..60e5a6f7fc2 --- /dev/null +++ b/internal/service/bedrockagent/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package bedrockagent + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + bedrockagent_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrockagent" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ bedrockagent_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver bedrockagent_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: bedrockagent_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params bedrockagent_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up bedrockagent endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*bedrockagent_sdkv2.Options) { + return func(o *bedrockagent_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/bedrockagent/service_endpoints_gen_test.go b/internal/service/bedrockagent/service_endpoints_gen_test.go index f2b48359c61..32305838fc3 100644 --- a/internal/service/bedrockagent/service_endpoints_gen_test.go +++ b/internal/service/bedrockagent/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := bedrockagent_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), bedrockagent_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := bedrockagent_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), bedrockagent_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/bedrockagent/service_package_gen.go b/internal/service/bedrockagent/service_package_gen.go index f0695149bb7..cdbd786dd01 100644 --- a/internal/service/bedrockagent/service_package_gen.go +++ 
b/internal/service/bedrockagent/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package bedrockagent @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" bedrockagent_sdkv2 "github.com/aws/aws-sdk-go-v2/service/bedrockagent" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -73,19 +72,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*bedrockagent_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return bedrockagent_sdkv2.NewFromConfig(cfg, func(o *bedrockagent_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return bedrockagent_sdkv2.NewFromConfig(cfg, + bedrockagent_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/budgets/service_endpoint_resolver_gen.go b/internal/service/budgets/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..57ad8f67282 --- /dev/null +++ b/internal/service/budgets/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT 
EDIT. + +package budgets + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + budgets_sdkv2 "github.com/aws/aws-sdk-go-v2/service/budgets" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ budgets_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver budgets_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: budgets_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params budgets_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up budgets endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*budgets_sdkv2.Options) { + return func(o *budgets_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/budgets/service_endpoints_gen_test.go b/internal/service/budgets/service_endpoints_gen_test.go index a3d094ead3b..14463e7cfbf 100644 --- a/internal/service/budgets/service_endpoints_gen_test.go +++ b/internal/service/budgets/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := budgets_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), budgets_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
budgets_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), budgets_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/budgets/service_package_gen.go b/internal/service/budgets/service_package_gen.go index 3fdcd2a1c87..dcc002de27d 100644 --- a/internal/service/budgets/service_package_gen.go +++ b/internal/service/budgets/service_package_gen.go @@ -1,4 +1,4 @@ -// Code 
generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package budgets @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" budgets_sdkv2 "github.com/aws/aws-sdk-go-v2/service/budgets" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -59,19 +58,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*budgets_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return budgets_sdkv2.NewFromConfig(cfg, func(o *budgets_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return budgets_sdkv2.NewFromConfig(cfg, + budgets_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/ce/service_endpoint_resolver_gen.go b/internal/service/ce/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ec2655a1f54 --- /dev/null +++ b/internal/service/ce/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package ce + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + costexplorer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costexplorer" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ costexplorer_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver costexplorer_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: costexplorer_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params costexplorer_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up costexplorer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + 
} + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*costexplorer_sdkv2.Options) { + return func(o *costexplorer_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ce/service_endpoints_gen_test.go b/internal/service/ce/service_endpoints_gen_test.go index de259309c25..d9d1460d882 100644 --- a/internal/service/ce/service_endpoints_gen_test.go +++ b/internal/service/ce/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := costexplorer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), costexplorer_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
costexplorer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), costexplorer_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ce/service_package_gen.go b/internal/service/ce/service_package_gen.go index 3992dbafee5..482009bfad6 100644 --- a/internal/service/ce/service_package_gen.go +++ b/internal/service/ce/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by 
internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ce @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" costexplorer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costexplorer" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -80,19 +79,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*costexplorer_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return costexplorer_sdkv2.NewFromConfig(cfg, func(o *costexplorer_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return costexplorer_sdkv2.NewFromConfig(cfg, + costexplorer_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/chatbot/service_endpoint_resolver_gen.go b/internal/service/chatbot/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..97545a9c42f --- /dev/null +++ b/internal/service/chatbot/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package chatbot + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + chatbot_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chatbot" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ chatbot_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver chatbot_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: chatbot_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params chatbot_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up chatbot endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*chatbot_sdkv2.Options) { + return func(o *chatbot_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/chatbot/service_endpoints_gen_test.go b/internal/service/chatbot/service_endpoints_gen_test.go index 6afc31fe91e..e022c7a0539 100644 --- a/internal/service/chatbot/service_endpoints_gen_test.go +++ b/internal/service/chatbot/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -243,24 +245,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := chatbot_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), chatbot_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
chatbot_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), chatbot_sdkv2.EndpointParameters{ @@ -268,14 +270,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/chatbot/service_package.go b/internal/service/chatbot/service_package.go index fbe7a9e657f..f7f994d91bd 100644 --- a/internal/service/chatbot/service_package.go +++ b/internal/service/chatbot/service_package.go @@ -17,27 +17,23 @@ import ( func (p 
*servicePackage) NewClient(ctx context.Context, config map[string]any) (*chatbot.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return chatbot.NewFromConfig(cfg, func(o *chatbot.Options) { - if config["partition"].(string) == names.StandardPartitionID { - // Chatbot endpoint is available only in the 4 regions us-east-2, us-west-2, eu-west-1, and ap-southeast-1. - // If the region from the context is one of those four, then use that region. If not default to us-west-2 - if slices.Contains([]string{names.USEast2RegionID, names.USWest2RegionID, names.EUWest1RegionID, names.APSoutheast1RegionID}, cfg.Region) { - o.Region = cfg.Region - } else { - o.Region = names.USWest2RegionID + return chatbot.NewFromConfig(cfg, + chatbot.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *chatbot.Options) { + if config["partition"].(string) == names.StandardPartitionID { + // Chatbot endpoint is available only in the 4 regions us-east-2, us-west-2, eu-west-1, and ap-southeast-1. + // If the region from the context is one of those four, then use that region. 
If not default to us-west-2 + if slices.Contains([]string{names.USEast2RegionID, names.USWest2RegionID, names.EUWest1RegionID, names.APSoutheast1RegionID}, cfg.Region) { + o.Region = cfg.Region + } else { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USWest2RegionID, + }) + o.Region = names.USWest2RegionID + } } - } - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - }), nil + }, + ), nil } diff --git a/internal/service/chatbot/service_package_gen.go b/internal/service/chatbot/service_package_gen.go index bfed1d17e95..e720db628d8 100644 --- a/internal/service/chatbot/service_package_gen.go +++ b/internal/service/chatbot/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package chatbot diff --git a/internal/service/chime/service_endpoint_resolver_gen.go b/internal/service/chime/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..b143693a7b4 --- /dev/null +++ b/internal/service/chime/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package chime + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/chime/service_endpoints_gen_test.go b/internal/service/chime/service_endpoints_gen_test.go index bf9f932eca4..9609fc1178f 100644 --- a/internal/service/chime/service_endpoints_gen_test.go +++ b/internal/service/chime/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(chime_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(chime_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region 
string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/chime/service_package_gen.go b/internal/service/chime/service_package_gen.go index 42058367e2b..23f3610851b 100644 --- a/internal/service/chime/service_package_gen.go +++ b/internal/service/chime/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package chime @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" chime_sdkv1 "github.com/aws/aws-sdk-go/service/chime" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -81,11 +80,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*c "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return chime_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/chimesdkmediapipelines/service_endpoint_resolver_gen.go b/internal/service/chimesdkmediapipelines/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f41b43341cc --- /dev/null +++ b/internal/service/chimesdkmediapipelines/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package chimesdkmediapipelines + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + chimesdkmediapipelines_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ chimesdkmediapipelines_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver chimesdkmediapipelines_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: chimesdkmediapipelines_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params chimesdkmediapipelines_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up 
chimesdkmediapipelines endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*chimesdkmediapipelines_sdkv2.Options) { + return func(o *chimesdkmediapipelines_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/chimesdkmediapipelines/service_endpoints_gen_test.go b/internal/service/chimesdkmediapipelines/service_endpoints_gen_test.go index 3a8886f8581..88707528d6d 100644 --- a/internal/service/chimesdkmediapipelines/service_endpoints_gen_test.go +++ b/internal/service/chimesdkmediapipelines/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := chimesdkmediapipelines_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), chimesdkmediapipelines_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if 
ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := chimesdkmediapipelines_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), chimesdkmediapipelines_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git 
a/internal/service/chimesdkmediapipelines/service_package_gen.go b/internal/service/chimesdkmediapipelines/service_package_gen.go index 8f78a51d33a..9ce8ac0b2d5 100644 --- a/internal/service/chimesdkmediapipelines/service_package_gen.go +++ b/internal/service/chimesdkmediapipelines/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package chimesdkmediapipelines @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" chimesdkmediapipelines_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*chimesdkmediapipelines_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return chimesdkmediapipelines_sdkv2.NewFromConfig(cfg, func(o *chimesdkmediapipelines_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return chimesdkmediapipelines_sdkv2.NewFromConfig(cfg, + chimesdkmediapipelines_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git 
a/internal/service/chimesdkvoice/service_endpoint_resolver_gen.go b/internal/service/chimesdkvoice/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f05b10fd2bb --- /dev/null +++ b/internal/service/chimesdkvoice/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package chimesdkvoice + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + chimesdkvoice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chimesdkvoice" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ chimesdkvoice_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver chimesdkvoice_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: chimesdkvoice_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params chimesdkvoice_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != 
nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up chimesdkvoice endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*chimesdkvoice_sdkv2.Options) { + return func(o *chimesdkvoice_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/chimesdkvoice/service_endpoints_gen_test.go b/internal/service/chimesdkvoice/service_endpoints_gen_test.go index 220802de047..1238fde0ae1 100644 --- a/internal/service/chimesdkvoice/service_endpoints_gen_test.go +++ b/internal/service/chimesdkvoice/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := 
chimesdkvoice_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), chimesdkvoice_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := chimesdkvoice_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), chimesdkvoice_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up 
accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/chimesdkvoice/service_package_gen.go b/internal/service/chimesdkvoice/service_package_gen.go index 8f0c383857d..40ba76b8aae 100644 --- a/internal/service/chimesdkvoice/service_package_gen.go +++ b/internal/service/chimesdkvoice/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package chimesdkvoice @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" chimesdkvoice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/chimesdkvoice" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -65,19 +64,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*chimesdkvoice_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return chimesdkvoice_sdkv2.NewFromConfig(cfg, func(o *chimesdkvoice_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return chimesdkvoice_sdkv2.NewFromConfig(cfg, + chimesdkvoice_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), 
nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cleanrooms/service_endpoint_resolver_gen.go b/internal/service/cleanrooms/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..85d8ecfde4b --- /dev/null +++ b/internal/service/cleanrooms/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package cleanrooms + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cleanrooms_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cleanrooms" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cleanrooms_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cleanrooms_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cleanrooms_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cleanrooms_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := 
endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cleanrooms endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cleanrooms_sdkv2.Options) { + return func(o *cleanrooms_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cleanrooms/service_endpoints_gen_test.go b/internal/service/cleanrooms/service_endpoints_gen_test.go index 029f60a7aca..d199fa3ea51 100644 --- a/internal/service/cleanrooms/service_endpoints_gen_test.go +++ b/internal/service/cleanrooms/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { 
r := cleanrooms_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cleanrooms_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := cleanrooms_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cleanrooms_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer 
endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cleanrooms/service_package_gen.go b/internal/service/cleanrooms/service_package_gen.go index 410cdd30211..2e32cee494b 100644 --- a/internal/service/cleanrooms/service_package_gen.go +++ b/internal/service/cleanrooms/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cleanrooms @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cleanrooms_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cleanrooms" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -54,19 +53,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cleanrooms_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cleanrooms_sdkv2.NewFromConfig(cfg, func(o *cleanrooms_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cleanrooms_sdkv2.NewFromConfig(cfg, + cleanrooms_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) 
conns.ServicePackage { diff --git a/internal/service/cloud9/service_endpoint_resolver_gen.go b/internal/service/cloud9/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..c84a54f62bf --- /dev/null +++ b/internal/service/cloud9/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package cloud9 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloud9_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloud9" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloud9_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloud9_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloud9_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloud9_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloud9 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloud9_sdkv2.Options) { + return func(o *cloud9_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloud9/service_endpoints_gen_test.go b/internal/service/cloud9/service_endpoints_gen_test.go index 4eaac93e86b..915a7877f15 100644 --- a/internal/service/cloud9/service_endpoints_gen_test.go +++ b/internal/service/cloud9/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cloud9_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
cloud9_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := cloud9_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloud9_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: 
endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cloud9/service_package_gen.go b/internal/service/cloud9/service_package_gen.go index e6c936c20ec..a0f0c583f6f 100644 --- a/internal/service/cloud9/service_package_gen.go +++ b/internal/service/cloud9/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cloud9 @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cloud9_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloud9" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -53,19 +52,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloud9_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cloud9_sdkv2.NewFromConfig(cfg, func(o *cloud9_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cloud9_sdkv2.NewFromConfig(cfg, + cloud9_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cloudcontrol/service_endpoint_resolver_gen.go b/internal/service/cloudcontrol/service_endpoint_resolver_gen.go new 
file mode 100644 index 00000000000..320a69e3193 --- /dev/null +++ b/internal/service/cloudcontrol/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package cloudcontrol + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloudcontrol_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudcontrol" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloudcontrol_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloudcontrol_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloudcontrol_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloudcontrol_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling 
FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloudcontrol endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloudcontrol_sdkv2.Options) { + return func(o *cloudcontrol_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloudcontrol/service_endpoints_gen_test.go b/internal/service/cloudcontrol/service_endpoints_gen_test.go index f9532d3f352..da0057352b9 100644 --- a/internal/service/cloudcontrol/service_endpoints_gen_test.go +++ b/internal/service/cloudcontrol/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cloudcontrol_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudcontrol_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - 
return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := cloudcontrol_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudcontrol_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git 
a/internal/service/cloudcontrol/service_package_gen.go b/internal/service/cloudcontrol/service_package_gen.go index 50cf1249b46..f6ec831ebf0 100644 --- a/internal/service/cloudcontrol/service_package_gen.go +++ b/internal/service/cloudcontrol/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cloudcontrol @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cloudcontrol_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudcontrol" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -51,19 +50,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloudcontrol_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cloudcontrol_sdkv2.NewFromConfig(cfg, func(o *cloudcontrol_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cloudcontrol_sdkv2.NewFromConfig(cfg, + cloudcontrol_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cloudformation/service_endpoint_resolver_gen.go 
b/internal/service/cloudformation/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..441c8f8b889 --- /dev/null +++ b/internal/service/cloudformation/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package cloudformation + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloudformation_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudformation" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloudformation_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloudformation_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloudformation_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloudformation_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok 
&& dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloudformation endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloudformation_sdkv2.Options) { + return func(o *cloudformation_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloudformation/service_endpoints_gen_test.go b/internal/service/cloudformation/service_endpoints_gen_test.go index bb5fab7aaa6..20198e3936f 100644 --- a/internal/service/cloudformation/service_endpoints_gen_test.go +++ b/internal/service/cloudformation/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cloudformation_sdkv2.NewDefaultEndpointResolverV2() ep, err := 
r.ResolveEndpoint(context.Background(), cloudformation_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := cloudformation_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudformation_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return 
caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cloudformation/service_package.go b/internal/service/cloudformation/service_package.go index dd34c8c8d90..08663bb7d2a 100644 --- a/internal/service/cloudformation/service_package.go +++ b/internal/service/cloudformation/service_package.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/cloudformation" "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,24 +19,16 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloudformation.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return cloudformation.NewFromConfig(cfg, func(o *cloudformation.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*types.OperationInProgressException](err, "Another Operation on StackSet") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. 
- })) - }), nil + return cloudformation.NewFromConfig(cfg, + cloudformation.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *cloudformation.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*types.OperationInProgressException](err, "Another Operation on StackSet") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + })) + }, + ), nil } diff --git a/internal/service/cloudformation/service_package_gen.go b/internal/service/cloudformation/service_package_gen.go index 48646010ba1..d47138c21fb 100644 --- a/internal/service/cloudformation/service_package_gen.go +++ b/internal/service/cloudformation/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package cloudformation diff --git a/internal/service/cloudformation/stack_set_instance.go b/internal/service/cloudformation/stack_set_instance.go index a7edde6fbfa..de0f381e6e5 100644 --- a/internal/service/cloudformation/stack_set_instance.go +++ b/internal/service/cloudformation/stack_set_instance.go @@ -76,14 +76,41 @@ func resourceStackSetInstance() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "organizational_unit_ids": { - Type: schema.TypeSet, - Optional: true, - MinItems: 1, + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 1, + ConflictsWith: []string{names.AttrAccountID}, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringMatch(regexache.MustCompile(`^(ou-[0-9a-z]{4,32}-[0-9a-z]{8,32}|r-[0-9a-z]{4,32})$`), ""), }, }, + "account_filter_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(enum.Slice(awstypes.AccountFilterType.Values("")...), false), + ConflictsWith: []string{names.AttrAccountID}, + }, + "accounts": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + ConflictsWith: []string{names.AttrAccountID}, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidAccountID, + }, + }, + "accounts_url": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + ConflictsWith: []string{names.AttrAccountID}, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`(s3://|http(s?)://).+`), ""), + }, }, }, ConflictsWith: []string{names.AttrAccountID}, @@ -357,7 +384,6 @@ func resourceStackSetInstanceRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "finding CloudFormation StackSet Instance (%s): %s", d.Id(), err) } - d.Set("deployment_targets", flattenDeploymentTargetsFromSlice(orgIDs)) d.Set("stack_instance_summaries", flattenStackInstanceSummaries(summaries)) } @@ -368,7 +394,7 @@ func resourceStackSetInstanceUpdate(ctx 
context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).CloudFormationClient(ctx) - if d.HasChanges("deployment_targets", "parameter_overrides", "operation_preferences") { + if d.HasChanges("parameter_overrides", "operation_preferences") { parts, err := flex.ExpandResourceId(d.Id(), stackSetInstanceResourceIDPartCount, false) if err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -388,13 +414,6 @@ func resourceStackSetInstanceUpdate(ctx context.Context, d *schema.ResourceData, input.CallAs = awstypes.CallAs(v.(string)) } - if v, ok := d.GetOk("deployment_targets"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - dt := expandDeploymentTargets(v.([]interface{})) - // reset input Accounts as the API accepts only 1 of Accounts and DeploymentTargets - input.Accounts = nil - input.DeploymentTargets = dt - } - if v, ok := d.GetOk("parameter_overrides"); ok { input.ParameterOverrides = expandParameters(v.(map[string]interface{})) } @@ -560,24 +579,17 @@ func expandDeploymentTargets(tfList []interface{}) *awstypes.DeploymentTargets { if v, ok := tfMap["organizational_unit_ids"].(*schema.Set); ok && v.Len() > 0 { dt.OrganizationalUnitIds = flex.ExpandStringValueSet(v) } - - return dt -} - -// flattenDeployment targets converts a list of organizational units (typically -// parsed from the resource ID) into the Terraform representation of the -// deployment_targets attribute. 
-func flattenDeploymentTargetsFromSlice(orgIDs []string) []interface{} { - tfList := []interface{}{} - for _, ou := range orgIDs { - tfList = append(tfList, ou) + if v, ok := tfMap["account_filter_type"].(string); ok && len(v) > 0 { + dt.AccountFilterType = awstypes.AccountFilterType(v) } - - m := map[string]interface{}{ - "organizational_unit_ids": tfList, + if v, ok := tfMap["accounts"].(*schema.Set); ok && v.Len() > 0 { + dt.Accounts = flex.ExpandStringValueSet(v) + } + if v, ok := tfMap["accounts_url"].(string); ok && len(v) > 0 { + dt.AccountsUrl = aws.String(v) } - return []interface{}{m} + return dt } func flattenStackInstanceSummaries(apiObject []awstypes.StackInstanceSummary) []interface{} { diff --git a/internal/service/cloudformation/stack_set_instance_test.go b/internal/service/cloudformation/stack_set_instance_test.go index 08ee3a87120..255935248e4 100644 --- a/internal/service/cloudformation/stack_set_instance_test.go +++ b/internal/service/cloudformation/stack_set_instance_test.go @@ -219,6 +219,9 @@ func TestAccCloudFormationStackSetInstance_deploymentTargets(t *testing.T) { testAccCheckStackSetInstanceForOrganizationalUnitExists(ctx, resourceName, stackInstanceSummaries), resource.TestCheckResourceAttr(resourceName, "deployment_targets.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "deployment_targets.0.organizational_unit_ids.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "deployment_targets.0.account_filter_type", "INTERSECTION"), + resource.TestCheckResourceAttr(resourceName, "deployment_targets.0.accounts.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "deployment_targets.0.accounts_url", ""), ), }, { @@ -228,6 +231,7 @@ func TestAccCloudFormationStackSetInstance_deploymentTargets(t *testing.T) { ImportStateVerifyIgnore: []string{ "retain_stack", "call_as", + "deployment_targets", }, }, { @@ -273,6 +277,7 @@ func TestAccCloudFormationStackSetInstance_DeploymentTargets_emptyOU(t *testing. 
ImportStateVerifyIgnore: []string{ "retain_stack", "call_as", + "deployment_targets", }, }, { @@ -812,6 +817,8 @@ resource "aws_cloudformation_stack_set_instance" "test" { deployment_targets { organizational_unit_ids = [data.aws_organizations_organization.test.roots[0].id] + account_filter_type = "INTERSECTION" + accounts = [data.aws_organizations_organization.test.non_master_accounts[0].id] } stack_set_name = aws_cloudformation_stack_set.test.name diff --git a/internal/service/cloudfront/origin_access_control_data_source.go b/internal/service/cloudfront/origin_access_control_data_source.go new file mode 100644 index 00000000000..258dfa5d210 --- /dev/null +++ b/internal/service/cloudfront/origin_access_control_data_source.go @@ -0,0 +1,104 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloudfront + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource(name="Origin Access Control") +func newDataSourceOriginAccessControl(_ context.Context) (datasource.DataSourceWithConfigure, error) { + d := &dataSourceOriginAccessControl{} + + return d, nil +} + +type dataSourceOriginAccessControl struct { + framework.DataSourceWithConfigure +} + +const ( + DSNameOriginAccessControl = "Origin Access Control Data Source" +) + +func (d *dataSourceOriginAccessControl) Metadata(_ context.Context, _ datasource.MetadataRequest, response *datasource.MetadataResponse) { + response.TypeName = "aws_cloudfront_origin_access_control" +} + +func (d *dataSourceOriginAccessControl) Schema(_ context.Context, _ 
datasource.SchemaRequest, response *datasource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrDescription: schema.StringAttribute{ + Computed: true, + }, + "etag": schema.StringAttribute{ + Computed: true, + }, + names.AttrID: schema.StringAttribute{ + Required: true, + }, + names.AttrName: schema.StringAttribute{ + Computed: true, + }, + "origin_access_control_origin_type": schema.StringAttribute{ + Computed: true, + }, + "signing_behavior": schema.StringAttribute{ + Computed: true, + }, + "signing_protocol": schema.StringAttribute{ + Computed: true, + }, + }, + } +} + +func (d *dataSourceOriginAccessControl) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + conn := d.Meta().CloudFrontClient(ctx) + var data dataSourceOriginAccessControlData + + response.Diagnostics.Append(request.Config.Get(ctx, &data)...) + + if response.Diagnostics.HasError() { + return + } + + output, err := findOriginAccessControlByID(ctx, conn, data.ID.ValueString()) + + if err != nil { + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.CloudFront, create.ErrActionReading, DSNameOriginAccessControl, data.ID.String(), err), + err.Error(), + ) + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output.OriginAccessControl.OriginAccessControlConfig, &data)...) + + if response.Diagnostics.HasError() { + return + } + + data.Etag = fwflex.StringToFramework(ctx, output.ETag) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +type dataSourceOriginAccessControlData struct { + Description types.String `tfsdk:"description"` + Etag types.String `tfsdk:"etag"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + OriginAccessControlOriginType types.String `tfsdk:"origin_access_control_origin_type"` + SigningBehavior types.String `tfsdk:"signing_behavior"` + SigningProtocol types.String `tfsdk:"signing_protocol"` +} diff --git a/internal/service/cloudfront/origin_access_control_data_source_test.go b/internal/service/cloudfront/origin_access_control_data_source_test.go new file mode 100644 index 00000000000..20380ce196e --- /dev/null +++ b/internal/service/cloudfront/origin_access_control_data_source_test.go @@ -0,0 +1,61 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cloudfront_test + +import ( + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccCloudFrontOriginAccessControlDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_cloudfront_origin_access_control.this" + resourceName := "aws_cloudfront_origin_access_control.this" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.CloudFrontEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudFrontServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckOriginAccessControlDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccOriginAccessControlDataSourceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(dataSourceName, "etag"), + 
resource.TestCheckResourceAttrPair(dataSourceName, names.AttrDescription, resourceName, names.AttrDescription), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrID, resourceName, names.AttrID), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrName, resourceName, names.AttrName), + resource.TestCheckResourceAttrPair(dataSourceName, "origin_access_control_origin_type", resourceName, "origin_access_control_origin_type"), + resource.TestCheckResourceAttrPair(dataSourceName, "signing_behavior", resourceName, "signing_behavior"), + resource.TestCheckResourceAttrPair(dataSourceName, "signing_protocol", resourceName, "signing_protocol"), + ), + }, + }, + }) +} + +func testAccOriginAccessControlDataSourceConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_cloudfront_origin_access_control" "this" { + name = %[1]q + description = %[1]q + origin_access_control_origin_type = "s3" + signing_behavior = "always" + signing_protocol = "sigv4" +} + +data "aws_cloudfront_origin_access_control" "this" { + id = aws_cloudfront_origin_access_control.this.id +} +`, rName) +} diff --git a/internal/service/cloudfront/service_endpoint_resolver_gen.go b/internal/service/cloudfront/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ecb4e95c031 --- /dev/null +++ b/internal/service/cloudfront/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package cloudfront + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloudfront_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudfront" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloudfront_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloudfront_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloudfront_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloudfront_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloudfront endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } 
+ + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloudfront_sdkv2.Options) { + return func(o *cloudfront_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloudfront/service_endpoints_gen_test.go b/internal/service/cloudfront/service_endpoints_gen_test.go index 15e0d92aa1b..3f001e4e08a 100644 --- a/internal/service/cloudfront/service_endpoints_gen_test.go +++ b/internal/service/cloudfront/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cloudfront_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudfront_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
cloudfront_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudfront_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cloudfront/service_package_gen.go b/internal/service/cloudfront/service_package_gen.go index 52bdf96d2b1..3f8995858ab 100644 --- a/internal/service/cloudfront/service_package_gen.go +++ b/internal/service/cloudfront/service_package_gen.go @@ -1,4 +1,4 
@@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cloudfront @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cloudfront_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudfront" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -16,7 +15,12 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { - return []*types.ServicePackageFrameworkDataSource{} + return []*types.ServicePackageFrameworkDataSource{ + { + Factory: newDataSourceOriginAccessControl, + Name: "Origin Access Control", + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { @@ -166,19 +170,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloudfront_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cloudfront_sdkv2.NewFromConfig(cfg, func(o *cloudfront_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cloudfront_sdkv2.NewFromConfig(cfg, + cloudfront_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func 
ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cloudfrontkeyvaluestore/service_endpoint_resolver_gen.go b/internal/service/cloudfrontkeyvaluestore/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..620ffba3694 --- /dev/null +++ b/internal/service/cloudfrontkeyvaluestore/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package cloudfrontkeyvaluestore + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloudfrontkeyvaluestore_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloudfrontkeyvaluestore_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloudfrontkeyvaluestore_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloudfrontkeyvaluestore_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloudfrontkeyvaluestore_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, 
"endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloudfrontkeyvaluestore endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloudfrontkeyvaluestore_sdkv2.Options) { + return func(o *cloudfrontkeyvaluestore_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloudfrontkeyvaluestore/service_package_gen.go b/internal/service/cloudfrontkeyvaluestore/service_package_gen.go index 7c236d77a24..3411f7c1329 100644 --- a/internal/service/cloudfrontkeyvaluestore/service_package_gen.go +++ b/internal/service/cloudfrontkeyvaluestore/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package cloudfrontkeyvaluestore @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cloudfrontkeyvaluestore_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -44,19 +43,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloudfrontkeyvaluestore_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cloudfrontkeyvaluestore_sdkv2.NewFromConfig(cfg, func(o *cloudfrontkeyvaluestore_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cloudfrontkeyvaluestore_sdkv2.NewFromConfig(cfg, + cloudfrontkeyvaluestore_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cloudhsmv2/service_endpoint_resolver_gen.go b/internal/service/cloudhsmv2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..da2305dd18d --- /dev/null +++ b/internal/service/cloudhsmv2/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package cloudhsmv2 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloudhsmv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudhsmv2" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloudhsmv2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloudhsmv2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloudhsmv2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloudhsmv2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloudhsmv2 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } 
+ + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloudhsmv2_sdkv2.Options) { + return func(o *cloudhsmv2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloudhsmv2/service_endpoints_gen_test.go b/internal/service/cloudhsmv2/service_endpoints_gen_test.go index c7868f9e1cd..46b19669c04 100644 --- a/internal/service/cloudhsmv2/service_endpoints_gen_test.go +++ b/internal/service/cloudhsmv2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cloudhsmv2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudhsmv2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
cloudhsmv2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudhsmv2_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cloudhsmv2/service_package.go b/internal/service/cloudhsmv2/service_package.go index c34eaf6a67a..674210e94d2 100644 --- a/internal/service/cloudhsmv2/service_package.go +++ b/internal/service/cloudhsmv2/service_package.go @@ -10,7 +10,6 @@ import ( 
"github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/cloudhsmv2" "github.com/aws/aws-sdk-go-v2/service/cloudhsmv2/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,24 +19,16 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloudhsmv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return cloudhsmv2.NewFromConfig(cfg, func(o *cloudhsmv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*types.CloudHsmInternalFailureException](err, "request was rejected because of an AWS CloudHSM internal failure") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. 
- })) - }), nil + return cloudhsmv2.NewFromConfig(cfg, + cloudhsmv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *cloudhsmv2.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*types.CloudHsmInternalFailureException](err, "request was rejected because of an AWS CloudHSM internal failure") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + })) + }, + ), nil } diff --git a/internal/service/cloudhsmv2/service_package_gen.go b/internal/service/cloudhsmv2/service_package_gen.go index d3d61647406..44de4fe0621 100644 --- a/internal/service/cloudhsmv2/service_package_gen.go +++ b/internal/service/cloudhsmv2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cloudhsmv2 diff --git a/internal/service/cloudsearch/service_endpoint_resolver_gen.go b/internal/service/cloudsearch/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..8231ab82f37 --- /dev/null +++ b/internal/service/cloudsearch/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package cloudsearch + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloudsearch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudsearch" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloudsearch_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloudsearch_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloudsearch_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloudsearch_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloudsearch endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err 
+ } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloudsearch_sdkv2.Options) { + return func(o *cloudsearch_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloudsearch/service_endpoints_gen_test.go b/internal/service/cloudsearch/service_endpoints_gen_test.go index 28a814f5ab5..0ca66dc3b20 100644 --- a/internal/service/cloudsearch/service_endpoints_gen_test.go +++ b/internal/service/cloudsearch/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cloudsearch_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudsearch_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) 
{ r := cloudsearch_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudsearch_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cloudsearch/service_package_gen.go b/internal/service/cloudsearch/service_package_gen.go index e712b71f007..4a2aba2be6e 100644 --- a/internal/service/cloudsearch/service_package_gen.go +++ b/internal/service/cloudsearch/service_package_gen.go 
@@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cloudsearch @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cloudsearch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudsearch" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -50,19 +49,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloudsearch_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cloudsearch_sdkv2.NewFromConfig(cfg, func(o *cloudsearch_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cloudsearch_sdkv2.NewFromConfig(cfg, + cloudsearch_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cloudtrail/event_data_store.go b/internal/service/cloudtrail/event_data_store.go index a14b2a848e1..1b5ba95c65c 100644 --- a/internal/service/cloudtrail/event_data_store.go +++ b/internal/service/cloudtrail/event_data_store.go @@ -142,6 +142,12 @@ func resourceEventDataStore() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "billing_mode": { + Type: 
schema.TypeString, + Optional: true, + Default: types.BillingModeExtendableRetentionPricing, + ValidateDiagFunc: enum.Validate[types.BillingMode](), + }, names.AttrKMSKeyID: { Type: schema.TypeString, Optional: true, @@ -188,6 +194,7 @@ func resourceEventDataStoreCreate(ctx context.Context, d *schema.ResourceData, m name := d.Get(names.AttrName).(string) input := &cloudtrail.CreateEventDataStoreInput{ + BillingMode: types.BillingMode(d.Get("billing_mode").(string)), MultiRegionEnabled: aws.Bool(d.Get("multi_region_enabled").(bool)), Name: aws.String(name), OrganizationEnabled: aws.Bool(d.Get("organization_enabled").(bool)), @@ -240,6 +247,7 @@ func resourceEventDataStoreRead(ctx context.Context, d *schema.ResourceData, met } d.Set(names.AttrARN, output.EventDataStoreArn) d.Set(names.AttrKMSKeyID, output.KmsKeyId) + d.Set("billing_mode", output.BillingMode) d.Set("multi_region_enabled", output.MultiRegionEnabled) d.Set(names.AttrName, output.Name) d.Set("organization_enabled", output.OrganizationEnabled) @@ -262,6 +270,10 @@ func resourceEventDataStoreUpdate(ctx context.Context, d *schema.ResourceData, m input.AdvancedEventSelectors = expandAdvancedEventSelector(d.Get("advanced_event_selector").([]interface{})) } + if d.HasChange("billing_mode") { + input.BillingMode = types.BillingMode(d.Get("billing_mode").(string)) + } + if d.HasChange("multi_region_enabled") { input.MultiRegionEnabled = aws.Bool(d.Get("multi_region_enabled").(bool)) } diff --git a/internal/service/cloudtrail/event_data_store_test.go b/internal/service/cloudtrail/event_data_store_test.go index 41b7e349ac9..2a2f1010aa6 100644 --- a/internal/service/cloudtrail/event_data_store_test.go +++ b/internal/service/cloudtrail/event_data_store_test.go @@ -43,6 +43,7 @@ func TestAccCloudTrailEventDataStore_basic(t *testing.T) { }), resource.TestCheckResourceAttr(resourceName, "advanced_event_selector.0.name", "Default management events"), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, 
"cloudtrail", regexache.MustCompile(`eventdatastore/.+`)), + resource.TestCheckResourceAttr(resourceName, "billing_mode", "EXTENDABLE_RETENTION_PRICING"), resource.TestCheckResourceAttr(resourceName, "multi_region_enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, "organization_enabled", acctest.CtFalse), @@ -60,6 +61,42 @@ func TestAccCloudTrailEventDataStore_basic(t *testing.T) { }) } +func TestAccCloudTrailEventDataStore_billingMode(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cloudtrail_event_data_store.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CloudTrailServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEventDataStoreDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEventDataStoreConfig_billingMode(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEventDataStoreExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "billing_mode", "FIXED_RETENTION_PRICING"), + resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccEventDataStoreConfig_billingModeUpdated(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEventDataStoreExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "billing_mode", "EXTENDABLE_RETENTION_PRICING"), + resource.TestCheckResourceAttr(resourceName, "termination_protection_enabled", acctest.CtFalse), + ), + }, + }, + }) +} + func TestAccCloudTrailEventDataStore_kmsKeyId(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -346,6 +383,28 
@@ resource "aws_cloudtrail_event_data_store" "test" { `, rName) } +func testAccEventDataStoreConfig_billingMode(rName string) string { + return fmt.Sprintf(` +resource "aws_cloudtrail_event_data_store" "test" { + name = %[1]q + + billing_mode = "FIXED_RETENTION_PRICING" + termination_protection_enabled = false # For ease of deletion. +} +`, rName) +} + +func testAccEventDataStoreConfig_billingModeUpdated(rName string) string { + return fmt.Sprintf(` +resource "aws_cloudtrail_event_data_store" "test" { + name = %[1]q + + billing_mode = "EXTENDABLE_RETENTION_PRICING" + termination_protection_enabled = false # For ease of deletion. +} +`, rName) +} + func testAccEventDataStoreConfig_kmsKeyId(rName string) string { return fmt.Sprintf(` resource "aws_kms_key" "test" { diff --git a/internal/service/cloudtrail/service_endpoint_resolver_gen.go b/internal/service/cloudtrail/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..4def6ac6f8d --- /dev/null +++ b/internal/service/cloudtrail/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package cloudtrail + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloudtrail_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudtrail" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloudtrail_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloudtrail_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloudtrail_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloudtrail_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloudtrail endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } 
+ + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloudtrail_sdkv2.Options) { + return func(o *cloudtrail_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloudtrail/service_endpoints_gen_test.go b/internal/service/cloudtrail/service_endpoints_gen_test.go index aaacfc1abf5..a6c8e24747c 100644 --- a/internal/service/cloudtrail/service_endpoints_gen_test.go +++ b/internal/service/cloudtrail/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cloudtrail_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudtrail_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
cloudtrail_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudtrail_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cloudtrail/service_package_gen.go b/internal/service/cloudtrail/service_package_gen.go index f2fc05bb468..9d995bb1aed 100644 --- a/internal/service/cloudtrail/service_package_gen.go +++ b/internal/service/cloudtrail/service_package_gen.go @@ -1,4 +1,4 
@@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cloudtrail @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cloudtrail_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudtrail" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -62,19 +61,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloudtrail_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cloudtrail_sdkv2.NewFromConfig(cfg, func(o *cloudtrail_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cloudtrail_sdkv2.NewFromConfig(cfg, + cloudtrail_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cloudtrail/sweep.go b/internal/service/cloudtrail/sweep.go index 4d5acd464ac..ecd679ebe2b 100644 --- a/internal/service/cloudtrail/sweep.go +++ b/internal/service/cloudtrail/sweep.go @@ -20,6 +20,11 @@ func RegisterSweepers() { Name: "aws_cloudtrail", F: sweepTrails, }) + + resource.AddTestSweepers("aws_cloudtrail_event_data_store", &resource.Sweeper{ + Name: "aws_cloudtrail_event_data_store", + F: 
sweepEventDataStores, + }) } func sweepTrails(region string) error { @@ -84,3 +89,44 @@ func sweepTrails(region string) error { return nil } + +func sweepEventDataStores(region string) error { + ctx := sweep.Context(region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) + if err != nil { + return fmt.Errorf("error getting client: %w", err) + } + conn := client.CloudTrailClient(ctx) + input := &cloudtrail.ListEventDataStoresInput{} + sweepResources := make([]sweep.Sweepable, 0) + + pages := cloudtrail.NewListEventDataStoresPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping CloudTrail Event Data Store sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing CloudTrail Event Data Stores (%s): %w", region, err) + } + + for _, v := range page.EventDataStores { + r := resourceEventDataStore() + d := r.Data(nil) + d.SetId(aws.ToString(v.EventDataStoreArn)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping CloudTrail Event Data Stores (%s): %w", region, err) + } + + return nil +} diff --git a/internal/service/cloudwatch/service_endpoint_resolver_gen.go b/internal/service/cloudwatch/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ef81ebe7107 --- /dev/null +++ b/internal/service/cloudwatch/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package cloudwatch + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cloudwatch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cloudwatch_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cloudwatch_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cloudwatch_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cloudwatch_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cloudwatch endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } 
+ + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cloudwatch_sdkv2.Options) { + return func(o *cloudwatch_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cloudwatch/service_endpoints_gen_test.go b/internal/service/cloudwatch/service_endpoints_gen_test.go index f487b873387..4a83208444d 100644 --- a/internal/service/cloudwatch/service_endpoints_gen_test.go +++ b/internal/service/cloudwatch/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cloudwatch_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudwatch_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
cloudwatch_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cloudwatch_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cloudwatch/service_package_gen.go b/internal/service/cloudwatch/service_package_gen.go index ff843efc388..9a13c354e3d 100644 --- a/internal/service/cloudwatch/service_package_gen.go +++ b/internal/service/cloudwatch/service_package_gen.go @@ -1,4 +1,4 
@@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cloudwatch @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cloudwatch_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cloudwatch" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -69,19 +68,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cloudwatch_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cloudwatch_sdkv2.NewFromConfig(cfg, func(o *cloudwatch_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cloudwatch_sdkv2.NewFromConfig(cfg, + cloudwatch_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cloudwatch/tags_gen.go b/internal/service/cloudwatch/tags_gen.go index 99a3e883a67..6ad0e590e43 100644 --- a/internal/service/cloudwatch/tags_gen.go +++ b/internal/service/cloudwatch/tags_gen.go @@ -98,12 +98,12 @@ func setTagsOut(ctx context.Context, tags []awstypes.Tag) { } // createTags creates cloudwatch service tags for new resources. 
-func createTags(ctx context.Context, conn *cloudwatch.Client, identifier string, tags []awstypes.Tag) error { +func createTags(ctx context.Context, conn *cloudwatch.Client, identifier string, tags []awstypes.Tag, optFns ...func(*cloudwatch.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates cloudwatch service tags. diff --git a/internal/service/codeartifact/service_endpoint_resolver_gen.go b/internal/service/codeartifact/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f6d18561c7e --- /dev/null +++ b/internal/service/codeartifact/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package codeartifact + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codeartifact_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codeartifact" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codeartifact_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codeartifact_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codeartifact_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codeartifact_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint 
setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codeartifact endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codeartifact_sdkv2.Options) { + return func(o *codeartifact_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codeartifact/service_endpoints_gen_test.go b/internal/service/codeartifact/service_endpoints_gen_test.go index 35469c2629e..52f947f8927 100644 --- a/internal/service/codeartifact/service_endpoints_gen_test.go +++ b/internal/service/codeartifact/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses 
t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codeartifact_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codeartifact_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := codeartifact_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codeartifact_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func 
expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/codeartifact/service_package_gen.go b/internal/service/codeartifact/service_package_gen.go index 0cd5c87dada..168cacaca52 100644 --- a/internal/service/codeartifact/service_package_gen.go +++ b/internal/service/codeartifact/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package codeartifact @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codeartifact_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codeartifact" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -77,19 +76,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codeartifact_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codeartifact_sdkv2.NewFromConfig(cfg, func(o *codeartifact_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codeartifact_sdkv2.NewFromConfig(cfg, + codeartifact_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/codebuild/project.go b/internal/service/codebuild/project.go index c263aaf3012..68a0493bccd 100644 --- a/internal/service/codebuild/project.go +++ b/internal/service/codebuild/project.go @@ -978,7 +978,11 @@ func resourceProjectUpdate(ctx context.Context, d *schema.ResourceData, meta int } if d.HasChange("concurrent_build_limit") { - input.ConcurrentBuildLimit = aws.Int32(int32(d.Get("concurrent_build_limit").(int))) + if v := int32(d.Get("concurrent_build_limit").(int)); v != 0 { + input.ConcurrentBuildLimit = aws.Int32(v) + } else { + 
input.ConcurrentBuildLimit = aws.Int32(-1) + } } if d.HasChange(names.AttrDescription) { diff --git a/internal/service/codebuild/project_test.go b/internal/service/codebuild/project_test.go index bd99e065ff3..bc92046f716 100644 --- a/internal/service/codebuild/project_test.go +++ b/internal/service/codebuild/project_test.go @@ -2840,6 +2840,13 @@ func TestAccCodeBuildProject_concurrentBuildLimit(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "concurrent_build_limit", "12"), ), }, + { + Config: testAccProjectConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &project), + resource.TestCheckResourceAttr(resourceName, "concurrent_build_limit", acctest.Ct0), + ), + }, }, }) } diff --git a/internal/service/codebuild/service_endpoint_resolver_gen.go b/internal/service/codebuild/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..3e7182c0523 --- /dev/null +++ b/internal/service/codebuild/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package codebuild + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codebuild_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codebuild" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codebuild_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codebuild_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codebuild_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codebuild_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codebuild endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + 
return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codebuild_sdkv2.Options) { + return func(o *codebuild_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codebuild/service_endpoints_gen_test.go b/internal/service/codebuild/service_endpoints_gen_test.go index 2dbd2ac7a4e..c11ccb1dd83 100644 --- a/internal/service/codebuild/service_endpoints_gen_test.go +++ b/internal/service/codebuild/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codebuild_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codebuild_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
codebuild_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codebuild_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/codebuild/service_package_gen.go b/internal/service/codebuild/service_package_gen.go index 3336ede1efe..dfce895ba83 100644 --- a/internal/service/codebuild/service_package_gen.go +++ b/internal/service/codebuild/service_package_gen.go @@ -1,4 +1,4 @@ -// 
Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package codebuild @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codebuild_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codebuild" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -67,19 +66,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codebuild_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codebuild_sdkv2.NewFromConfig(cfg, func(o *codebuild_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codebuild_sdkv2.NewFromConfig(cfg, + codebuild_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/codebuild/webhook.go b/internal/service/codebuild/webhook.go index bad6970d761..24ba72cd34f 100644 --- a/internal/service/codebuild/webhook.go +++ b/internal/service/codebuild/webhook.go @@ -83,6 +83,28 @@ func resourceWebhook() *schema.Resource { Required: true, ForceNew: true, }, + "scope_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Required: true, + }, + names.AttrDomain: { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.WebhookScopeType](), + }, + names.AttrScope: { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, "secret": { Type: schema.TypeString, Computed: true, @@ -117,6 +139,10 @@ func resourceWebhookCreate(ctx context.Context, d *schema.ResourceData, meta int input.FilterGroups = expandWebhookFilterGroups(v.(*schema.Set).List()) } + if v, ok := d.GetOk("scope_configuration"); ok && len(v.([]interface{})) > 0 { + input.ScopeConfiguration = expandScopeConfiguration(v.([]interface{})) + } + output, err := conn.CreateWebhook(ctx, input) if err != nil { @@ -148,9 +174,14 @@ func resourceWebhookRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("build_type", webhook.BuildType) d.Set("branch_filter", webhook.BranchFilter) - d.Set("filter_group", flattenWebhookFilterGroups(webhook.FilterGroups)) + if err := d.Set("filter_group", flattenWebhookFilterGroups(webhook.FilterGroups)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting filter_group: %s", err) + } d.Set("payload_url", webhook.PayloadUrl) d.Set("project_name", d.Id()) + if err := d.Set("scope_configuration", flattenScopeConfiguration(webhook.ScopeConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting scope_configuration: %s", err) + } d.Set("secret", d.Get("secret").(string)) d.Set(names.AttrURL, webhook.Url) @@ -290,6 +321,25 @@ func expandWebhookFilter(tfMap map[string]interface{}) *types.WebhookFilter { return apiObject } +func expandScopeConfiguration(tfList []interface{}) *types.ScopeConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + apiObject := &types.ScopeConfiguration{ + Name: aws.String(tfMap[names.AttrName].(string)), + Scope: 
types.WebhookScopeType(tfMap[names.AttrScope].(string)), + } + + if v, ok := tfMap[names.AttrDomain].(string); ok && v != "" { + apiObject.Domain = aws.String(v) + } + + return apiObject +} + func flattenWebhookFilterGroups(apiObjects [][]types.WebhookFilter) []interface{} { if len(apiObjects) == 0 { return nil @@ -336,3 +386,20 @@ func flattenWebhookFilter(apiObject types.WebhookFilter) map[string]interface{} return tfMap } + +func flattenScopeConfiguration(apiObject *types.ScopeConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + names.AttrName: apiObject.Name, + names.AttrScope: apiObject.Scope, + } + + if apiObject.Domain != nil { + tfMap[names.AttrDomain] = apiObject.Domain + } + + return []interface{}{tfMap} +} diff --git a/internal/service/codebuild/webhook_test.go b/internal/service/codebuild/webhook_test.go index 65014e6e24e..77daa99c294 100644 --- a/internal/service/codebuild/webhook_test.go +++ b/internal/service/codebuild/webhook_test.go @@ -43,11 +43,12 @@ func TestAccCodeBuildWebhook_bitbucket(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWebhookConfig_bitbucket(rName, sourceLocation), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckWebhookExists(ctx, resourceName, &webhook), resource.TestCheckResourceAttr(resourceName, "branch_filter", ""), resource.TestCheckResourceAttr(resourceName, "project_name", rName), resource.TestMatchResourceAttr(resourceName, "payload_url", regexache.MustCompile(`^https://`)), + resource.TestCheckResourceAttr(resourceName, "scope_configuration.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "secret", ""), resource.TestMatchResourceAttr(resourceName, names.AttrURL, regexache.MustCompile(`^https://`)), ), @@ -80,11 +81,12 @@ func TestAccCodeBuildWebhook_gitHub(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWebhookConfig_gitHub(rName), - Check: resource.ComposeTestCheckFunc( 
+ Check: resource.ComposeAggregateTestCheckFunc( testAccCheckWebhookExists(ctx, resourceName, &webhook), resource.TestCheckResourceAttr(resourceName, "branch_filter", ""), resource.TestCheckResourceAttr(resourceName, "project_name", rName), resource.TestMatchResourceAttr(resourceName, "payload_url", regexache.MustCompile(`^https://`)), + resource.TestCheckResourceAttr(resourceName, "scope_configuration.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "secret", ""), resource.TestMatchResourceAttr(resourceName, names.AttrURL, regexache.MustCompile(`^https://`)), ), @@ -117,11 +119,12 @@ func TestAccCodeBuildWebhook_gitHubEnterprise(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWebhookConfig_gitHubEnterprise(rName, "dev"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckWebhookExists(ctx, resourceName, &webhook), resource.TestCheckResourceAttr(resourceName, "branch_filter", "dev"), resource.TestCheckResourceAttr(resourceName, "project_name", rName), resource.TestMatchResourceAttr(resourceName, "payload_url", regexache.MustCompile(`^https://`)), + resource.TestCheckResourceAttr(resourceName, "scope_configuration.#", acctest.Ct0), resource.TestMatchResourceAttr(resourceName, "secret", regexache.MustCompile(`.+`)), resource.TestCheckResourceAttr(resourceName, names.AttrURL, ""), ), @@ -134,11 +137,12 @@ func TestAccCodeBuildWebhook_gitHubEnterprise(t *testing.T) { }, { Config: testAccWebhookConfig_gitHubEnterprise(rName, "master"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckWebhookExists(ctx, resourceName, &webhook), resource.TestCheckResourceAttr(resourceName, "branch_filter", "master"), resource.TestCheckResourceAttr(resourceName, "project_name", rName), resource.TestMatchResourceAttr(resourceName, "payload_url", regexache.MustCompile(`^https://`)), + resource.TestCheckResourceAttr(resourceName, "scope_configuration.#", 
acctest.Ct0), resource.TestMatchResourceAttr(resourceName, "secret", regexache.MustCompile(`.+`)), resource.TestCheckResourceAttr(resourceName, names.AttrURL, ""), ), @@ -200,6 +204,41 @@ func TestAccCodeBuildWebhook_buildType(t *testing.T) { }) } +func TestAccCodeBuildWebhook_scopeConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var webhook types.Webhook + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_codebuild_webhook.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheck(ctx, t) + testAccPreCheckSourceCredentialsForServerType(ctx, t, types.ServerTypeGithub) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CodeBuildServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWebhookDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWebhookConfig_scopeConfiguration(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWebhookExists(ctx, resourceName, &webhook), + resource.TestCheckResourceAttr(resourceName, "scope_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "scope_configuration.0.name", rName), + resource.TestCheckResourceAttr(resourceName, "scope_configuration.0.scope", "GITHUB_GLOBAL"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"secret"}, + }, + }, + }) +} + func TestAccCodeBuildWebhook_branchFilter(t *testing.T) { ctx := acctest.Context(t) var webhook types.Webhook @@ -409,9 +448,7 @@ func testAccCheckWebhookExists(ctx context.Context, n string, v *types.Webhook) } func testAccWebhookConfig_bitbucket(rName, sourceLocation string) string { - return acctest.ConfigCompose( - testAccProjectConfig_sourceTypeBitbucket(rName, sourceLocation), - ` + return acctest.ConfigCompose(testAccProjectConfig_sourceTypeBitbucket(rName, sourceLocation), ` resource "aws_codebuild_webhook" 
"test" { project_name = aws_codebuild_project.test.name } @@ -419,9 +456,7 @@ resource "aws_codebuild_webhook" "test" { } func testAccWebhookConfig_gitHub(rName string) string { - return acctest.ConfigCompose( - testAccProjectConfig_basic(rName), - ` + return acctest.ConfigCompose(testAccProjectConfig_basic(rName), ` resource "aws_codebuild_webhook" "test" { project_name = aws_codebuild_project.test.name } @@ -476,9 +511,7 @@ resource "aws_codebuild_webhook" "test" { } func testAccWebhookConfig_filterGroup(rName string) string { - return acctest.ConfigCompose( - testAccProjectConfig_basic(rName), - ` + return acctest.ConfigCompose(testAccProjectConfig_basic(rName), ` resource "aws_codebuild_webhook" "test" { project_name = aws_codebuild_project.test.name @@ -504,3 +537,35 @@ resource "aws_codebuild_webhook" "test" { } `) } + +func testAccWebhookConfig_scopeConfiguration(rName string) string { + return acctest.ConfigCompose(testAccProjectConfig_baseServiceRole(rName), fmt.Sprintf(` +resource "aws_codebuild_project" "test" { + name = %[1]q + service_role = aws_iam_role.test.arn + + artifacts { + type = "NO_ARTIFACTS" + } + + environment { + compute_type = "BUILD_GENERAL1_SMALL" + image = "2" + type = "LINUX_CONTAINER" + } + + source { + location = "CODEBUILD_DEFAULT_WEBHOOK_SOURCE_LOCATION" + type = "GITHUB" + } +} + +resource "aws_codebuild_webhook" "test" { + project_name = aws_codebuild_project.test.name + scope_configuration { + name = %[1]q + scope = "GITHUB_GLOBAL" + } +} +`, rName)) +} diff --git a/internal/service/codecatalyst/service_endpoint_resolver_gen.go b/internal/service/codecatalyst/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..43b6ef1eae4 --- /dev/null +++ b/internal/service/codecatalyst/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package codecatalyst + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codecatalyst_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codecatalyst" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codecatalyst_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codecatalyst_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codecatalyst_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codecatalyst_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codecatalyst endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codecatalyst_sdkv2.Options) { + return func(o *codecatalyst_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codecatalyst/service_package_gen.go b/internal/service/codecatalyst/service_package_gen.go index a3aa2ef23e7..255fb7a8bc2 100644 --- a/internal/service/codecatalyst/service_package_gen.go +++ b/internal/service/codecatalyst/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package codecatalyst @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codecatalyst_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codecatalyst" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -61,19 +60,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codecatalyst_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codecatalyst_sdkv2.NewFromConfig(cfg, func(o *codecatalyst_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codecatalyst_sdkv2.NewFromConfig(cfg, + 
codecatalyst_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/codecommit/service_endpoint_resolver_gen.go b/internal/service/codecommit/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..afb73a1e822 --- /dev/null +++ b/internal/service/codecommit/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package codecommit + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codecommit_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codecommit" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codecommit_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codecommit_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codecommit_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codecommit_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + 
tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codecommit endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codecommit_sdkv2.Options) { + return func(o *codecommit_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codecommit/service_endpoints_gen_test.go b/internal/service/codecommit/service_endpoints_gen_test.go index a3eedd261a4..6acd86e6198 100644 --- a/internal/service/codecommit/service_endpoints_gen_test.go +++ b/internal/service/codecommit/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { 
//nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codecommit_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codecommit_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := codecommit_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codecommit_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/codecommit/service_package_gen.go b/internal/service/codecommit/service_package_gen.go index ae362c48704..9a6c2d5f8de 100644 --- a/internal/service/codecommit/service_package_gen.go +++ b/internal/service/codecommit/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package codecommit @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codecommit_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codecommit" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -74,19 +73,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codecommit_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codecommit_sdkv2.NewFromConfig(cfg, func(o *codecommit_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codecommit_sdkv2.NewFromConfig(cfg, + 
codecommit_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/codeguruprofiler/service_endpoint_resolver_gen.go b/internal/service/codeguruprofiler/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..5de7eba5e10 --- /dev/null +++ b/internal/service/codeguruprofiler/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package codeguruprofiler + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codeguruprofiler_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codeguruprofiler" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codeguruprofiler_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codeguruprofiler_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codeguruprofiler_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codeguruprofiler_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, 
params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codeguruprofiler endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codeguruprofiler_sdkv2.Options) { + return func(o *codeguruprofiler_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codeguruprofiler/service_endpoints_gen_test.go b/internal/service/codeguruprofiler/service_endpoints_gen_test.go index 724f8442dc5..8426219547f 100644 --- a/internal/service/codeguruprofiler/service_endpoints_gen_test.go +++ b/internal/service/codeguruprofiler/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint 
config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codeguruprofiler_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codeguruprofiler_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := codeguruprofiler_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codeguruprofiler_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + 
hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/codeguruprofiler/service_package_gen.go b/internal/service/codeguruprofiler/service_package_gen.go index 219c629715e..636daad2c35 100644 --- a/internal/service/codeguruprofiler/service_package_gen.go +++ b/internal/service/codeguruprofiler/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package codeguruprofiler @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codeguruprofiler_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codeguruprofiler" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -52,19 +51,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codeguruprofiler_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codeguruprofiler_sdkv2.NewFromConfig(cfg, func(o *codeguruprofiler_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - 
o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codeguruprofiler_sdkv2.NewFromConfig(cfg, + codeguruprofiler_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/codegurureviewer/service_endpoint_resolver_gen.go b/internal/service/codegurureviewer/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..6ca17a9a960 --- /dev/null +++ b/internal/service/codegurureviewer/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package codegurureviewer + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codegurureviewer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codegurureviewer" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codegurureviewer_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codegurureviewer_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codegurureviewer_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codegurureviewer_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codegurureviewer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codegurureviewer_sdkv2.Options) { + return func(o *codegurureviewer_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codegurureviewer/service_endpoints_gen_test.go b/internal/service/codegurureviewer/service_endpoints_gen_test.go index 1c84c0b7a72..1f573bf3d54 100644 --- a/internal/service/codegurureviewer/service_endpoints_gen_test.go +++ b/internal/service/codegurureviewer/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -87,7 +89,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -221,7 +223,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: 
expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -242,24 +244,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codegurureviewer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codegurureviewer_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := codegurureviewer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codegurureviewer_sdkv2.EndpointParameters{ @@ -267,14 +269,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -338,16 +340,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) 
caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/codegurureviewer/service_package_gen.go b/internal/service/codegurureviewer/service_package_gen.go index b69d2ab50fb..711ad2f4cfc 100644 --- a/internal/service/codegurureviewer/service_package_gen.go +++ b/internal/service/codegurureviewer/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package codegurureviewer @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codegurureviewer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codegurureviewer" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codegurureviewer_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codegurureviewer_sdkv2.NewFromConfig(cfg, func(o *codegurureviewer_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codegurureviewer_sdkv2.NewFromConfig(cfg, + codegurureviewer_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/codepipeline/service_endpoint_resolver_gen.go b/internal/service/codepipeline/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..b1ee40353db --- /dev/null +++ b/internal/service/codepipeline/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package codepipeline + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codepipeline_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codepipeline" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codepipeline_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codepipeline_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codepipeline_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codepipeline_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codepipeline endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codepipeline_sdkv2.Options) { + return func(o *codepipeline_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codepipeline/service_endpoints_gen_test.go b/internal/service/codepipeline/service_endpoints_gen_test.go index bd392b47c37..ac6e0bfbbff 100644 --- a/internal/service/codepipeline/service_endpoints_gen_test.go +++ b/internal/service/codepipeline/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codepipeline_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codepipeline_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := codepipeline_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codepipeline_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/codepipeline/service_package_gen.go b/internal/service/codepipeline/service_package_gen.go index b0cd23a26df..6689d01acf1 100644 --- a/internal/service/codepipeline/service_package_gen.go +++ 
b/internal/service/codepipeline/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package codepipeline @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codepipeline_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codepipeline" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -64,19 +63,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codepipeline_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codepipeline_sdkv2.NewFromConfig(cfg, func(o *codepipeline_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codepipeline_sdkv2.NewFromConfig(cfg, + codepipeline_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/codestarconnections/service_endpoint_resolver_gen.go b/internal/service/codestarconnections/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ca67d0e38c4 --- /dev/null +++ b/internal/service/codestarconnections/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by 
internal/generate/servicepackage/main.go; DO NOT EDIT. + +package codestarconnections + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codestarconnections_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codestarconnections" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codestarconnections_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codestarconnections_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codestarconnections_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codestarconnections_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = 
fmt.Errorf("looking up codestarconnections endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codestarconnections_sdkv2.Options) { + return func(o *codestarconnections_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codestarconnections/service_endpoints_gen_test.go b/internal/service/codestarconnections/service_endpoints_gen_test.go index fc2109ec1ab..7e984327f26 100644 --- a/internal/service/codestarconnections/service_endpoints_gen_test.go +++ b/internal/service/codestarconnections/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codestarconnections_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codestarconnections_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if 
ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := codestarconnections_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codestarconnections_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git 
a/internal/service/codestarconnections/service_package_gen.go b/internal/service/codestarconnections/service_package_gen.go index 5ccf55d1d4c..d61c0f7a43a 100644 --- a/internal/service/codestarconnections/service_package_gen.go +++ b/internal/service/codestarconnections/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package codestarconnections @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codestarconnections_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codestarconnections" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -58,19 +57,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codestarconnections_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codestarconnections_sdkv2.NewFromConfig(cfg, func(o *codestarconnections_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codestarconnections_sdkv2.NewFromConfig(cfg, + codestarconnections_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git 
a/internal/service/codestarnotifications/service_endpoint_resolver_gen.go b/internal/service/codestarnotifications/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..c9c8379b45f --- /dev/null +++ b/internal/service/codestarnotifications/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package codestarnotifications + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codestarnotifications_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codestarnotifications" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codestarnotifications_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codestarnotifications_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codestarnotifications_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codestarnotifications_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + 
hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codestarnotifications endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codestarnotifications_sdkv2.Options) { + return func(o *codestarnotifications_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/codestarnotifications/service_endpoints_gen_test.go b/internal/service/codestarnotifications/service_endpoints_gen_test.go index 47c99b6146f..4116ac5670b 100644 --- a/internal/service/codestarnotifications/service_endpoints_gen_test.go +++ b/internal/service/codestarnotifications/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func 
defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codestarnotifications_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codestarnotifications_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := codestarnotifications_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codestarnotifications_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/codestarnotifications/service_package_gen.go b/internal/service/codestarnotifications/service_package_gen.go index 9183a2840a2..a63e77cce9a 100644 --- a/internal/service/codestarnotifications/service_package_gen.go +++ b/internal/service/codestarnotifications/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package codestarnotifications @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codestarnotifications_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codestarnotifications" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codestarnotifications_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codestarnotifications_sdkv2.NewFromConfig(cfg, func(o *codestarnotifications_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = 
aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return codestarnotifications_sdkv2.NewFromConfig(cfg, + codestarnotifications_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cognitoidentity/pool.go b/internal/service/cognitoidentity/pool.go index 18ae5b5cb40..62b040b9fb8 100644 --- a/internal/service/cognitoidentity/pool.go +++ b/internal/service/cognitoidentity/pool.go @@ -248,19 +248,19 @@ func resourcePoolUpdate(ctx context.Context, d *schema.ResourceData, meta interf func resourcePoolDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).CognitoIdentityClient(ctx) - log.Printf("[DEBUG] Deleting Cognito Identity Pool: %s", d.Id()) + log.Printf("[DEBUG] Deleting Cognito Identity Pool: %s", d.Id()) _, err := conn.DeleteIdentityPool(ctx, &cognitoidentity.DeleteIdentityPoolInput{ IdentityPoolId: aws.String(d.Id()), }) if errs.IsA[*awstypes.ResourceNotFoundException](err) { - log.Printf("[DEBUG] Resource Pool already deleted: %s", d.Id()) return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Cognito identity pool (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting Cognito Identity Pool (%s): %s", d.Id(), err) } + return diags } diff --git a/internal/service/cognitoidentity/service_endpoint_resolver_gen.go b/internal/service/cognitoidentity/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..6b24b637ac8 --- /dev/null +++ b/internal/service/cognitoidentity/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package cognitoidentity + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cognitoidentity_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cognitoidentity" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cognitoidentity_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cognitoidentity_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cognitoidentity_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cognitoidentity_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cognitoidentity endpoint %q: %s", hostname, err) + return + } + 
} else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cognitoidentity_sdkv2.Options) { + return func(o *cognitoidentity_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cognitoidentity/service_endpoints_gen_test.go b/internal/service/cognitoidentity/service_endpoints_gen_test.go index 516d1a8316b..3134656aec0 100644 --- a/internal/service/cognitoidentity/service_endpoints_gen_test.go +++ b/internal/service/cognitoidentity/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := cognitoidentity_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cognitoidentity_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) 
string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := cognitoidentity_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), cognitoidentity_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cognitoidentity/service_package_gen.go b/internal/service/cognitoidentity/service_package_gen.go index 67aa7c6b8bf..75fe8908a20 100644 --- 
a/internal/service/cognitoidentity/service_package_gen.go +++ b/internal/service/cognitoidentity/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cognitoidentity @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" cognitoidentity_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cognitoidentity" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -67,19 +66,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cognitoidentity_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cognitoidentity_sdkv2.NewFromConfig(cfg, func(o *cognitoidentity_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return cognitoidentity_sdkv2.NewFromConfig(cfg, + cognitoidentity_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cognitoidentity/sweep.go b/internal/service/cognitoidentity/sweep.go new file mode 100644 index 00000000000..80758d95987 --- /dev/null +++ b/internal/service/cognitoidentity/sweep.go @@ -0,0 +1,65 @@ +// Copyright (c) HashiCorp, 
Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cognitoidentity + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentity" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/sweep" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" +) + +func RegisterSweepers() { + resource.AddTestSweepers("aws_cognito_identity_pool", &resource.Sweeper{ + Name: "aws_cognito_identity_pool", + F: sweepIdentityPools, + }) +} + +func sweepIdentityPools(region string) error { + ctx := sweep.Context(region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) + if err != nil { + return fmt.Errorf("Error getting client: %s", err) + } + input := &cognitoidentity.ListIdentityPoolsInput{ + MaxResults: aws.Int32(50), + } + conn := client.CognitoIdentityClient(ctx) + sweepResources := make([]sweep.Sweepable, 0) + + pages := cognitoidentity.NewListIdentityPoolsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Cognito Identity Pool sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Cognito Identity Pools (%s): %w", region, err) + } + + for _, v := range page.IdentityPools { + r := resourcePool() + d := r.Data(nil) + d.SetId(aws.ToString(v.IdentityPoolId)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping Cognito Identity Pools (%s): %w", region, err) + } + + return nil +} diff --git a/internal/service/cognitoidp/consts.go b/internal/service/cognitoidp/consts.go index ea839accef2..43ec8d35123 100644 --- a/internal/service/cognitoidp/consts.go +++ b/internal/service/cognitoidp/consts.go @@ -7,14 +7,6 @@ import ( "time" ) -const ( - 
ResNameResourceServer = "Resource Server" - ResNameRiskConfiguration = "Risk Configuration" - ResNameUserPoolClient = "User Pool Client" - ResNameUserPoolDomain = "User Pool Domain" - ResNameUser = "User" -) - const ( propagationTimeout = 2 * time.Minute ) diff --git a/internal/service/cognitoidp/exports_test.go b/internal/service/cognitoidp/exports_test.go index ed8c6953b58..1f4448aec2a 100644 --- a/internal/service/cognitoidp/exports_test.go +++ b/internal/service/cognitoidp/exports_test.go @@ -18,10 +18,14 @@ var ( ResourceUserPoolUICustomization = resourceUserPoolUICustomization FindGroupByTwoPartKey = findGroupByTwoPartKey + FindGroupUserByThreePartKey = findGroupUserByThreePartKey FindIdentityProviderByTwoPartKey = findIdentityProviderByTwoPartKey + FindResourceServerByTwoPartKey = findResourceServerByTwoPartKey + FindRiskConfigurationByTwoPartKey = findRiskConfigurationByTwoPartKey FindUserByTwoPartKey = findUserByTwoPartKey FindUserPoolByID = findUserPoolByID + FindUserPoolClientByName = findUserPoolClientByName + FindUserPoolClientByTwoPartKey = findUserPoolClientByTwoPartKey + FindUserPoolDomain = findUserPoolDomain FindUserPoolUICustomizationByTwoPartKey = findUserPoolUICustomizationByTwoPartKey - - SkipFlatteningStringAttributeContraints = skipFlatteningStringAttributeContraints ) diff --git a/internal/service/cognitoidp/find.go b/internal/service/cognitoidp/find.go deleted file mode 100644 index edcda34b1de..00000000000 --- a/internal/service/cognitoidp/find.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cognitoidp - -import ( - "context" - "errors" - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -// FindCognitoUserInGroup checks whether the specified user is present in the specified group. Returns boolean value accordingly. -func FindCognitoUserInGroup(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, groupName, userPoolId, username string) (bool, error) { - input := &cognitoidentityprovider.AdminListGroupsForUserInput{ - UserPoolId: aws.String(userPoolId), - Username: aws.String(username), - } - - found := false - - err := conn.AdminListGroupsForUserPagesWithContext(ctx, input, func(page *cognitoidentityprovider.AdminListGroupsForUserOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, group := range page.Groups { - if group == nil { - continue - } - - if aws.StringValue(group.GroupName) == groupName { - found = true - break - } - } - - if found { - return false - } - - return !lastPage - }) - - if err != nil { - return false, fmt.Errorf("reading groups for user: %w", err) - } - - return found, nil -} - -// FindCognitoUserPoolClientByID returns a Cognito User Pool Client using the ClientId -func FindCognitoUserPoolClientByID(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, userPoolId, clientId string) (*cognitoidentityprovider.UserPoolClientType, error) { - input := &cognitoidentityprovider.DescribeUserPoolClientInput{ - ClientId: aws.String(clientId), - UserPoolId: aws.String(userPoolId), - } - - output, err := conn.DescribeUserPoolClientWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { - return nil, 
&retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.UserPoolClient == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.UserPoolClient, nil -} - -func FindCognitoUserPoolClientByName(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, userPoolId string, nameFilter cognitoUserPoolClientDescriptionNameFilter) (*cognitoidentityprovider.UserPoolClientType, error) { - clientDescs, err := listCognitoUserPoolClientDescriptions(ctx, conn, userPoolId, nameFilter) - if err != nil { - return nil, err - } - - client, err := tfresource.AssertSinglePtrResult(clientDescs) - if err != nil { - return nil, err - } - - return FindCognitoUserPoolClientByID(ctx, conn, userPoolId, aws.StringValue(client.ClientId)) -} - -type cognitoUserPoolClientDescriptionNameFilter func(string) (bool, error) - -func listCognitoUserPoolClientDescriptions(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, userPoolId string, nameFilter cognitoUserPoolClientDescriptionNameFilter) ([]*cognitoidentityprovider.UserPoolClientDescription, error) { - var errs []error - var descs []*cognitoidentityprovider.UserPoolClientDescription - - input := &cognitoidentityprovider.ListUserPoolClientsInput{ - UserPoolId: aws.String(userPoolId), - } - - err := conn.ListUserPoolClientsPagesWithContext(ctx, input, func(page *cognitoidentityprovider.ListUserPoolClientsOutput, lastPage bool) bool { - for _, client := range page.UserPoolClients { - if ok, err := nameFilter(aws.StringValue(client.ClientName)); err != nil { - errs = append(errs, err) - } else if ok { - descs = append(descs, client) - } - } - return !lastPage - }) - - if err != nil { - errs = append(errs, err) - return descs, errors.Join(errs...) 
- } - - return descs, nil -} - -func FindRiskConfigurationById(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, id string) (*cognitoidentityprovider.RiskConfigurationType, error) { - userPoolId, clientId, err := RiskConfigurationParseID(id) - if err != nil { - return nil, err - } - - input := &cognitoidentityprovider.DescribeRiskConfigurationInput{ - UserPoolId: aws.String(userPoolId), - } - - if clientId != "" { - input.ClientId = aws.String(clientId) - } - - output, err := conn.DescribeRiskConfigurationWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.RiskConfiguration == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.RiskConfiguration, nil -} diff --git a/internal/service/cognitoidp/flex.go b/internal/service/cognitoidp/flex.go deleted file mode 100644 index 184027b31cb..00000000000 --- a/internal/service/cognitoidp/flex.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package cognitoidp - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" -) - -func expandServerScope(inputs []interface{}) []*cognitoidentityprovider.ResourceServerScopeType { - configs := make([]*cognitoidentityprovider.ResourceServerScopeType, len(inputs)) - for i, input := range inputs { - param := input.(map[string]interface{}) - config := &cognitoidentityprovider.ResourceServerScopeType{} - - if v, ok := param["scope_description"]; ok { - config.ScopeDescription = aws.String(v.(string)) - } - - if v, ok := param["scope_name"]; ok { - config.ScopeName = aws.String(v.(string)) - } - - configs[i] = config - } - - return configs -} - -func flattenServerScope(inputs []*cognitoidentityprovider.ResourceServerScopeType) []map[string]interface{} { - values := make([]map[string]interface{}, 0) - - for _, input := range inputs { - if input == nil { - continue - } - var value = map[string]interface{}{ - "scope_name": aws.StringValue(input.ScopeName), - "scope_description": aws.StringValue(input.ScopeDescription), - } - values = append(values, value) - } - return values -} diff --git a/internal/service/cognitoidp/flex_test.go b/internal/service/cognitoidp/flex_test.go index ad67259a600..084892e7e92 100644 --- a/internal/service/cognitoidp/flex_test.go +++ b/internal/service/cognitoidp/flex_test.go @@ -1,15 +1,13 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package cognitoidp_test +package cognitoidp import ( "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfcognitoidp "github.com/hashicorp/terraform-provider-aws/internal/service/cognitoidp" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -18,109 +16,109 @@ func TestUserPoolSchemaAttributeMatchesStandardAttribute(t *testing.T) { cases := []struct { Name string - Input *cognitoidentityprovider.SchemaAttributeType + Input *awstypes.SchemaAttributeType Expected bool }{ { Name: "birthday standard", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("birthdate"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ - MaxLength: aws.String(acctest.Ct10), - MinLength: aws.String(acctest.Ct10), + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ + MaxLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant + MinLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant }, }, Expected: true, }, { Name: "birthday non-standard DeveloperOnlyAttribute", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(true), Mutable: aws.Bool(true), Name: aws.String("birthdate"), Required: aws.Bool(false), - 
StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ - MaxLength: aws.String(acctest.Ct10), - MinLength: aws.String(acctest.Ct10), + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ + MaxLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant + MinLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant }, }, Expected: false, }, { Name: "birthday non-standard Mutable", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(false), Name: aws.String("birthdate"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ - MaxLength: aws.String(acctest.Ct10), - MinLength: aws.String(acctest.Ct10), + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ + MaxLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant + MinLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant }, }, Expected: false, }, { Name: "non-standard Name", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("non-existent"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ - MaxLength: aws.String(acctest.Ct10), - MinLength: aws.String(acctest.Ct10), + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ + MaxLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant + MinLength: aws.String("10"), // 
nosemgrep:ci.literal-10-string-test-constant }, }, Expected: false, }, { Name: "birthday non-standard Required", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("birthdate"), Required: aws.Bool(true), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ - MaxLength: aws.String(acctest.Ct10), - MinLength: aws.String(acctest.Ct10), + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ + MaxLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant + MinLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant }, }, Expected: false, }, { Name: "birthday non-standard StringAttributeConstraints.Max", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("birthdate"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("999"), - MinLength: aws.String(acctest.Ct10), + MinLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant }, }, Expected: false, }, { Name: "birthday non-standard StringAttributeConstraints.Min", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: 
aws.Bool(true), Name: aws.String("birthdate"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ - MaxLength: aws.String(acctest.Ct10), + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ + MaxLength: aws.String("10"), // nosemgrep:ci.literal-10-string-test-constant MinLength: aws.String("999"), }, }, @@ -128,8 +126,8 @@ func TestUserPoolSchemaAttributeMatchesStandardAttribute(t *testing.T) { }, { Name: "email_verified standard", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeBoolean), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeBoolean, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("email_verified"), @@ -139,13 +137,13 @@ func TestUserPoolSchemaAttributeMatchesStandardAttribute(t *testing.T) { }, { Name: "updated_at standard", - Input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeNumber), + Input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeNumber, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("updated_at"), - NumberAttributeConstraints: &cognitoidentityprovider.NumberAttributeConstraintsType{ - MinValue: aws.String(acctest.Ct0), + NumberAttributeConstraints: &awstypes.NumberAttributeConstraintsType{ + MinValue: aws.String("0"), // nosemgrep:ci.literal-0-string-test-constant }, Required: aws.Bool(false), }, @@ -157,7 +155,7 @@ func TestUserPoolSchemaAttributeMatchesStandardAttribute(t *testing.T) { tc := tc t.Run(tc.Name, func(t *testing.T) { t.Parallel() - output := tfcognitoidp.UserPoolSchemaAttributeMatchesStandardAttribute(tc.Input) + output := userPoolSchemaAttributeMatchesStandardAttribute(tc.Input) if output != tc.Expected { t.Fatalf("Expected %t match with standard 
attribute on input: \n\n%#v\n\n", tc.Expected, tc.Input) } @@ -170,86 +168,86 @@ func TestSkipFlatteningStringAttributeContraints(t *testing.T) { cases := []struct { name string - configured []*cognitoidentityprovider.SchemaAttributeType - input *cognitoidentityprovider.SchemaAttributeType + configured []awstypes.SchemaAttributeType + input *awstypes.SchemaAttributeType want bool }{ { name: "config omitted", - configured: []*cognitoidentityprovider.SchemaAttributeType{ + configured: []awstypes.SchemaAttributeType{ { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(false), Name: aws.String(names.AttrEmail), Required: aws.Bool(true), }, }, - input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(false), Name: aws.String(names.AttrEmail), Required: aws.Bool(true), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), - MinLength: aws.String(acctest.Ct0), + MinLength: aws.String("0"), // nosemgrep:ci.literal-0-string-test-constant }, }, want: true, }, { name: "config set", - configured: []*cognitoidentityprovider.SchemaAttributeType{ + configured: []awstypes.SchemaAttributeType{ { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(false), Name: aws.String(names.AttrEmail), Required: aws.Bool(true), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: 
&awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), - MinLength: aws.String(acctest.Ct0), + MinLength: aws.String("0"), // nosemgrep:ci.literal-0-string-test-constant }, }, }, - input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(false), Name: aws.String(names.AttrEmail), Required: aws.Bool(true), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), - MinLength: aws.String(acctest.Ct0), + MinLength: aws.String("0"), // nosemgrep:ci.literal-0-string-test-constant }, }, want: false, }, { name: "config set with diff", - configured: []*cognitoidentityprovider.SchemaAttributeType{ + configured: []awstypes.SchemaAttributeType{ { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(false), Name: aws.String(names.AttrEmail), Required: aws.Bool(true), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("1024"), MinLength: aws.String("5"), }, }, }, - input: &cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + input: &awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(false), Name: aws.String(names.AttrEmail), Required: aws.Bool(true), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: 
&awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), - MinLength: aws.String(acctest.Ct0), + MinLength: aws.String("0"), // nosemgrep:ci.literal-0-string-test-constant }, }, want: false, @@ -260,7 +258,7 @@ func TestSkipFlatteningStringAttributeContraints(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { t.Parallel() - got := tfcognitoidp.SkipFlatteningStringAttributeContraints(tc.configured, tc.input) + got := skipFlatteningStringAttributeContraints(tc.configured, tc.input) if got != tc.want { t.Fatalf("skipFlatteningStringAttributeContraints() got %t, want %t\n\n%#v\n\n", got, tc.want, tc.input) } diff --git a/internal/service/cognitoidp/generate.go b/internal/service/cognitoidp/generate.go index 11c3168f128..972478c2a0e 100644 --- a/internal/service/cognitoidp/generate.go +++ b/internal/service/cognitoidp/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ServiceTagsMap -UpdateTags -KVTValues -SkipTypesImp //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/cognitoidp/identity_provider.go b/internal/service/cognitoidp/identity_provider.go index 56158218537..46becadd850 100644 --- a/internal/service/cognitoidp/identity_provider.go +++ b/internal/service/cognitoidp/identity_provider.go @@ -10,14 +10,16 @@ import ( "strings" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -72,10 +74,10 @@ func resourceIdentityProvider() *schema.Resource { ), }, "provider_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(cognitoidentityprovider.IdentityProviderTypeType_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.IdentityProviderTypeType](), }, names.AttrUserPoolID: { Type: schema.TypeString, @@ -88,30 +90,30 @@ func resourceIdentityProvider() *schema.Resource { func resourceIdentityProviderCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) providerName := d.Get(names.AttrProviderName).(string) userPoolID := d.Get(names.AttrUserPoolID).(string) id := identityProviderCreateResourceID(userPoolID, providerName) input := &cognitoidentityprovider.CreateIdentityProviderInput{ ProviderName: aws.String(providerName), - ProviderType: aws.String(d.Get("provider_type").(string)), + ProviderType: awstypes.IdentityProviderTypeType(d.Get("provider_type").(string)), UserPoolId: aws.String(userPoolID), } if v, ok := d.GetOk("attribute_mapping"); ok && len(v.(map[string]interface{})) > 0 { - input.AttributeMapping = flex.ExpandStringMap(v.(map[string]interface{})) + input.AttributeMapping = flex.ExpandStringValueMap(v.(map[string]interface{})) } if v, ok := d.GetOk("idp_identifiers"); ok && len(v.([]interface{})) > 0 { - input.IdpIdentifiers = flex.ExpandStringList(v.([]interface{})) + input.IdpIdentifiers = flex.ExpandStringValueList(v.([]interface{})) } if v, ok := d.GetOk("provider_details"); ok && len(v.(map[string]interface{})) > 0 { - input.ProviderDetails = flex.ExpandStringMap(v.(map[string]interface{})) + input.ProviderDetails = flex.ExpandStringValueMap(v.(map[string]interface{})) } - _, err := conn.CreateIdentityProviderWithContext(ctx, input) + _, err := conn.CreateIdentityProvider(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Cognito Identity Provider (%s): %s", id, err) @@ -124,7 +126,7 @@ func resourceIdentityProviderCreate(ctx context.Context, d *schema.ResourceData, func resourceIdentityProviderRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID, providerName, err := identityProviderParseResourceID(d.Id()) if err != nil { @@ -143,9 +145,9 @@ func resourceIdentityProviderRead(ctx context.Context, d 
*schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "reading Cognito Identity Provider (%s): %s", d.Id(), err) } - d.Set("attribute_mapping", aws.StringValueMap(idp.AttributeMapping)) - d.Set("idp_identifiers", aws.StringValueSlice(idp.IdpIdentifiers)) - d.Set("provider_details", aws.StringValueMap(idp.ProviderDetails)) + d.Set("attribute_mapping", idp.AttributeMapping) + d.Set("idp_identifiers", idp.IdpIdentifiers) + d.Set("provider_details", idp.ProviderDetails) d.Set(names.AttrProviderName, idp.ProviderName) d.Set("provider_type", idp.ProviderType) d.Set(names.AttrUserPoolID, idp.UserPoolId) @@ -155,7 +157,7 @@ func resourceIdentityProviderRead(ctx context.Context, d *schema.ResourceData, m func resourceIdentityProviderUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID, providerName, err := identityProviderParseResourceID(d.Id()) if err != nil { @@ -168,20 +170,20 @@ func resourceIdentityProviderUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("attribute_mapping") { - input.AttributeMapping = flex.ExpandStringMap(d.Get("attribute_mapping").(map[string]interface{})) + input.AttributeMapping = flex.ExpandStringValueMap(d.Get("attribute_mapping").(map[string]interface{})) } if d.HasChange("idp_identifiers") { - input.IdpIdentifiers = flex.ExpandStringList(d.Get("idp_identifiers").([]interface{})) + input.IdpIdentifiers = flex.ExpandStringValueList(d.Get("idp_identifiers").([]interface{})) } if d.HasChange("provider_details") { - v := flex.ExpandStringMap(d.Get("provider_details").(map[string]interface{})) + v := flex.ExpandStringValueMap(d.Get("provider_details").(map[string]interface{})) delete(v, "ActiveEncryptionCertificate") input.ProviderDetails = v } - _, err = conn.UpdateIdentityProviderWithContext(ctx, input) + _, err = 
conn.UpdateIdentityProvider(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Cognito Identity Provider (%s): %s", d.Id(), err) @@ -192,7 +194,7 @@ func resourceIdentityProviderUpdate(ctx context.Context, d *schema.ResourceData, func resourceIdentityProviderDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID, providerName, err := identityProviderParseResourceID(d.Id()) if err != nil { @@ -200,12 +202,12 @@ func resourceIdentityProviderDelete(ctx context.Context, d *schema.ResourceData, } log.Printf("[DEBUG] Deleting Cognito Identity Provider: %s", d.Id()) - _, err = conn.DeleteIdentityProviderWithContext(ctx, &cognitoidentityprovider.DeleteIdentityProviderInput{ + _, err = conn.DeleteIdentityProvider(ctx, &cognitoidentityprovider.DeleteIdentityProviderInput{ ProviderName: aws.String(providerName), UserPoolId: aws.String(userPoolID), }) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -235,15 +237,15 @@ func identityProviderParseResourceID(id string) (string, string, error) { return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected UserPoolID%[2]sProviderName", id, identityProviderResourceIDSeparator) } -func findIdentityProviderByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, userPoolID, providerName string) (*cognitoidentityprovider.IdentityProviderType, error) { +func findIdentityProviderByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID, providerName string) (*awstypes.IdentityProviderType, error) { input := &cognitoidentityprovider.DescribeIdentityProviderInput{ ProviderName: aws.String(providerName), UserPoolId: aws.String(userPoolID), } - output, err := 
conn.DescribeIdentityProviderWithContext(ctx, input) + output, err := conn.DescribeIdentityProvider(ctx, input) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/cognitoidp/identity_provider_test.go b/internal/service/cognitoidp/identity_provider_test.go index e6fc9169abf..20bea18f70e 100644 --- a/internal/service/cognitoidp/identity_provider_test.go +++ b/internal/service/cognitoidp/identity_provider_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccCognitoIDPIdentityProvider_basic(t *testing.T) { ctx := acctest.Context(t) - var identityProvider cognitoidentityprovider.IdentityProviderType + var identityProvider awstypes.IdentityProviderType resourceName := "aws_cognito_identity_provider.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -83,7 +83,7 @@ func TestAccCognitoIDPIdentityProvider_basic(t *testing.T) { func TestAccCognitoIDPIdentityProvider_idpIdentifiers(t *testing.T) { ctx := acctest.Context(t) - var identityProvider cognitoidentityprovider.IdentityProviderType + var identityProvider awstypes.IdentityProviderType resourceName := "aws_cognito_identity_provider.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -120,7 +120,7 @@ func TestAccCognitoIDPIdentityProvider_idpIdentifiers(t *testing.T) { func TestAccCognitoIDPIdentityProvider_saml(t *testing.T) { ctx := acctest.Context(t) - var identityProvider cognitoidentityprovider.IdentityProviderType + 
var identityProvider awstypes.IdentityProviderType resourceName := "aws_cognito_identity_provider.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -173,7 +173,7 @@ func TestAccCognitoIDPIdentityProvider_saml(t *testing.T) { func TestAccCognitoIDPIdentityProvider_disappears(t *testing.T) { ctx := acctest.Context(t) - var identityProvider cognitoidentityprovider.IdentityProviderType + var identityProvider awstypes.IdentityProviderType resourceName := "aws_cognito_identity_provider.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -197,7 +197,7 @@ func TestAccCognitoIDPIdentityProvider_disappears(t *testing.T) { func TestAccCognitoIDPIdentityProvider_Disappears_userPool(t *testing.T) { ctx := acctest.Context(t) - var identityProvider cognitoidentityprovider.IdentityProviderType + var identityProvider awstypes.IdentityProviderType resourceName := "aws_cognito_identity_provider.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -221,7 +221,7 @@ func TestAccCognitoIDPIdentityProvider_Disappears_userPool(t *testing.T) { func testAccCheckIdentityProviderDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_identity_provider" { @@ -245,14 +245,14 @@ func testAccCheckIdentityProviderDestroy(ctx context.Context) resource.TestCheck } } -func testAccCheckIdentityProviderExists(ctx context.Context, n string, v *cognitoidentityprovider.IdentityProviderType) resource.TestCheckFunc { +func testAccCheckIdentityProviderExists(ctx context.Context, n string, v *awstypes.IdentityProviderType) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := 
acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) output, err := tfcognitoidp.FindIdentityProviderByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes[names.AttrProviderName]) diff --git a/internal/service/cognitoidp/managed_user_pool_client.go b/internal/service/cognitoidp/managed_user_pool_client.go index e6eebf7eb75..a29a3a78e76 100644 --- a/internal/service/cognitoidp/managed_user_pool_client.go +++ b/internal/service/cognitoidp/managed_user_pool_client.go @@ -9,8 +9,9 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" @@ -24,14 +25,17 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,6 +47,7 @@ func newManagedUserPoolClientResource(context.Context) (resource.ResourceWithCon type managedUserPoolClientResource struct { framework.ResourceWithConfigure + framework.WithNoOpDelete } func (r *managedUserPoolClientResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { @@ -67,7 +72,7 @@ func (r *managedUserPoolClientResource) Schema(ctx context.Context, request reso Validators: []validator.Set{ setvalidator.SizeAtMost(3), setvalidator.ValueStringsAre( - stringvalidator.OneOf(cognitoidentityprovider.OAuthFlowType_Values()...), + enum.FrameworkValidate[awstypes.OAuthFlowType](), ), }, PlanModifiers: []planmodifier.Set{ @@ -151,7 +156,7 @@ func (r *managedUserPoolClientResource) Schema(ctx context.Context, request reso Computed: true, Validators: []validator.Set{ setvalidator.ValueStringsAre( - stringvalidator.OneOf(cognitoidentityprovider.ExplicitAuthFlowsType_Values()...), + enum.FrameworkValidate[awstypes.ExplicitAuthFlowsType](), ), }, PlanModifiers: []planmodifier.Set{ @@ -202,11 +207,9 @@ func (r *managedUserPoolClientResource) Schema(ctx context.Context, request reso Validators: userPoolClientNameValidator, }, "prevent_user_existence_errors": schema.StringAttribute{ - Optional: true, - Computed: true, - Validators: []validator.String{ - stringvalidator.OneOf(cognitoidentityprovider.PreventUserExistenceErrorTypes_Values()...), - }, + 
CustomType: fwtypes.StringEnumType[awstypes.PreventUserExistenceErrorTypes](), + Optional: true, + Computed: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.UseStateForUnknown(), }, @@ -306,28 +309,22 @@ func (r *managedUserPoolClientResource) Schema(ctx context.Context, request reso NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "access_token": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(cognitoidentityprovider.TimeUnitsTypeHours), - Validators: []validator.String{ - stringvalidator.OneOf(cognitoidentityprovider.TimeUnitsType_Values()...), - }, + CustomType: timeUnitsType, + Optional: true, + Computed: true, + Default: timeUnitsType.AttributeDefault(awstypes.TimeUnitsTypeHours), }, "id_token": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(cognitoidentityprovider.TimeUnitsTypeHours), - Validators: []validator.String{ - stringvalidator.OneOf(cognitoidentityprovider.TimeUnitsType_Values()...), - }, + CustomType: timeUnitsType, + Optional: true, + Computed: true, + Default: timeUnitsType.AttributeDefault(awstypes.TimeUnitsTypeHours), }, "refresh_token": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(cognitoidentityprovider.TimeUnitsTypeDays), - Validators: []validator.String{ - stringvalidator.OneOf(cognitoidentityprovider.TimeUnitsType_Values()...), - }, + CustomType: timeUnitsType, + Optional: true, + Computed: true, + Default: timeUnitsType.AttributeDefault(awstypes.TimeUnitsTypeDays), }, }, }, @@ -339,7 +336,7 @@ func (r *managedUserPoolClientResource) Schema(ctx context.Context, request reso } func (r *managedUserPoolClientResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { - conn := r.Meta().CognitoIDPConn(ctx) + conn := r.Meta().CognitoIDPClient(ctx) var config resourceManagedUserPoolClientData 
response.Diagnostics.Append(request.Config.Get(ctx, &config)...) @@ -353,52 +350,50 @@ func (r *managedUserPoolClientResource) Create(ctx context.Context, request reso return } - userPoolId := plan.UserPoolID.ValueString() - - var nameMatcher cognitoUserPoolClientDescriptionNameFilter + filter := tfslices.PredicateTrue[*awstypes.UserPoolClientDescription]() if namePattern := plan.NamePattern; !namePattern.IsUnknown() && !namePattern.IsNull() { - nameMatcher = func(name string) (bool, error) { - return namePattern.ValueRegexp().MatchString(name), nil + filter = func(v *awstypes.UserPoolClientDescription) bool { + return namePattern.ValueRegexp().MatchString(aws.ToString(v.ClientName)) } } if namePrefix := plan.NamePrefix; !namePrefix.IsUnknown() && !namePrefix.IsNull() { - nameMatcher = func(name string) (bool, error) { - return strings.HasPrefix(name, namePrefix.ValueString()), nil + filter = func(v *awstypes.UserPoolClientDescription) bool { + return strings.HasPrefix(aws.ToString(v.ClientName), namePrefix.ValueString()) } } + userPoolID := plan.UserPoolID.ValueString() + + poolClient, err := findUserPoolClientByName(ctx, conn, userPoolID, filter) - poolClient, err := FindCognitoUserPoolClientByName(ctx, conn, userPoolId, nameMatcher) if err != nil { - response.Diagnostics.AddError( - "acquiring Cognito User Pool Client", - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading Cognito Managed User Pool Client (%s)", userPoolID), err.Error()) + return } - config.AccessTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) - config.AllowedOauthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthFlows) - config.AllowedOauthFlowsUserPoolClient = flex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) - config.AllowedOauthScopes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthScopes) + config.AccessTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, 
poolClient.AccessTokenValidity) + config.AllowedOauthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthFlows) + config.AllowedOauthFlowsUserPoolClient = fwflex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) + config.AllowedOauthScopes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthScopes) config.AnalyticsConfiguration = flattenAnaylticsConfiguration(ctx, poolClient.AnalyticsConfiguration) - config.AuthSessionValidity = flex.Int64ToFramework(ctx, poolClient.AuthSessionValidity) - config.CallbackUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.CallbackURLs) - config.ClientSecret = flex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) - config.DefaultRedirectUri = flex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) - config.EnablePropagateAdditionalUserContextData = flex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) - config.EnableTokenRevocation = flex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) - config.ExplicitAuthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ExplicitAuthFlows) - config.ID = flex.StringToFramework(ctx, poolClient.ClientId) - config.IdTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) - config.LogoutUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.LogoutURLs) - config.Name = flex.StringToFramework(ctx, poolClient.ClientName) - config.PreventUserExistenceErrors = flex.StringToFrameworkLegacy(ctx, poolClient.PreventUserExistenceErrors) - config.ReadAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ReadAttributes) - config.RefreshTokenValidity = flex.Int64ToFramework(ctx, poolClient.RefreshTokenValidity) - config.SupportedIdentityProviders = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.SupportedIdentityProviders) + config.AuthSessionValidity = fwflex.Int32ToFramework(ctx, poolClient.AuthSessionValidity) + config.CallbackUrls = 
fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.CallbackURLs) + config.ClientSecret = fwflex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) + config.DefaultRedirectUri = fwflex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) + config.EnablePropagateAdditionalUserContextData = fwflex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) + config.EnableTokenRevocation = fwflex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) + config.ExplicitAuthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ExplicitAuthFlows) + config.ID = fwflex.StringToFramework(ctx, poolClient.ClientId) + config.IdTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) + config.LogoutUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.LogoutURLs) + config.Name = fwflex.StringToFramework(ctx, poolClient.ClientName) + config.PreventUserExistenceErrors = fwtypes.StringEnumValue(poolClient.PreventUserExistenceErrors) + config.ReadAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ReadAttributes) + config.RefreshTokenValidity = fwflex.Int32ValueToFramework(ctx, poolClient.RefreshTokenValidity) + config.SupportedIdentityProviders = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.SupportedIdentityProviders) config.TokenValidityUnits = flattenTokenValidityUnits(ctx, poolClient.TokenValidityUnits) - config.UserPoolID = flex.StringToFramework(ctx, poolClient.UserPoolId) - config.WriteAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.WriteAttributes) + config.UserPoolID = fwflex.StringToFramework(ctx, poolClient.UserPoolId) + config.WriteAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.WriteAttributes) if response.Diagnostics.HasError() { return @@ -489,12 +484,15 @@ func (r *managedUserPoolClientResource) Create(ctx context.Context, request reso return } - output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 
2*time.Minute, func() (interface{}, error) { - return conn.UpdateUserPoolClientWithContext(ctx, params) - }, cognitoidentityprovider.ErrCodeConcurrentModificationException) + const ( + timeout = 2 * time.Minute + ) + output, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, timeout, func() (interface{}, error) { + return conn.UpdateUserPoolClient(ctx, params) + }) if err != nil { response.Diagnostics.AddError( - fmt.Sprintf("updating Cognito User Pool Client (%s)", plan.ID.ValueString()), + fmt.Sprintf("updating Cognito Managed User Pool Client (%s)", plan.ID.ValueString()), err.Error(), ) return @@ -502,29 +500,29 @@ func (r *managedUserPoolClientResource) Create(ctx context.Context, request reso poolClient := output.(*cognitoidentityprovider.UpdateUserPoolClientOutput).UserPoolClient - config.AccessTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) - config.AllowedOauthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthFlows) - config.AllowedOauthFlowsUserPoolClient = flex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) - config.AllowedOauthScopes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthScopes) + config.AccessTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) + config.AllowedOauthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthFlows) + config.AllowedOauthFlowsUserPoolClient = fwflex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) + config.AllowedOauthScopes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthScopes) config.AnalyticsConfiguration = flattenAnaylticsConfiguration(ctx, poolClient.AnalyticsConfiguration) - config.AuthSessionValidity = flex.Int64ToFramework(ctx, poolClient.AuthSessionValidity) - config.CallbackUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.CallbackURLs) - config.ClientSecret = 
flex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) - config.DefaultRedirectUri = flex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) - config.EnablePropagateAdditionalUserContextData = flex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) - config.EnableTokenRevocation = flex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) - config.ExplicitAuthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ExplicitAuthFlows) - config.ID = flex.StringToFramework(ctx, poolClient.ClientId) - config.IdTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) - config.LogoutUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.LogoutURLs) - config.Name = flex.StringToFramework(ctx, poolClient.ClientName) - config.PreventUserExistenceErrors = flex.StringToFrameworkLegacy(ctx, poolClient.PreventUserExistenceErrors) - config.ReadAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ReadAttributes) - config.RefreshTokenValidity = flex.Int64ToFramework(ctx, poolClient.RefreshTokenValidity) - config.SupportedIdentityProviders = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.SupportedIdentityProviders) + config.AuthSessionValidity = fwflex.Int32ToFramework(ctx, poolClient.AuthSessionValidity) + config.CallbackUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.CallbackURLs) + config.ClientSecret = fwflex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) + config.DefaultRedirectUri = fwflex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) + config.EnablePropagateAdditionalUserContextData = fwflex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) + config.EnableTokenRevocation = fwflex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) + config.ExplicitAuthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ExplicitAuthFlows) + config.ID = fwflex.StringToFramework(ctx, poolClient.ClientId) + config.IdTokenValidity 
= fwflex.Int32ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) + config.LogoutUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.LogoutURLs) + config.Name = fwflex.StringToFramework(ctx, poolClient.ClientName) + config.PreventUserExistenceErrors = fwtypes.StringEnumValue(poolClient.PreventUserExistenceErrors) + config.ReadAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ReadAttributes) + config.RefreshTokenValidity = fwflex.Int32ValueToFramework(ctx, poolClient.RefreshTokenValidity) + config.SupportedIdentityProviders = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.SupportedIdentityProviders) config.TokenValidityUnits = flattenTokenValidityUnits(ctx, poolClient.TokenValidityUnits) - config.UserPoolID = flex.StringToFramework(ctx, poolClient.UserPoolId) - config.WriteAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.WriteAttributes) + config.UserPoolID = fwflex.StringToFramework(ctx, poolClient.UserPoolId) + config.WriteAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.WriteAttributes) if response.Diagnostics.HasError() { return @@ -541,47 +539,51 @@ func (r *managedUserPoolClientResource) Read(ctx context.Context, request resour return } - conn := r.Meta().CognitoIDPConn(ctx) + conn := r.Meta().CognitoIDPClient(ctx) + + poolClient, err := findUserPoolClientByTwoPartKey(ctx, conn, state.UserPoolID.ValueString(), state.ID.ValueString()) - poolClient, err := FindCognitoUserPoolClientByID(ctx, conn, state.UserPoolID.ValueString(), state.ID.ValueString()) if tfresource.NotFound(err) { - create.LogNotFoundRemoveState(names.CognitoIDP, create.ErrActionReading, ResNameUserPoolClient, state.ID.ValueString()) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) response.State.RemoveResource(ctx) + return } + if err != nil { - response.Diagnostics.Append(create.DiagErrorFramework(names.CognitoIDP, create.ErrActionReading, ResNameUserPoolClient, 
state.ID.ValueString(), err)) + response.Diagnostics.AddError(fmt.Sprintf("reading Cognito Managed User Pool Client (%s)", state.ID.ValueString()), err.Error()) + return } - state.AccessTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) - state.AllowedOauthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthFlows) - state.AllowedOauthFlowsUserPoolClient = flex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) - state.AllowedOauthScopes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthScopes) + state.AccessTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) + state.AllowedOauthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthFlows) + state.AllowedOauthFlowsUserPoolClient = fwflex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) + state.AllowedOauthScopes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthScopes) state.AnalyticsConfiguration = flattenAnaylticsConfiguration(ctx, poolClient.AnalyticsConfiguration) - state.AuthSessionValidity = flex.Int64ToFramework(ctx, poolClient.AuthSessionValidity) - state.CallbackUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.CallbackURLs) - state.ClientSecret = flex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) - state.DefaultRedirectUri = flex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) - state.EnablePropagateAdditionalUserContextData = flex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) - state.EnableTokenRevocation = flex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) - state.ExplicitAuthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ExplicitAuthFlows) - state.ID = flex.StringToFramework(ctx, poolClient.ClientId) - state.IdTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) - state.LogoutUrls = flex.FlattenFrameworkStringSetLegacy(ctx, 
poolClient.LogoutURLs) - state.Name = flex.StringToFramework(ctx, poolClient.ClientName) - state.PreventUserExistenceErrors = flex.StringToFrameworkLegacy(ctx, poolClient.PreventUserExistenceErrors) - state.ReadAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ReadAttributes) - state.RefreshTokenValidity = flex.Int64ToFramework(ctx, poolClient.RefreshTokenValidity) - state.SupportedIdentityProviders = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.SupportedIdentityProviders) + state.AuthSessionValidity = fwflex.Int32ToFramework(ctx, poolClient.AuthSessionValidity) + state.CallbackUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.CallbackURLs) + state.ClientSecret = fwflex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) + state.DefaultRedirectUri = fwflex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) + state.EnablePropagateAdditionalUserContextData = fwflex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) + state.EnableTokenRevocation = fwflex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) + state.ExplicitAuthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ExplicitAuthFlows) + state.ID = fwflex.StringToFramework(ctx, poolClient.ClientId) + state.IdTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) + state.LogoutUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.LogoutURLs) + state.Name = fwflex.StringToFramework(ctx, poolClient.ClientName) + state.PreventUserExistenceErrors = fwtypes.StringEnumValue(poolClient.PreventUserExistenceErrors) + state.ReadAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ReadAttributes) + state.RefreshTokenValidity = fwflex.Int32ValueToFramework(ctx, poolClient.RefreshTokenValidity) + state.SupportedIdentityProviders = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.SupportedIdentityProviders) if state.TokenValidityUnits.IsNull() && 
isDefaultTokenValidityUnits(poolClient.TokenValidityUnits) { elemType := fwtypes.NewObjectTypeOf[tokenValidityUnits](ctx).ObjectType state.TokenValidityUnits = types.ListNull(elemType) } else { state.TokenValidityUnits = flattenTokenValidityUnits(ctx, poolClient.TokenValidityUnits) } - state.UserPoolID = flex.StringToFramework(ctx, poolClient.UserPoolId) - state.WriteAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.WriteAttributes) + state.UserPoolID = fwflex.StringToFramework(ctx, poolClient.UserPoolId) + state.WriteAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.WriteAttributes) if response.Diagnostics.HasError() { return @@ -609,7 +611,7 @@ func (r *managedUserPoolClientResource) Update(ctx context.Context, request reso return } - conn := r.Meta().CognitoIDPConn(ctx) + conn := r.Meta().CognitoIDPClient(ctx) params := plan.updateInput(ctx, &response.Diagnostics) if response.Diagnostics.HasError() { @@ -617,17 +619,20 @@ func (r *managedUserPoolClientResource) Update(ctx context.Context, request reso } // If removing `token_validity_units`, reset to defaults if !state.TokenValidityUnits.IsNull() && plan.TokenValidityUnits.IsNull() { - params.TokenValidityUnits.AccessToken = aws.String(cognitoidentityprovider.TimeUnitsTypeHours) - params.TokenValidityUnits.IdToken = aws.String(cognitoidentityprovider.TimeUnitsTypeHours) - params.TokenValidityUnits.RefreshToken = aws.String(cognitoidentityprovider.TimeUnitsTypeDays) + params.TokenValidityUnits.AccessToken = awstypes.TimeUnitsTypeHours + params.TokenValidityUnits.IdToken = awstypes.TimeUnitsTypeHours + params.TokenValidityUnits.RefreshToken = awstypes.TimeUnitsTypeDays } - output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.UpdateUserPoolClientWithContext(ctx, params) - }, cognitoidentityprovider.ErrCodeConcurrentModificationException) + const ( + timeout = 2 * time.Minute + ) + output, err := 
tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, timeout, func() (interface{}, error) { + return conn.UpdateUserPoolClient(ctx, params) + }) if err != nil { response.Diagnostics.AddError( - fmt.Sprintf("updating Cognito User Pool Client (%s)", plan.ID.ValueString()), + fmt.Sprintf("updating Cognito Managed User Pool Client (%s)", plan.ID.ValueString()), err.Error(), ) return @@ -635,34 +640,34 @@ func (r *managedUserPoolClientResource) Update(ctx context.Context, request reso poolClient := output.(*cognitoidentityprovider.UpdateUserPoolClientOutput).UserPoolClient - config.AccessTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) - config.AllowedOauthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthFlows) - config.AllowedOauthFlowsUserPoolClient = flex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) - config.AllowedOauthScopes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthScopes) + config.AccessTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) + config.AllowedOauthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthFlows) + config.AllowedOauthFlowsUserPoolClient = fwflex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) + config.AllowedOauthScopes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthScopes) config.AnalyticsConfiguration = flattenAnaylticsConfiguration(ctx, poolClient.AnalyticsConfiguration) - config.AuthSessionValidity = flex.Int64ToFramework(ctx, poolClient.AuthSessionValidity) - config.CallbackUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.CallbackURLs) - config.ClientSecret = flex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) - config.DefaultRedirectUri = flex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) - config.EnablePropagateAdditionalUserContextData = flex.BoolToFramework(ctx, 
poolClient.EnablePropagateAdditionalUserContextData) - config.EnableTokenRevocation = flex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) - config.ExplicitAuthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ExplicitAuthFlows) - config.ID = flex.StringToFramework(ctx, poolClient.ClientId) - config.IdTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) - config.LogoutUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.LogoutURLs) - config.Name = flex.StringToFramework(ctx, poolClient.ClientName) - config.PreventUserExistenceErrors = flex.StringToFrameworkLegacy(ctx, poolClient.PreventUserExistenceErrors) - config.ReadAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ReadAttributes) - config.RefreshTokenValidity = flex.Int64ToFramework(ctx, poolClient.RefreshTokenValidity) - config.SupportedIdentityProviders = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.SupportedIdentityProviders) + config.AuthSessionValidity = fwflex.Int32ToFramework(ctx, poolClient.AuthSessionValidity) + config.CallbackUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.CallbackURLs) + config.ClientSecret = fwflex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) + config.DefaultRedirectUri = fwflex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) + config.EnablePropagateAdditionalUserContextData = fwflex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) + config.EnableTokenRevocation = fwflex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) + config.ExplicitAuthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ExplicitAuthFlows) + config.ID = fwflex.StringToFramework(ctx, poolClient.ClientId) + config.IdTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) + config.LogoutUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.LogoutURLs) + config.Name = fwflex.StringToFramework(ctx, poolClient.ClientName) + 
config.PreventUserExistenceErrors = fwtypes.StringEnumValue(poolClient.PreventUserExistenceErrors) + config.ReadAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ReadAttributes) + config.RefreshTokenValidity = fwflex.Int32ValueToFramework(ctx, poolClient.RefreshTokenValidity) + config.SupportedIdentityProviders = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.SupportedIdentityProviders) if !state.TokenValidityUnits.IsNull() && plan.TokenValidityUnits.IsNull() && isDefaultTokenValidityUnits(poolClient.TokenValidityUnits) { elemType := fwtypes.NewObjectTypeOf[tokenValidityUnits](ctx).ObjectType config.TokenValidityUnits = types.ListNull(elemType) } else { config.TokenValidityUnits = flattenTokenValidityUnits(ctx, poolClient.TokenValidityUnits) } - config.UserPoolID = flex.StringToFramework(ctx, poolClient.UserPoolId) - config.WriteAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.WriteAttributes) + config.UserPoolID = fwflex.StringToFramework(ctx, poolClient.UserPoolId) + config.WriteAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.WriteAttributes) if response.Diagnostics.HasError() { return @@ -671,19 +676,6 @@ func (r *managedUserPoolClientResource) Update(ctx context.Context, request reso response.Diagnostics.Append(response.State.Set(ctx, &config)...) } -func (r *managedUserPoolClientResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { - var state resourceManagedUserPoolClientData - response.Diagnostics.Append(request.State.Get(ctx, &state)...) - if response.Diagnostics.HasError() { - return - } - - response.Diagnostics.AddWarning( - fmt.Sprintf("Cognito User Pool Client (%s) not deleted", state.ID.ValueString()), - "User Pool Client is managed by another service and will be deleted when that resource is deleted. 
Removed from Terraform state.", - ) -} - func (r *managedUserPoolClientResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { parts := strings.Split(request.ID, "/") if len(parts) != 2 { @@ -725,58 +717,95 @@ func (r *managedUserPoolClientResource) ConfigValidators(ctx context.Context) [] } } +func findUserPoolClientByName(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID string, filter tfslices.Predicate[*awstypes.UserPoolClientDescription]) (*awstypes.UserPoolClientType, error) { + input := &cognitoidentityprovider.ListUserPoolClientsInput{ + UserPoolId: aws.String(userPoolID), + } + var userPoolClients []awstypes.UserPoolClientDescription + + pages := cognitoidentityprovider.NewListUserPoolClientsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.UserPoolClients { + if filter(&v) { + userPoolClients = append(userPoolClients, v) + } + } + } + + userPoolClient, err := tfresource.AssertSingleValueResult(userPoolClients) + + if err != nil { + return nil, err + } + + return findUserPoolClientByTwoPartKey(ctx, conn, userPoolID, aws.ToString(userPoolClient.ClientId)) +} + type resourceManagedUserPoolClientData struct { - AccessTokenValidity types.Int64 `tfsdk:"access_token_validity"` - AllowedOauthFlows types.Set `tfsdk:"allowed_oauth_flows"` - AllowedOauthFlowsUserPoolClient types.Bool `tfsdk:"allowed_oauth_flows_user_pool_client"` - AllowedOauthScopes types.Set `tfsdk:"allowed_oauth_scopes"` - AnalyticsConfiguration types.List `tfsdk:"analytics_configuration"` - AuthSessionValidity types.Int64 `tfsdk:"auth_session_validity"` - CallbackUrls types.Set `tfsdk:"callback_urls"` - ClientSecret types.String `tfsdk:"client_secret"` - 
DefaultRedirectUri types.String `tfsdk:"default_redirect_uri"` - EnablePropagateAdditionalUserContextData types.Bool `tfsdk:"enable_propagate_additional_user_context_data"` - EnableTokenRevocation types.Bool `tfsdk:"enable_token_revocation"` - ExplicitAuthFlows types.Set `tfsdk:"explicit_auth_flows"` - ID types.String `tfsdk:"id"` - IdTokenValidity types.Int64 `tfsdk:"id_token_validity"` - LogoutUrls types.Set `tfsdk:"logout_urls"` - Name types.String `tfsdk:"name"` - NamePattern fwtypes.Regexp `tfsdk:"name_pattern"` - NamePrefix types.String `tfsdk:"name_prefix"` - PreventUserExistenceErrors types.String `tfsdk:"prevent_user_existence_errors"` - ReadAttributes types.Set `tfsdk:"read_attributes"` - RefreshTokenValidity types.Int64 `tfsdk:"refresh_token_validity"` - SupportedIdentityProviders types.Set `tfsdk:"supported_identity_providers"` - TokenValidityUnits types.List `tfsdk:"token_validity_units"` - UserPoolID types.String `tfsdk:"user_pool_id"` - WriteAttributes types.Set `tfsdk:"write_attributes"` + AccessTokenValidity types.Int64 `tfsdk:"access_token_validity"` + AllowedOauthFlows types.Set `tfsdk:"allowed_oauth_flows"` + AllowedOauthFlowsUserPoolClient types.Bool `tfsdk:"allowed_oauth_flows_user_pool_client"` + AllowedOauthScopes types.Set `tfsdk:"allowed_oauth_scopes"` + AnalyticsConfiguration types.List `tfsdk:"analytics_configuration"` + AuthSessionValidity types.Int64 `tfsdk:"auth_session_validity"` + CallbackUrls types.Set `tfsdk:"callback_urls"` + ClientSecret types.String `tfsdk:"client_secret"` + DefaultRedirectUri types.String `tfsdk:"default_redirect_uri"` + EnablePropagateAdditionalUserContextData types.Bool `tfsdk:"enable_propagate_additional_user_context_data"` + EnableTokenRevocation types.Bool `tfsdk:"enable_token_revocation"` + ExplicitAuthFlows types.Set `tfsdk:"explicit_auth_flows"` + ID types.String `tfsdk:"id"` + IdTokenValidity types.Int64 `tfsdk:"id_token_validity"` + LogoutUrls types.Set `tfsdk:"logout_urls"` + Name types.String 
`tfsdk:"name"` + NamePattern fwtypes.Regexp `tfsdk:"name_pattern"` + NamePrefix types.String `tfsdk:"name_prefix"` + PreventUserExistenceErrors fwtypes.StringEnum[awstypes.PreventUserExistenceErrorTypes] `tfsdk:"prevent_user_existence_errors"` + ReadAttributes types.Set `tfsdk:"read_attributes"` + RefreshTokenValidity types.Int64 `tfsdk:"refresh_token_validity"` + SupportedIdentityProviders types.Set `tfsdk:"supported_identity_providers"` + TokenValidityUnits types.List `tfsdk:"token_validity_units"` + UserPoolID types.String `tfsdk:"user_pool_id"` + WriteAttributes types.Set `tfsdk:"write_attributes"` } func (data resourceManagedUserPoolClientData) updateInput(ctx context.Context, diags *diag.Diagnostics) *cognitoidentityprovider.UpdateUserPoolClientInput { return &cognitoidentityprovider.UpdateUserPoolClientInput{ - AccessTokenValidity: flex.Int64FromFrameworkLegacy(ctx, data.AccessTokenValidity), - AllowedOAuthFlows: flex.ExpandFrameworkStringSet(ctx, data.AllowedOauthFlows), - AllowedOAuthFlowsUserPoolClient: flex.BoolFromFramework(ctx, data.AllowedOauthFlowsUserPoolClient), - AllowedOAuthScopes: flex.ExpandFrameworkStringSet(ctx, data.AllowedOauthScopes), + AccessTokenValidity: fwflex.Int32FromFrameworkLegacy(ctx, data.AccessTokenValidity), + AllowedOAuthFlows: fwflex.ExpandFrameworkStringyValueSet[awstypes.OAuthFlowType](ctx, data.AllowedOauthFlows), + AllowedOAuthFlowsUserPoolClient: fwflex.BoolValueFromFramework(ctx, data.AllowedOauthFlowsUserPoolClient), + AllowedOAuthScopes: fwflex.ExpandFrameworkStringValueSet(ctx, data.AllowedOauthScopes), AnalyticsConfiguration: expandAnaylticsConfiguration(ctx, data.AnalyticsConfiguration, diags), - AuthSessionValidity: flex.Int64FromFramework(ctx, data.AuthSessionValidity), - CallbackURLs: flex.ExpandFrameworkStringSet(ctx, data.CallbackUrls), - ClientId: flex.StringFromFramework(ctx, data.ID), - ClientName: flex.StringFromFramework(ctx, data.Name), - DefaultRedirectURI: flex.StringFromFrameworkLegacy(ctx, 
data.DefaultRedirectUri), - EnablePropagateAdditionalUserContextData: flex.BoolFromFramework(ctx, data.EnablePropagateAdditionalUserContextData), - EnableTokenRevocation: flex.BoolFromFramework(ctx, data.EnableTokenRevocation), - ExplicitAuthFlows: flex.ExpandFrameworkStringSet(ctx, data.ExplicitAuthFlows), - IdTokenValidity: flex.Int64FromFrameworkLegacy(ctx, data.IdTokenValidity), - LogoutURLs: flex.ExpandFrameworkStringSet(ctx, data.LogoutUrls), - PreventUserExistenceErrors: flex.StringFromFrameworkLegacy(ctx, data.PreventUserExistenceErrors), - ReadAttributes: flex.ExpandFrameworkStringSet(ctx, data.ReadAttributes), - RefreshTokenValidity: flex.Int64FromFramework(ctx, data.RefreshTokenValidity), - SupportedIdentityProviders: flex.ExpandFrameworkStringSet(ctx, data.SupportedIdentityProviders), + AuthSessionValidity: fwflex.Int32FromFramework(ctx, data.AuthSessionValidity), + CallbackURLs: fwflex.ExpandFrameworkStringValueSet(ctx, data.CallbackUrls), + ClientId: fwflex.StringFromFramework(ctx, data.ID), + ClientName: fwflex.StringFromFramework(ctx, data.Name), + DefaultRedirectURI: fwflex.StringFromFrameworkLegacy(ctx, data.DefaultRedirectUri), + EnablePropagateAdditionalUserContextData: fwflex.BoolFromFramework(ctx, data.EnablePropagateAdditionalUserContextData), + EnableTokenRevocation: fwflex.BoolFromFramework(ctx, data.EnableTokenRevocation), + ExplicitAuthFlows: fwflex.ExpandFrameworkStringyValueSet[awstypes.ExplicitAuthFlowsType](ctx, data.ExplicitAuthFlows), + IdTokenValidity: fwflex.Int32FromFrameworkLegacy(ctx, data.IdTokenValidity), + LogoutURLs: fwflex.ExpandFrameworkStringValueSet(ctx, data.LogoutUrls), + PreventUserExistenceErrors: data.PreventUserExistenceErrors.ValueEnum(), + ReadAttributes: fwflex.ExpandFrameworkStringValueSet(ctx, data.ReadAttributes), + RefreshTokenValidity: fwflex.Int32ValueFromFramework(ctx, data.RefreshTokenValidity), + SupportedIdentityProviders: fwflex.ExpandFrameworkStringValueSet(ctx, data.SupportedIdentityProviders), 
TokenValidityUnits: expandTokenValidityUnits(ctx, data.TokenValidityUnits, diags), - UserPoolId: flex.StringFromFramework(ctx, data.UserPoolID), - WriteAttributes: flex.ExpandFrameworkStringSet(ctx, data.WriteAttributes), + UserPoolId: fwflex.StringFromFramework(ctx, data.UserPoolID), + WriteAttributes: fwflex.ExpandFrameworkStringValueSet(ctx, data.WriteAttributes), } } @@ -791,8 +820,8 @@ func (v resourceManagedUserPoolClientAccessTokenValidityValidator) ValidateResou func(rupcd resourceManagedUserPoolClientData) types.Int64 { return rupcd.AccessTokenValidity }, - func(tvu *tokenValidityUnits) types.String { - return tvu.AccessToken + func(tvu *tokenValidityUnits) awstypes.TimeUnitsType { + return tvu.AccessToken.ValueEnum() }, ) } @@ -808,8 +837,8 @@ func (v resourceManagedUserPoolClientIDTokenValidityValidator) ValidateResource( func(rupcd resourceManagedUserPoolClientData) types.Int64 { return rupcd.IdTokenValidity }, - func(tvu *tokenValidityUnits) types.String { - return tvu.IdToken + func(tvu *tokenValidityUnits) awstypes.TimeUnitsType { + return tvu.IdToken.ValueEnum() }, ) } @@ -825,8 +854,8 @@ func (v resourceManagedUserPoolClientRefreshTokenValidityValidator) ValidateReso func(rupcd resourceManagedUserPoolClientData) types.Int64 { return rupcd.RefreshTokenValidity }, - func(tvu *tokenValidityUnits) types.String { - return tvu.RefreshToken + func(tvu *tokenValidityUnits) awstypes.TimeUnitsType { + return tvu.RefreshToken.ValueEnum() }, ) } @@ -846,7 +875,7 @@ func (v resourceManagedUserPoolClientValidityValidator) MarkdownDescription(_ co return fmt.Sprintf("must have a duration between %s and %s", v.min, v.max) } -func (v resourceManagedUserPoolClientValidityValidator) validate(ctx context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse, valF func(resourceManagedUserPoolClientData) types.Int64, unitF func(*tokenValidityUnits) types.String) { +func (v resourceManagedUserPoolClientValidityValidator) validate(ctx 
context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse, valF func(resourceManagedUserPoolClientData) types.Int64, unitF func(*tokenValidityUnits) awstypes.TimeUnitsType) { var config resourceManagedUserPoolClientData resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) if resp.Diagnostics.HasError() { @@ -859,25 +888,23 @@ func (v resourceManagedUserPoolClientValidityValidator) validate(ctx context.Con return } - val := aws.Int64Value(flex.Int64FromFramework(ctx, x)) - var duration time.Duration units := resolveTokenValidityUnits(ctx, config.TokenValidityUnits, &resp.Diagnostics) if resp.Diagnostics.HasError() { return } - if units == nil { + if val := aws.ToInt64(fwflex.Int64FromFramework(ctx, x)); units == nil { duration = time.Duration(val * int64(v.defaultUnit)) } else { - switch aws.StringValue(flex.StringFromFramework(ctx, unitF(units))) { - case cognitoidentityprovider.TimeUnitsTypeSeconds: + switch unitF(units) { + case awstypes.TimeUnitsTypeSeconds: duration = time.Duration(val * int64(time.Second)) - case cognitoidentityprovider.TimeUnitsTypeMinutes: + case awstypes.TimeUnitsTypeMinutes: duration = time.Duration(val * int64(time.Minute)) - case cognitoidentityprovider.TimeUnitsTypeHours: + case awstypes.TimeUnitsTypeHours: duration = time.Duration(val * int64(time.Hour)) - case cognitoidentityprovider.TimeUnitsTypeDays: + case awstypes.TimeUnitsTypeDays: duration = time.Duration(val * 24 * int64(time.Hour)) } } diff --git a/internal/service/cognitoidp/managed_user_pool_client_test.go b/internal/service/cognitoidp/managed_user_pool_client_test.go index 434bae2d5cb..bf7c746a655 100644 --- a/internal/service/cognitoidp/managed_user_pool_client_test.go +++ b/internal/service/cognitoidp/managed_user_pool_client_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -31,7 +31,7 @@ func randomOpenSearchDomainName() string { func TestAccCognitoIDPManagedUserPoolClient_basic(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -92,7 +92,7 @@ func TestAccCognitoIDPManagedUserPoolClient_basic(t *testing.T) { func TestAccCognitoIDPManagedUserPoolClient_namePattern(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -124,7 +124,7 @@ func TestAccCognitoIDPManagedUserPoolClient_namePattern(t *testing.T) { func TestAccCognitoIDPManagedUserPoolClient_enableRevocation(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -173,7 +173,7 @@ func TestAccCognitoIDPManagedUserPoolClient_enableRevocation(t *testing.T) { func TestAccCognitoIDPManagedUserPoolClient_accessTokenValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -234,11 +234,11 @@ func TestAccCognitoIDPManagedUserPoolClient_accessTokenValidity_error(t *testing ExpectError: regexache.MustCompile(`Attribute access_token_validity must have a duration between 5m0s 
and\s+24h0m0s, got: 25h0m0s`), }, { - Config: testAccManagedUserPoolClientConfig_accessTokenValidityUnit(rName, 2, cognitoidentityprovider.TimeUnitsTypeDays), + Config: testAccManagedUserPoolClientConfig_accessTokenValidityUnit(rName, 2, string(awstypes.TimeUnitsTypeDays)), ExpectError: regexache.MustCompile(`Attribute access_token_validity must have a duration between 5m0s and\s+24h0m0s, got: 48h0m0s`), }, { - Config: testAccManagedUserPoolClientConfig_accessTokenValidityUnit(rName, 4, cognitoidentityprovider.TimeUnitsTypeMinutes), + Config: testAccManagedUserPoolClientConfig_accessTokenValidityUnit(rName, 4, string(awstypes.TimeUnitsTypeMinutes)), ExpectError: regexache.MustCompile(`Attribute access_token_validity must have a duration between 5m0s and\s+24h0m0s, got: 4m0s`), }, }, @@ -247,7 +247,7 @@ func TestAccCognitoIDPManagedUserPoolClient_accessTokenValidity_error(t *testing func TestAccCognitoIDPManagedUserPoolClient_idTokenValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -308,11 +308,11 @@ func TestAccCognitoIDPManagedUserPoolClient_idTokenValidity_error(t *testing.T) ExpectError: regexache.MustCompile(`Attribute id_token_validity must have a duration between 5m0s and\s+24h0m0s,\s+got: 25h0m0s`), }, { - Config: testAccManagedUserPoolClientConfig_idTokenValidityUnit(rName, 2, cognitoidentityprovider.TimeUnitsTypeDays), + Config: testAccManagedUserPoolClientConfig_idTokenValidityUnit(rName, 2, string(awstypes.TimeUnitsTypeDays)), ExpectError: regexache.MustCompile(`Attribute id_token_validity must have a duration between 5m0s and\s+24h0m0s,\s+got: 48h0m0s`), }, { - Config: testAccManagedUserPoolClientConfig_idTokenValidityUnit(rName, 4, cognitoidentityprovider.TimeUnitsTypeMinutes), + Config: testAccManagedUserPoolClientConfig_idTokenValidityUnit(rName, 4, 
string(awstypes.TimeUnitsTypeMinutes)), ExpectError: regexache.MustCompile(`Attribute id_token_validity must have a duration between 5m0s and\s+24h0m0s,\s+got: 4m0s`), }, }, @@ -321,7 +321,7 @@ func TestAccCognitoIDPManagedUserPoolClient_idTokenValidity_error(t *testing.T) func TestAccCognitoIDPManagedUserPoolClient_refreshTokenValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -382,7 +382,7 @@ func TestAccCognitoIDPManagedUserPoolClient_refreshTokenValidity_error(t *testin ExpectError: regexache.MustCompile(`Attribute refresh_token_validity must have a duration between 1h0m0s and\s+87600h0m0s,\s+got: 87624h0m0s`), }, { - Config: testAccManagedUserPoolClientConfig_refreshTokenValidityUnit(rName, 59, cognitoidentityprovider.TimeUnitsTypeMinutes), + Config: testAccManagedUserPoolClientConfig_refreshTokenValidityUnit(rName, 59, string(awstypes.TimeUnitsTypeMinutes)), ExpectError: regexache.MustCompile(`Attribute refresh_token_validity must have a duration between 1h0m0s and\s+87600h0m0s,\s+got: 59m0s`), }, }, @@ -391,7 +391,7 @@ func TestAccCognitoIDPManagedUserPoolClient_refreshTokenValidity_error(t *testin func TestAccCognitoIDPManagedUserPoolClient_tokenValidityUnits(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -461,7 +461,7 @@ func TestAccCognitoIDPManagedUserPoolClient_tokenValidityUnits(t *testing.T) { func TestAccCognitoIDPManagedUserPoolClient_tokenValidityUnits_explicitDefaults(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := 
"aws_cognito_managed_user_pool_client.test" @@ -487,7 +487,7 @@ func TestAccCognitoIDPManagedUserPoolClient_tokenValidityUnits_explicitDefaults( func TestAccCognitoIDPManagedUserPoolClient_tokenValidityUnits_AccessToken(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -541,7 +541,7 @@ func TestAccCognitoIDPManagedUserPoolClient_tokenValidityUnits_AccessToken(t *te func TestAccCognitoIDPManagedUserPoolClient_tokenValidityUnitsWTokenValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -597,7 +597,7 @@ func TestAccCognitoIDPManagedUserPoolClient_tokenValidityUnitsWTokenValidity(t * func TestAccCognitoIDPManagedUserPoolClient_allFields(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -655,7 +655,7 @@ func TestAccCognitoIDPManagedUserPoolClient_allFields(t *testing.T) { func TestAccCognitoIDPManagedUserPoolClient_allFieldsUpdatingOneField(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -716,7 +716,7 @@ func TestAccCognitoIDPManagedUserPoolClient_allFieldsUpdatingOneField(t *testing func TestAccCognitoIDPManagedUserPoolClient_analyticsApplicationID(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := 
randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" pinpointResourceName := "aws_pinpoint_app.analytics" @@ -725,7 +725,7 @@ func TestAccCognitoIDPManagedUserPoolClient_analyticsApplicationID(t *testing.T) PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckIdentityProvider(ctx, t) - testAccPreCheckPinpointApp(ctx, t) + acctest.PreCheckPinpointApp(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -795,7 +795,7 @@ func TestAccCognitoIDPManagedUserPoolClient_analyticsWithARN(t *testing.T) { t.Skip("this test hangs on deletion") ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" pinpointResourceName := "aws_pinpoint_app.analytics" @@ -804,7 +804,7 @@ func TestAccCognitoIDPManagedUserPoolClient_analyticsWithARN(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckIdentityProvider(ctx, t) - testAccPreCheckPinpointApp(ctx, t) + acctest.PreCheckPinpointApp(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -856,7 +856,7 @@ func TestAccCognitoIDPManagedUserPoolClient_analyticsWithARN(t *testing.T) { func TestAccCognitoIDPManagedUserPoolClient_authSessionValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -904,7 +904,7 @@ func TestAccCognitoIDPManagedUserPoolClient_authSessionValidity(t *testing.T) { func TestAccCognitoIDPManagedUserPoolClient_Disappears_OpenSearchDomain(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client 
awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -928,7 +928,7 @@ func TestAccCognitoIDPManagedUserPoolClient_Disappears_OpenSearchDomain(t *testi func TestAccCognitoIDPManagedUserPoolClient_emptySets(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" @@ -971,7 +971,7 @@ func TestAccCognitoIDPManagedUserPoolClient_emptySets(t *testing.T) { func TestAccCognitoIDPManagedUserPoolClient_nulls(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := randomOpenSearchDomainName() resourceName := "aws_cognito_managed_user_pool_client.test" diff --git a/internal/service/cognitoidp/resource_server.go b/internal/service/cognitoidp/resource_server.go index 9c12cd08d22..4c9d2ab67d6 100644 --- a/internal/service/cognitoidp/resource_server.go +++ b/internal/service/cognitoidp/resource_server.go @@ -5,20 +5,22 @@ package cognitoidp import ( "context" - "errors" "fmt" "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -34,7 +36,6 @@ func resourceResourceServer() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, - // https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateResourceServer.html Schema: map[string]*schema.Schema{ names.AttrIdentifier: { Type: schema.TypeString, @@ -83,112 +84,86 @@ func resourceResourceServer() *schema.Resource { func resourceResourceServerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) identifier := d.Get(names.AttrIdentifier).(string) userPoolID := d.Get(names.AttrUserPoolID).(string) - - params := &cognitoidentityprovider.CreateResourceServerInput{ + id := resourceServerCreateResourceID(userPoolID, identifier) + input := &cognitoidentityprovider.CreateResourceServerInput{ Identifier: aws.String(identifier), Name: aws.String(d.Get(names.AttrName).(string)), UserPoolId: aws.String(userPoolID), } if v, ok := d.GetOk(names.AttrScope); ok { - configs := v.(*schema.Set).List() - params.Scopes = expandServerScope(configs) + input.Scopes = expandResourceServerScopeTypes(v.(*schema.Set).List()) } - log.Printf("[DEBUG] Creating Cognito Resource Server: %s", params) - - _, err := conn.CreateResourceServerWithContext(ctx, params) + _, err := conn.CreateResourceServer(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Cognito Resource Server: %s", err) + return sdkdiag.AppendErrorf(diags, "creating Cognito Resource Server (%s): %s", id, err) } - d.SetId(fmt.Sprintf("%s|%s", userPoolID, 
identifier)) + d.SetId(id) return append(diags, resourceResourceServerRead(ctx, d, meta)...) } func resourceResourceServerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - userPoolID, identifier, err := DecodeResourceServerID(d.Id()) + userPoolID, identifier, err := resourceServerParseResourceID(d.Id()) if err != nil { - return create.AppendDiagError(diags, names.CognitoIDP, create.ErrActionReading, ResNameResourceServer, d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - params := &cognitoidentityprovider.DescribeResourceServerInput{ - Identifier: aws.String(identifier), - UserPoolId: aws.String(userPoolID), - } - - log.Printf("[DEBUG] Reading Cognito Resource Server: %s", params) - - resp, err := conn.DescribeResourceServerWithContext(ctx, params) + resourceServer, err := findResourceServerByTwoPartKey(ctx, conn, userPoolID, identifier) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { - create.LogNotFoundRemoveState(names.CognitoIDP, create.ErrActionReading, ResNameResourceServer, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] Cognito Resource Server %s not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return create.AppendDiagError(diags, names.CognitoIDP, create.ErrActionReading, ResNameResourceServer, d.Id(), err) - } - - if !d.IsNewResource() && (resp == nil || resp.ResourceServer == nil) { - create.LogNotFoundRemoveState(names.CognitoIDP, create.ErrActionReading, ResNameResourceServer, d.Id()) - d.SetId("") - return diags - } - - if d.IsNewResource() && (resp == nil || resp.ResourceServer == nil) { - return create.AppendDiagError(diags, names.CognitoIDP, create.ErrActionReading, ResNameResourceServer, d.Id(), errors.New("not found after 
creation")) + return sdkdiag.AppendErrorf(diags, "reading Cognito Resource Server (%s): %s", d.Id(), err) } - d.Set(names.AttrIdentifier, resp.ResourceServer.Identifier) - d.Set(names.AttrName, resp.ResourceServer.Name) - d.Set(names.AttrUserPoolID, resp.ResourceServer.UserPoolId) - - scopes := flattenServerScope(resp.ResourceServer.Scopes) + identifier = aws.ToString(resourceServer.Identifier) + d.Set(names.AttrIdentifier, identifier) + d.Set(names.AttrName, resourceServer.Name) + scopes := flattenResourceServerScopeTypes(resourceServer.Scopes) if err := d.Set(names.AttrScope, scopes); err != nil { - return sdkdiag.AppendErrorf(diags, "Failed setting schema: %s", err) + return sdkdiag.AppendErrorf(diags, "setting scope: %s", err) } + d.Set("scope_identifiers", tfslices.ApplyToAll(scopes, func(tfMap map[string]interface{}) string { + return identifier + "/" + tfMap["scope_name"].(string) + })) + d.Set(names.AttrUserPoolID, resourceServer.UserPoolId) - var scopeIdentifiers []string - for _, elem := range scopes { - scopeIdentifier := fmt.Sprintf("%s/%s", aws.StringValue(resp.ResourceServer.Identifier), elem["scope_name"].(string)) - scopeIdentifiers = append(scopeIdentifiers, scopeIdentifier) - } - if err := d.Set("scope_identifiers", scopeIdentifiers); err != nil { - return sdkdiag.AppendErrorf(diags, "setting scope_identifiers: %s", err) - } return diags } func resourceResourceServerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - userPoolID, identifier, err := DecodeResourceServerID(d.Id()) + userPoolID, identifier, err := resourceServerParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Cognito Resource Server (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - params := &cognitoidentityprovider.UpdateResourceServerInput{ + input := 
&cognitoidentityprovider.UpdateResourceServerInput{ Identifier: aws.String(identifier), Name: aws.String(d.Get(names.AttrName).(string)), - Scopes: expandServerScope(d.Get(names.AttrScope).(*schema.Set).List()), + Scopes: expandResourceServerScopeTypes(d.Get(names.AttrScope).(*schema.Set).List()), UserPoolId: aws.String(userPoolID), } - log.Printf("[DEBUG] Updating Cognito Resource Server: %s", params) + _, err = conn.UpdateResourceServer(ctx, input) - _, err = conn.UpdateResourceServerWithContext(ctx, params) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Cognito Resource Server (%s): %s", d.Id(), err) } @@ -198,19 +173,20 @@ func resourceResourceServerUpdate(ctx context.Context, d *schema.ResourceData, m func resourceResourceServerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - userPoolID, identifier, err := DecodeResourceServerID(d.Id()) + userPoolID, identifier, err := resourceServerParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Cognito Resource Server (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - _, err = conn.DeleteResourceServerWithContext(ctx, &cognitoidentityprovider.DeleteResourceServerInput{ + log.Printf("[DEBUG] Deleting Cognito Resource Server: %s", d.Id()) + _, err = conn.DeleteResourceServer(ctx, &cognitoidentityprovider.DeleteResourceServerInput{ Identifier: aws.String(identifier), UserPoolId: aws.String(userPoolID), }) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -221,10 +197,83 @@ func resourceResourceServerDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func DecodeResourceServerID(id string) (string, string, error) { - idParts := strings.Split(id, 
"|") - if len(idParts) != 2 { - return "", "", fmt.Errorf("expected ID in format UserPoolID|Identifier, received: %s", id) +const resourceServerResourceIDSeparator = "|" + +func resourceServerCreateResourceID(userPoolID, identifier string) string { + parts := []string{userPoolID, identifier} + id := strings.Join(parts, resourceServerResourceIDSeparator) + + return id +} + +func resourceServerParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, resourceServerResourceIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected UserPoolID%[2]sIdentifier", id, resourceServerResourceIDSeparator) +} + +func findResourceServerByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID, identifier string) (*awstypes.ResourceServerType, error) { + input := &cognitoidentityprovider.DescribeResourceServerInput{ + Identifier: aws.String(identifier), + UserPoolId: aws.String(userPoolID), + } + + output, err := conn.DescribeResourceServer(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.ResourceServer == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ResourceServer, nil +} + +func expandResourceServerScopeTypes(tfList []interface{}) []awstypes.ResourceServerScopeType { + apiObjects := make([]awstypes.ResourceServerScopeType, len(tfList)) + + for i, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]interface{}) + apiObject := awstypes.ResourceServerScopeType{} + + if v, ok := tfMap["scope_description"]; ok { + apiObject.ScopeDescription = aws.String(v.(string)) + } + + if v, ok := tfMap["scope_name"]; ok { + apiObject.ScopeName = aws.String(v.(string)) + } + + apiObjects[i] = apiObject } - 
return idParts[0], idParts[1], nil + + return apiObjects +} + +func flattenResourceServerScopeTypes(apiObjects []awstypes.ResourceServerScopeType) []map[string]interface{} { + tfList := make([]map[string]interface{}, 0) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + "scope_description": aws.ToString(apiObject.ScopeDescription), + "scope_name": aws.ToString(apiObject.ScopeName), + } + + tfList = append(tfList, tfMap) + } + + return tfList } diff --git a/internal/service/cognitoidp/resource_server_test.go b/internal/service/cognitoidp/resource_server_test.go index 851a048c7ae..d8ba965ebc0 100644 --- a/internal/service/cognitoidp/resource_server_test.go +++ b/internal/service/cognitoidp/resource_server_test.go @@ -5,30 +5,26 @@ package cognitoidp_test import ( "context" - "errors" "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfcognitoidp "github.com/hashicorp/terraform-provider-aws/internal/service/cognitoidp" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccCognitoIDPResourceServer_basic(t *testing.T) { ctx := acctest.Context(t) - var resourceServer cognitoidentityprovider.ResourceServerType - identifier := fmt.Sprintf("tf-acc-test-resource-server-id-%s", sdkacctest.RandString(10)) - name1 := fmt.Sprintf("tf-acc-test-resource-server-name-%s", sdkacctest.RandString(10)) - name2 := 
fmt.Sprintf("tf-acc-test-resource-server-name-%s", sdkacctest.RandString(10)) - poolName := fmt.Sprintf("tf-acc-test-pool-%s", sdkacctest.RandString(10)) - resourceName := "aws_cognito_resource_server.main" + var resourceServer awstypes.ResourceServerType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + identifier := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_resource_server.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) }, @@ -37,21 +33,11 @@ func TestAccCognitoIDPResourceServer_basic(t *testing.T) { CheckDestroy: testAccCheckResourceServerDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccResourceServerConfig_basic(identifier, name1, poolName), + Config: testAccResourceServerConfig_basic(identifier, rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckResourceServerExists(ctx, resourceName, &resourceServer), resource.TestCheckResourceAttr(resourceName, names.AttrIdentifier, identifier), - resource.TestCheckResourceAttr(resourceName, names.AttrName, name1), - resource.TestCheckResourceAttr(resourceName, "scope.#", acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "scope_identifiers.#", acctest.Ct0), - ), - }, - { - Config: testAccResourceServerConfig_basic(identifier, name2, poolName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckResourceServerExists(ctx, resourceName, &resourceServer), - resource.TestCheckResourceAttr(resourceName, names.AttrIdentifier, identifier), - resource.TestCheckResourceAttr(resourceName, names.AttrName, name2), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, "scope.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "scope_identifiers.#", acctest.Ct0), ), @@ -65,13 +51,37 @@ func TestAccCognitoIDPResourceServer_basic(t *testing.T) { }) } +func 
TestAccCognitoIDPResourceServer_disappears(t *testing.T) { + ctx := acctest.Context(t) + var resourceServer awstypes.ResourceServerType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + identifier := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_resource_server.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckResourceServerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccResourceServerConfig_basic(identifier, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckResourceServerExists(ctx, resourceName, &resourceServer), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfcognitoidp.ResourceResourceServer(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccCognitoIDPResourceServer_scope(t *testing.T) { ctx := acctest.Context(t) - var resourceServer cognitoidentityprovider.ResourceServerType - identifier := fmt.Sprintf("tf-acc-test-resource-server-id-%s", sdkacctest.RandString(10)) - name := fmt.Sprintf("tf-acc-test-resource-server-name-%s", sdkacctest.RandString(10)) - poolName := fmt.Sprintf("tf-acc-test-pool-%s", sdkacctest.RandString(10)) - resourceName := "aws_cognito_resource_server.main" + var resourceServer awstypes.ResourceServerType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + identifier := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_resource_server.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) }, @@ -80,7 +90,7 @@ func TestAccCognitoIDPResourceServer_scope(t *testing.T) { CheckDestroy: testAccCheckResourceServerDestroy(ctx), Steps: []resource.TestStep{ { - 
Config: testAccResourceServerConfig_scope(identifier, name, poolName), + Config: testAccResourceServerConfig_scope(identifier, rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckResourceServerExists(ctx, resourceName, &resourceServer), resource.TestCheckResourceAttr(resourceName, "scope.#", acctest.Ct2), @@ -88,7 +98,7 @@ func TestAccCognitoIDPResourceServer_scope(t *testing.T) { ), }, { - Config: testAccResourceServerConfig_scopeUpdate(identifier, name, poolName), + Config: testAccResourceServerConfig_scopeUpdate(identifier, rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckResourceServerExists(ctx, resourceName, &resourceServer), resource.TestCheckResourceAttr(resourceName, "scope.#", acctest.Ct1), @@ -102,7 +112,7 @@ func TestAccCognitoIDPResourceServer_scope(t *testing.T) { }, // Ensure we can remove scope completely { - Config: testAccResourceServerConfig_basic(identifier, name, poolName), + Config: testAccResourceServerConfig_basic(identifier, rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckResourceServerExists(ctx, resourceName, &resourceServer), resource.TestCheckResourceAttr(resourceName, "scope.#", acctest.Ct0), @@ -113,38 +123,22 @@ func TestAccCognitoIDPResourceServer_scope(t *testing.T) { }) } -func testAccCheckResourceServerExists(ctx context.Context, n string, resourceServer *cognitoidentityprovider.ResourceServerType) resource.TestCheckFunc { +func testAccCheckResourceServerExists(ctx context.Context, n string, v *awstypes.ResourceServerType) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No Cognito Resource Server ID is set") - } + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + output, err := tfcognitoidp.FindResourceServerByTwoPartKey(ctx, conn, 
rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes[names.AttrIdentifier]) - userPoolID, identifier, err := tfcognitoidp.DecodeResourceServerID(rs.Primary.ID) if err != nil { return err } - output, err := conn.DescribeResourceServerWithContext(ctx, &cognitoidentityprovider.DescribeResourceServerInput{ - Identifier: aws.String(identifier), - UserPoolId: aws.String(userPoolID), - }) - - if err != nil { - return err - } - - if output == nil || output.ResourceServer == nil { - return fmt.Errorf("Cognito Resource Server %q information not found", rs.Primary.ID) - } - - *resourceServer = *output.ResourceServer + *v = *output return nil } @@ -152,54 +146,49 @@ func testAccCheckResourceServerExists(ctx context.Context, n string, resourceSer func testAccCheckResourceServerDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_resource_server" { continue } - userPoolID, identifier, err := tfcognitoidp.DecodeResourceServerID(rs.Primary.ID) - if err != nil { - return err - } + _, err := tfcognitoidp.FindResourceServerByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes[names.AttrIdentifier]) - _, err = conn.DescribeResourceServerWithContext(ctx, &cognitoidentityprovider.DescribeResourceServerInput{ - Identifier: aws.String(identifier), - UserPoolId: aws.String(userPoolID), - }) + if tfresource.NotFound(err) { + continue + } if err != nil { - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { - return nil - } return err } + + return fmt.Errorf("Cognito Resource Server %s still exists", rs.Primary.ID) } return nil } } -func testAccResourceServerConfig_basic(identifier string, name string, poolName string) string { +func 
testAccResourceServerConfig_basic(identifier, rName string) string { return fmt.Sprintf(` -resource "aws_cognito_resource_server" "main" { - identifier = "%s" - name = "%s" - user_pool_id = aws_cognito_user_pool.main.id +resource "aws_cognito_resource_server" "test" { + identifier = %[1]q + name = %[2]q + user_pool_id = aws_cognito_user_pool.test.id } -resource "aws_cognito_user_pool" "main" { - name = "%s" +resource "aws_cognito_user_pool" "test" { + name = %[2]q } -`, identifier, name, poolName) +`, identifier, rName) } -func testAccResourceServerConfig_scope(identifier string, name string, poolName string) string { +func testAccResourceServerConfig_scope(identifier, rName string) string { return fmt.Sprintf(` -resource "aws_cognito_resource_server" "main" { - identifier = "%s" - name = "%s" +resource "aws_cognito_resource_server" "test" { + identifier = %[1]q + name = %[2]q scope { scope_name = "scope_1_name" @@ -211,31 +200,31 @@ resource "aws_cognito_resource_server" "main" { scope_description = "scope_2_description" } - user_pool_id = aws_cognito_user_pool.main.id + user_pool_id = aws_cognito_user_pool.test.id } -resource "aws_cognito_user_pool" "main" { - name = "%s" +resource "aws_cognito_user_pool" "test" { + name = %[2]q } -`, identifier, name, poolName) +`, identifier, rName) } -func testAccResourceServerConfig_scopeUpdate(identifier string, name string, poolName string) string { +func testAccResourceServerConfig_scopeUpdate(identifier, rName string) string { return fmt.Sprintf(` -resource "aws_cognito_resource_server" "main" { - identifier = "%s" - name = "%s" +resource "aws_cognito_resource_server" "test" { + identifier = %[1]q + name = %[2]q scope { scope_name = "scope_1_name_updated" scope_description = "scope_1_description" } - user_pool_id = aws_cognito_user_pool.main.id + user_pool_id = aws_cognito_user_pool.test.id } -resource "aws_cognito_user_pool" "main" { - name = "%s" +resource "aws_cognito_user_pool" "test" { + name = %[2]q } -`, 
identifier, name, poolName) +`, identifier, rName) } diff --git a/internal/service/cognitoidp/risk_configuration.go b/internal/service/cognitoidp/risk_configuration.go index 7b56ceb4157..5756b523e42 100644 --- a/internal/service/cognitoidp/risk_configuration.go +++ b/internal/service/cognitoidp/risk_configuration.go @@ -6,16 +6,19 @@ package cognitoidp import ( "context" "fmt" + "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -28,25 +31,14 @@ func resourceRiskConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRiskConfigurationPut, ReadWithoutTimeout: resourceRiskConfigurationRead, - DeleteWithoutTimeout: resourceRiskConfigurationDelete, UpdateWithoutTimeout: resourceRiskConfigurationPut, + DeleteWithoutTimeout: resourceRiskConfigurationDelete, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ - names.AttrUserPoolID: { - Type: schema.TypeString, - Required: true, - ForceNew: 
true, - ValidateFunc: validUserPoolID, - }, - names.AttrClientID: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, "account_takeover_risk_configuration": { Type: schema.TypeList, Optional: true, @@ -71,9 +63,9 @@ func resourceRiskConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "event_action": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(cognitoidentityprovider.AccountTakeoverEventActionType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.AccountTakeoverEventActionType](), }, "notify": { Type: schema.TypeBool, @@ -89,9 +81,9 @@ func resourceRiskConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "event_action": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(cognitoidentityprovider.AccountTakeoverEventActionType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.AccountTakeoverEventActionType](), }, "notify": { Type: schema.TypeBool, @@ -107,9 +99,9 @@ func resourceRiskConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "event_action": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(cognitoidentityprovider.AccountTakeoverEventActionType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.AccountTakeoverEventActionType](), }, "notify": { Type: schema.TypeBool, @@ -218,21 +210,17 @@ func resourceRiskConfiguration() *schema.Resource { }, }, }, + names.AttrClientID: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, "compromised_credentials_risk_configuration": { Type: schema.TypeList, Optional: true, MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "event_filter": { - Type: schema.TypeSet, - 
Optional: true, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(cognitoidentityprovider.EventFilterType_Values(), false), - }, - }, names.AttrActions: { Type: schema.TypeList, Required: true, @@ -240,13 +228,22 @@ func resourceRiskConfiguration() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "event_action": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(cognitoidentityprovider.CompromisedCredentialsEventActionType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.CompromisedCredentialsEventActionType](), }, }, }, }, + "event_filter": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.EventFilterType](), + }, + }, }, }, }, @@ -286,111 +283,117 @@ func resourceRiskConfiguration() *schema.Resource { }, }, }, + names.AttrUserPoolID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validUserPoolID, + }, }, } } func resourceRiskConfigurationPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - userPoolId := d.Get(names.AttrUserPoolID).(string) - id := userPoolId + userPoolID := d.Get(names.AttrUserPoolID).(string) + id := userPoolID input := &cognitoidentityprovider.SetRiskConfigurationInput{ - UserPoolId: aws.String(userPoolId), + UserPoolId: aws.String(userPoolID), } if v, ok := d.GetOk(names.AttrClientID); ok { - input.ClientId = aws.String(v.(string)) - id = fmt.Sprintf("%s:%s", userPoolId, v.(string)) + v := v.(string) + input.ClientId = aws.String(v) + id = userPoolID + riskConfigurationResourceIDSeparator + v } - if v, ok := d.GetOk("risk_exception_configuration"); ok && 
len(v.([]interface{})) > 0 { - input.RiskExceptionConfiguration = expandRiskExceptionConfiguration(v.([]interface{})) + if v, ok := d.GetOk("account_takeover_risk_configuration"); ok && len(v.([]interface{})) > 0 { + input.AccountTakeoverRiskConfiguration = expandAccountTakeoverRiskConfigurationType(v.([]interface{})) } if v, ok := d.GetOk("compromised_credentials_risk_configuration"); ok && len(v.([]interface{})) > 0 { - input.CompromisedCredentialsRiskConfiguration = expandCompromisedCredentialsRiskConfiguration(v.([]interface{})) + input.CompromisedCredentialsRiskConfiguration = expandCompromisedCredentialsRiskConfigurationType(v.([]interface{})) } - if v, ok := d.GetOk("account_takeover_risk_configuration"); ok && len(v.([]interface{})) > 0 { - input.AccountTakeoverRiskConfiguration = expandAccountTakeoverRiskConfiguration(v.([]interface{})) + if v, ok := d.GetOk("risk_exception_configuration"); ok && len(v.([]interface{})) > 0 { + input.RiskExceptionConfiguration = expandRiskExceptionConfigurationType(v.([]interface{})) } - _, err := conn.SetRiskConfigurationWithContext(ctx, input) + _, err := conn.SetRiskConfiguration(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "setting risk configuration: %s", err) + return sdkdiag.AppendErrorf(diags, "setting Cognito Risk Configuration (%s): %s", id, err) } - d.SetId(id) + if d.IsNewResource() { + d.SetId(id) + } return append(diags, resourceRiskConfigurationRead(ctx, d, meta)...) 
} func resourceRiskConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - userPoolId, clientId, err := RiskConfigurationParseID(d.Id()) + userPoolID, clientID, err := riskConfigurationParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Cognito Risk Configuration (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - riskConfig, err := FindRiskConfigurationById(ctx, conn, d.Id()) + riskConfig, err := findRiskConfigurationByTwoPartKey(ctx, conn, userPoolID, clientID) if !d.IsNewResource() && tfresource.NotFound(err) { - create.LogNotFoundRemoveState(names.CognitoIDP, create.ErrActionReading, ResNameRiskConfiguration, d.Id()) + log.Printf("[WARN] Cognito Risk Configuration %s not found, removing from state", d.Id()) d.SetId("") return diags } + if err != nil { return sdkdiag.AppendErrorf(diags, "reading Cognito Risk Configuration (%s): %s", d.Id(), err) } - d.Set(names.AttrUserPoolID, userPoolId) - - if clientId != "" { - d.Set(names.AttrClientID, clientId) + if err := d.Set("account_takeover_risk_configuration", flattenAccountTakeoverRiskConfigurationType(riskConfig.AccountTakeoverRiskConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting account_takeover_risk_configuration: %s", err) } - - if riskConfig.RiskExceptionConfiguration != nil { - if err := d.Set("risk_exception_configuration", flattenRiskExceptionConfiguration(riskConfig.RiskExceptionConfiguration)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting risk_exception_configuration: %s", err) - } + if clientID != "" { + d.Set(names.AttrClientID, clientID) } - if err := d.Set("compromised_credentials_risk_configuration", flattenCompromisedCredentialsRiskConfiguration(riskConfig.CompromisedCredentialsRiskConfiguration)); err != nil { return 
sdkdiag.AppendErrorf(diags, "setting compromised_credentials_risk_configuration: %s", err) } - - if err := d.Set("account_takeover_risk_configuration", flattenAccountTakeoverRiskConfiguration(riskConfig.AccountTakeoverRiskConfiguration)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting account_takeover_risk_configuration: %s", err) + if riskConfig.RiskExceptionConfiguration != nil { + if err := d.Set("risk_exception_configuration", flattenRiskExceptionConfiguration(riskConfig.RiskExceptionConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting risk_exception_configuration: %s", err) + } } + d.Set(names.AttrUserPoolID, userPoolID) return diags } func resourceRiskConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - userPoolId, clientId, err := RiskConfigurationParseID(d.Id()) + userPoolID, clientID, err := riskConfigurationParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Cognito Risk Configuration (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } input := &cognitoidentityprovider.SetRiskConfigurationInput{ - UserPoolId: aws.String(userPoolId), + UserPoolId: aws.String(userPoolID), } - - if clientId != "" { - input.ClientId = aws.String(clientId) + if clientID != "" { + input.ClientId = aws.String(clientID) } - _, err = conn.SetRiskConfigurationWithContext(ctx, input) + log.Printf("[DEBUG] Deleting Cognito Risk Configuration: %s", d.Id()) + _, err = conn.SetRiskConfiguration(ctx, input) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -401,27 +404,70 @@ func resourceRiskConfigurationDelete(ctx context.Context, d *schema.ResourceData return diags } -func 
expandRiskExceptionConfiguration(riskConfig []interface{}) *cognitoidentityprovider.RiskExceptionConfigurationType { - if len(riskConfig) == 0 || riskConfig[0] == nil { - return nil +const riskConfigurationResourceIDSeparator = ":" + +func riskConfigurationParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, riskConfigurationResourceIDSeparator) + + if len(parts) == 1 && parts[0] != "" { + return parts[0], "", nil + } + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected UserPoolID%[2]sClientID or UserPoolID", id, riskConfigurationResourceIDSeparator) +} + +func findRiskConfigurationByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID, clientID string) (*awstypes.RiskConfigurationType, error) { + input := &cognitoidentityprovider.DescribeRiskConfigurationInput{ + UserPoolId: aws.String(userPoolID), + } + if clientID != "" { + input.ClientId = aws.String(clientID) + } + + output, err := conn.DescribeRiskConfiguration(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err } - config := riskConfig[0].(map[string]interface{}) + if output == nil || output.RiskConfiguration == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.RiskConfiguration, nil +} + +func expandRiskExceptionConfigurationType(tfList []interface{}) *awstypes.RiskExceptionConfigurationType { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } - riskExceptionConfigurationType := &cognitoidentityprovider.RiskExceptionConfigurationType{} + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.RiskExceptionConfigurationType{} - if v, ok := config["blocked_ip_range_list"].(*schema.Set); ok && v.Len() > 0 { - 
riskExceptionConfigurationType.BlockedIPRangeList = flex.ExpandStringSet(v) + if v, ok := tfMap["blocked_ip_range_list"].(*schema.Set); ok && v.Len() > 0 { + apiObject.BlockedIPRangeList = flex.ExpandStringValueSet(v) } - if v, ok := config["skipped_ip_range_list"].(*schema.Set); ok && v.Len() > 0 { - riskExceptionConfigurationType.SkippedIPRangeList = flex.ExpandStringSet(v) + if v, ok := tfMap["skipped_ip_range_list"].(*schema.Set); ok && v.Len() > 0 { + apiObject.SkippedIPRangeList = flex.ExpandStringValueSet(v) } - return riskExceptionConfigurationType + return apiObject } -func flattenRiskExceptionConfiguration(apiObject *cognitoidentityprovider.RiskExceptionConfigurationType) []interface{} { +func flattenRiskExceptionConfiguration(apiObject *awstypes.RiskExceptionConfigurationType) []interface{} { if apiObject == nil { return nil } @@ -429,37 +475,36 @@ func flattenRiskExceptionConfiguration(apiObject *cognitoidentityprovider.RiskEx tfMap := map[string]interface{}{} if v := apiObject.BlockedIPRangeList; v != nil { - tfMap["blocked_ip_range_list"] = flex.FlattenStringSet(v) + tfMap["blocked_ip_range_list"] = v } if v := apiObject.SkippedIPRangeList; v != nil { - tfMap["skipped_ip_range_list"] = flex.FlattenStringSet(v) + tfMap["skipped_ip_range_list"] = v } return []interface{}{tfMap} } -func expandCompromisedCredentialsRiskConfiguration(riskConfig []interface{}) *cognitoidentityprovider.CompromisedCredentialsRiskConfigurationType { - if len(riskConfig) == 0 || riskConfig[0] == nil { +func expandCompromisedCredentialsRiskConfigurationType(tfList []interface{}) *awstypes.CompromisedCredentialsRiskConfigurationType { + if len(tfList) == 0 || tfList[0] == nil { return nil } - config := riskConfig[0].(map[string]interface{}) + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.CompromisedCredentialsRiskConfigurationType{} - riskExceptionConfigurationType := &cognitoidentityprovider.CompromisedCredentialsRiskConfigurationType{} - - if v, ok := 
config["event_filter"].(*schema.Set); ok && v.Len() > 0 { - riskExceptionConfigurationType.EventFilter = flex.ExpandStringSet(v) + if v, ok := tfMap["event_filter"].(*schema.Set); ok && v.Len() > 0 { + apiObject.EventFilter = flex.ExpandStringyValueSet[awstypes.EventFilterType](v) } - if v, ok := config[names.AttrActions].([]interface{}); ok && len(v) > 0 { - riskExceptionConfigurationType.Actions = expandCompromisedCredentialsActions(v) + if v, ok := tfMap[names.AttrActions].([]interface{}); ok && len(v) > 0 { + apiObject.Actions = expandCompromisedCredentialsActionsType(v) } - return riskExceptionConfigurationType + return apiObject } -func flattenCompromisedCredentialsRiskConfiguration(apiObject *cognitoidentityprovider.CompromisedCredentialsRiskConfigurationType) []interface{} { +func flattenCompromisedCredentialsRiskConfiguration(apiObject *awstypes.CompromisedCredentialsRiskConfigurationType) []interface{} { if apiObject == nil { return nil } @@ -467,7 +512,7 @@ func flattenCompromisedCredentialsRiskConfiguration(apiObject *cognitoidentitypr tfMap := map[string]interface{}{} if v := apiObject.EventFilter; v != nil { - tfMap["event_filter"] = flex.FlattenStringSet(v) + tfMap["event_filter"] = v } if v := apiObject.Actions; v != nil { @@ -477,57 +522,53 @@ func flattenCompromisedCredentialsRiskConfiguration(apiObject *cognitoidentitypr return []interface{}{tfMap} } -func expandCompromisedCredentialsActions(riskConfig []interface{}) *cognitoidentityprovider.CompromisedCredentialsActionsType { - if len(riskConfig) == 0 || riskConfig[0] == nil { +func expandCompromisedCredentialsActionsType(tfList []interface{}) *awstypes.CompromisedCredentialsActionsType { + if len(tfList) == 0 || tfList[0] == nil { return nil } - config := riskConfig[0].(map[string]interface{}) - - compromisedCredentialsAction := &cognitoidentityprovider.CompromisedCredentialsActionsType{} + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.CompromisedCredentialsActionsType{} 
- if v, ok := config["event_action"].(string); ok && v != "" { - compromisedCredentialsAction.EventAction = aws.String(v) + if v, ok := tfMap["event_action"].(string); ok && v != "" { + apiObject.EventAction = awstypes.CompromisedCredentialsEventActionType(v) } - return compromisedCredentialsAction + return apiObject } -func flattenCompromisedCredentialsActions(apiObject *cognitoidentityprovider.CompromisedCredentialsActionsType) []interface{} { +func flattenCompromisedCredentialsActions(apiObject *awstypes.CompromisedCredentialsActionsType) []interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.EventAction; v != nil { - tfMap["event_action"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "event_action": apiObject.EventAction, } return []interface{}{tfMap} } -func expandAccountTakeoverRiskConfiguration(riskConfig []interface{}) *cognitoidentityprovider.AccountTakeoverRiskConfigurationType { - if len(riskConfig) == 0 || riskConfig[0] == nil { +func expandAccountTakeoverRiskConfigurationType(tfList []interface{}) *awstypes.AccountTakeoverRiskConfigurationType { + if len(tfList) == 0 || tfList[0] == nil { return nil } - config := riskConfig[0].(map[string]interface{}) + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.AccountTakeoverRiskConfigurationType{} - accountTakeoverRiskConfiguration := &cognitoidentityprovider.AccountTakeoverRiskConfigurationType{} - - if v, ok := config["notify_configuration"].([]interface{}); ok && len(v) > 0 { - accountTakeoverRiskConfiguration.NotifyConfiguration = expandNotifyConfiguration(v) + if v, ok := tfMap[names.AttrActions].([]interface{}); ok && len(v) > 0 { + apiObject.Actions = expandAccountTakeoverActionsType(v) } - if v, ok := config[names.AttrActions].([]interface{}); ok && len(v) > 0 { - accountTakeoverRiskConfiguration.Actions = expandAccountTakeoverActions(v) + if v, ok := tfMap["notify_configuration"].([]interface{}); ok && len(v) > 0 { + 
apiObject.NotifyConfiguration = expandNotifyConfigurationType(v) } - return accountTakeoverRiskConfiguration + return apiObject } -func flattenAccountTakeoverRiskConfiguration(apiObject *cognitoidentityprovider.AccountTakeoverRiskConfigurationType) []interface{} { +func flattenAccountTakeoverRiskConfigurationType(apiObject *awstypes.AccountTakeoverRiskConfigurationType) []interface{} { if apiObject == nil { return nil } @@ -535,41 +576,40 @@ func flattenAccountTakeoverRiskConfiguration(apiObject *cognitoidentityprovider. tfMap := map[string]interface{}{} if v := apiObject.Actions; v != nil { - tfMap[names.AttrActions] = flattenAccountTakeoverActions(v) + tfMap[names.AttrActions] = flattenAccountTakeoverActionsType(v) } if v := apiObject.NotifyConfiguration; v != nil { - tfMap["notify_configuration"] = flattenNotifyConfiguration(v) + tfMap["notify_configuration"] = flattemNotifyConfigurationType(v) } return []interface{}{tfMap} } -func expandAccountTakeoverActions(riskConfig []interface{}) *cognitoidentityprovider.AccountTakeoverActionsType { - if len(riskConfig) == 0 || riskConfig[0] == nil { +func expandAccountTakeoverActionsType(tfList []interface{}) *awstypes.AccountTakeoverActionsType { + if len(tfList) == 0 || tfList[0] == nil { return nil } - config := riskConfig[0].(map[string]interface{}) - - actions := &cognitoidentityprovider.AccountTakeoverActionsType{} + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.AccountTakeoverActionsType{} - if v, ok := config["high_action"].([]interface{}); ok && len(v) > 0 { - actions.HighAction = expandAccountTakeoverAction(v) + if v, ok := tfMap["high_action"].([]interface{}); ok && len(v) > 0 { + apiObject.HighAction = expandAccountTakeoverActionType(v) } - if v, ok := config["low_action"].([]interface{}); ok && len(v) > 0 { - actions.LowAction = expandAccountTakeoverAction(v) + if v, ok := tfMap["low_action"].([]interface{}); ok && len(v) > 0 { + apiObject.LowAction = expandAccountTakeoverActionType(v) 
} - if v, ok := config["medium_action"].([]interface{}); ok && len(v) > 0 { - actions.MediumAction = expandAccountTakeoverAction(v) + if v, ok := tfMap["medium_action"].([]interface{}); ok && len(v) > 0 { + apiObject.MediumAction = expandAccountTakeoverActionType(v) } - return actions + return apiObject } -func flattenAccountTakeoverActions(apiObject *cognitoidentityprovider.AccountTakeoverActionsType) []interface{} { +func flattenAccountTakeoverActionsType(apiObject *awstypes.AccountTakeoverActionsType) []interface{} { if apiObject == nil { return nil } @@ -577,153 +617,145 @@ func flattenAccountTakeoverActions(apiObject *cognitoidentityprovider.AccountTak tfMap := map[string]interface{}{} if v := apiObject.HighAction; v != nil { - tfMap["high_action"] = flattenAccountTakeoverAction(v) + tfMap["high_action"] = flattenAccountTakeoverActionType(v) } if v := apiObject.LowAction; v != nil { - tfMap["low_action"] = flattenAccountTakeoverAction(v) + tfMap["low_action"] = flattenAccountTakeoverActionType(v) } if v := apiObject.MediumAction; v != nil { - tfMap["medium_action"] = flattenAccountTakeoverAction(v) + tfMap["medium_action"] = flattenAccountTakeoverActionType(v) } return []interface{}{tfMap} } -func expandAccountTakeoverAction(riskConfig []interface{}) *cognitoidentityprovider.AccountTakeoverActionType { - if len(riskConfig) == 0 || riskConfig[0] == nil { +func expandAccountTakeoverActionType(tfList []interface{}) *awstypes.AccountTakeoverActionType { + if len(tfList) == 0 || tfList[0] == nil { return nil } - config := riskConfig[0].(map[string]interface{}) + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.AccountTakeoverActionType{} - action := &cognitoidentityprovider.AccountTakeoverActionType{} - - if v, ok := config["event_action"].(string); ok && v != "" { - action.EventAction = aws.String(v) + if v, ok := tfMap["event_action"].(string); ok && v != "" { + apiObject.EventAction = awstypes.AccountTakeoverEventActionType(v) } - if v, ok := 
config["notify"].(bool); ok { - action.Notify = aws.Bool(v) + if v, ok := tfMap["notify"].(bool); ok { + apiObject.Notify = v } - return action + return apiObject } -func flattenAccountTakeoverAction(apiObject *cognitoidentityprovider.AccountTakeoverActionType) []interface{} { +func flattenAccountTakeoverActionType(apiObject *awstypes.AccountTakeoverActionType) []interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.EventAction; v != nil { - tfMap["event_action"] = aws.StringValue(v) - } - - if v := apiObject.Notify; v != nil { - tfMap["notify"] = aws.BoolValue(v) + tfMap := map[string]interface{}{ + "event_action": apiObject.EventAction, + "notify": apiObject.Notify, } return []interface{}{tfMap} } -func expandNotifyConfiguration(riskConfig []interface{}) *cognitoidentityprovider.NotifyConfigurationType { - if len(riskConfig) == 0 || riskConfig[0] == nil { +func expandNotifyConfigurationType(tfList []interface{}) *awstypes.NotifyConfigurationType { + if len(tfList) == 0 || tfList[0] == nil { return nil } - config := riskConfig[0].(map[string]interface{}) - - notifConfig := &cognitoidentityprovider.NotifyConfigurationType{} + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.NotifyConfigurationType{} - if v, ok := config["from"].(string); ok && v != "" { - notifConfig.From = aws.String(v) + if v, ok := tfMap["block_email"].([]interface{}); ok && len(v) > 0 { + apiObject.BlockEmail = expandNotifyEmailType(v) } - if v, ok := config["reply_to"].(string); ok && v != "" { - notifConfig.ReplyTo = aws.String(v) + if v, ok := tfMap["from"].(string); ok && v != "" { + apiObject.From = aws.String(v) } - if v, ok := config["source_arn"].(string); ok && v != "" { - notifConfig.SourceArn = aws.String(v) + if v, ok := tfMap["mfa_email"].([]interface{}); ok && len(v) > 0 { + apiObject.MfaEmail = expandNotifyEmailType(v) } - if v, ok := config["block_email"].([]interface{}); ok && len(v) > 0 { - 
notifConfig.BlockEmail = expandNotifyEmail(v) + if v, ok := tfMap["no_action_email"].([]interface{}); ok && len(v) > 0 { + apiObject.NoActionEmail = expandNotifyEmailType(v) } - if v, ok := config["mfa_email"].([]interface{}); ok && len(v) > 0 { - notifConfig.MfaEmail = expandNotifyEmail(v) + if v, ok := tfMap["reply_to"].(string); ok && v != "" { + apiObject.ReplyTo = aws.String(v) } - if v, ok := config["no_action_email"].([]interface{}); ok && len(v) > 0 { - notifConfig.NoActionEmail = expandNotifyEmail(v) + if v, ok := tfMap["source_arn"].(string); ok && v != "" { + apiObject.SourceArn = aws.String(v) } - return notifConfig + return apiObject } -func flattenNotifyConfiguration(apiObject *cognitoidentityprovider.NotifyConfigurationType) []interface{} { +func flattenNotifyConfigurationType(apiObject *awstypes.NotifyConfigurationType) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.From; v != nil { - tfMap["from"] = aws.StringValue(v) + if v := apiObject.BlockEmail; v != nil { + tfMap["block_email"] = flattenNotifyEmailType(v) } - if v := apiObject.ReplyTo; v != nil { - tfMap["reply_to"] = aws.StringValue(v) + if v := apiObject.From; v != nil { + tfMap["from"] = aws.ToString(v) } - if v := apiObject.SourceArn; v != nil { - tfMap["source_arn"] = aws.StringValue(v) + if v := apiObject.MfaEmail; v != nil { + tfMap["mfa_email"] = flattenNotifyEmailType(v) } - if v := apiObject.BlockEmail; v != nil { - tfMap["block_email"] = flattenNotifyEmail(v) + if v := apiObject.NoActionEmail; v != nil { + tfMap["no_action_email"] = flattenNotifyEmailType(v) } - if v := apiObject.MfaEmail; v != nil { - tfMap["mfa_email"] = flattenNotifyEmail(v) + if v := apiObject.ReplyTo; v != nil { + tfMap["reply_to"] = aws.ToString(v) } - if v := apiObject.NoActionEmail; v != nil { - tfMap["no_action_email"] = flattenNotifyEmail(v) + if v := apiObject.SourceArn; v != nil { + tfMap["source_arn"] = aws.ToString(v) } return 
[]interface{}{tfMap} } -func expandNotifyEmail(riskConfig []interface{}) *cognitoidentityprovider.NotifyEmailType { - if len(riskConfig) == 0 || riskConfig[0] == nil { +func expandNotifyEmailType(tfList []interface{}) *awstypes.NotifyEmailType { + if len(tfList) == 0 || tfList[0] == nil { return nil } - config := riskConfig[0].(map[string]interface{}) + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.NotifyEmailType{} - notifyEmail := &cognitoidentityprovider.NotifyEmailType{} - - if v, ok := config["html_body"].(string); ok && v != "" { - notifyEmail.HtmlBody = aws.String(v) + if v, ok := tfMap["html_body"].(string); ok && v != "" { + apiObject.HtmlBody = aws.String(v) } - if v, ok := config["subject"].(string); ok && v != "" { - notifyEmail.Subject = aws.String(v) + if v, ok := tfMap["subject"].(string); ok && v != "" { + apiObject.Subject = aws.String(v) } - if v, ok := config["text_body"].(string); ok && v != "" { - notifyEmail.TextBody = aws.String(v) + if v, ok := tfMap["text_body"].(string); ok && v != "" { + apiObject.TextBody = aws.String(v) } - return notifyEmail + return apiObject } -func flattenNotifyEmail(apiObject *cognitoidentityprovider.NotifyEmailType) []interface{} { +func flattenNotifyEmailType(apiObject *awstypes.NotifyEmailType) []interface{} { if apiObject == nil { return nil } @@ -731,30 +763,16 @@ func flattenNotifyEmail(apiObject *cognitoidentityprovider.NotifyEmailType) []in tfMap := map[string]interface{}{} if v := apiObject.HtmlBody; v != nil { - tfMap["html_body"] = aws.StringValue(v) + tfMap["html_body"] = aws.ToString(v) } if v := apiObject.Subject; v != nil { - tfMap["subject"] = aws.StringValue(v) + tfMap["subject"] = aws.ToString(v) } if v := apiObject.TextBody; v != nil { - tfMap["text_body"] = aws.StringValue(v) + tfMap["text_body"] = aws.ToString(v) } return []interface{}{tfMap} } - -func RiskConfigurationParseID(id string) (string, string, error) { - parts := strings.Split(id, ":") - - if len(parts) > 2 || 
len(parts) < 1 { - return "", "", fmt.Errorf("wrong format of import ID (%s), use: 'userpool-id/client-id' or 'userpool-id'", id) - } - - if len(parts) == 2 { - return parts[0], parts[1], nil - } else { - return parts[0], "", nil - } -} diff --git a/internal/service/cognitoidp/risk_configuration_test.go b/internal/service/cognitoidp/risk_configuration_test.go index 5e601ab2df4..dbfbd4f2990 100644 --- a/internal/service/cognitoidp/risk_configuration_test.go +++ b/internal/service/cognitoidp/risk_configuration_test.go @@ -5,7 +5,6 @@ package cognitoidp_test import ( "context" - "errors" "fmt" "testing" @@ -246,14 +245,14 @@ func TestAccCognitoIDPRiskConfiguration_emptyRiskException(t *testing.T) { func testAccCheckRiskConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_risk_configuration" { continue } - _, err := tfcognitoidp.FindRiskConfigurationById(ctx, conn, rs.Primary.ID) + _, err := tfcognitoidp.FindRiskConfigurationByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes[names.AttrClientID]) if tfresource.NotFound(err) { continue @@ -262,26 +261,24 @@ func testAccCheckRiskConfigurationDestroy(ctx context.Context) resource.TestChec if err != nil { return err } + + return fmt.Errorf("Cognito Risk Configuration %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckRiskConfigurationExists(ctx context.Context, name string) resource.TestCheckFunc { +func testAccCheckRiskConfigurationExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { 
- return errors.New("No Cognito Risk Configuration ID set") + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) - _, err := tfcognitoidp.FindRiskConfigurationById(ctx, conn, rs.Primary.ID) + _, err := tfcognitoidp.FindRiskConfigurationByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes[names.AttrClientID]) return err } diff --git a/internal/service/cognitoidp/service_endpoint_resolver_gen.go b/internal/service/cognitoidp/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..647eeb7dad3 --- /dev/null +++ b/internal/service/cognitoidp/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package cognitoidp + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cognitoidentityprovider_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ cognitoidentityprovider_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver cognitoidentityprovider_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: cognitoidentityprovider_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params cognitoidentityprovider_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS {
+ tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up cognitoidentityprovider endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*cognitoidentityprovider_sdkv2.Options) { + return func(o *cognitoidentityprovider_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cognitoidp/service_endpoints_gen_test.go b/internal/service/cognitoidp/service_endpoints_gen_test.go index a48fe5cbe9c..0852e65f2a6 100644 --- a/internal/service/cognitoidp/service_endpoints_gen_test.go +++ b/internal/service/cognitoidp/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package cognitoidp_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - cognitoidentityprovider_sdkv1 "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + 
cognitoidentityprovider_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/go-cty/cty" @@ -88,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -271,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -292,55 +297,66 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := cognitoidentityprovider_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(cognitoidentityprovider_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), cognitoidentityprovider_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := cognitoidentityprovider_sdkv2.NewDefaultEndpointResolverV2() - ep, err := 
r.EndpointFor(cognitoidentityprovider_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), cognitoidentityprovider_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.CognitoIDPConn(ctx) - - req, _ := client.ListUserPoolsRequest(&cognitoidentityprovider_sdkv1.ListUserPoolsInput{}) + client := meta.CognitoIDPClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListUserPools(ctx, &cognitoidentityprovider_sdkv2.ListUserPoolsInput{ + MaxResults: aws_sdkv2.Int32(1), + }, + func(opts *cognitoidentityprovider_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -396,16 +412,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return 
caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -523,6 +561,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + 
retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/cognitoidp/service_package_gen.go b/internal/service/cognitoidp/service_package_gen.go index 527b597144c..35efd7838b4 100644 --- a/internal/service/cognitoidp/service_package_gen.go +++ b/internal/service/cognitoidp/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO 
NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cognitoidp import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - cognitoidentityprovider_sdkv1 "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + cognitoidentityprovider_sdkv2 "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -27,6 +24,10 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv Factory: newUserGroupsDataSource, Name: "User Groups", }, + { + Factory: newUserPoolDataSource, + Name: "User Pool", + }, } } @@ -123,25 +124,14 @@ func (p *servicePackage) ServicePackageName() string { return names.CognitoIDP } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*cognitoidentityprovider_sdkv1.CognitoIdentityProvider, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*cognitoidentityprovider_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return cognitoidentityprovider_sdkv1.New(sess.Copy(&cfg)), nil + return cognitoidentityprovider_sdkv2.NewFromConfig(cfg, + cognitoidentityprovider_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/cognitoidp/sweep.go b/internal/service/cognitoidp/sweep.go index 952f41f7755..71ce75c037d 100644 --- a/internal/service/cognitoidp/sweep.go +++ b/internal/service/cognitoidp/sweep.go @@ -7,11 +7,12 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" + "github.com/hashicorp/terraform-provider-aws/names" ) func RegisterSweepers() { @@ -35,48 +36,49 @@ func sweepUserPoolDomains(region string) error { if err != nil { return fmt.Errorf("Error getting client: %s", err) } - conn := client.CognitoIDPConn(ctx) - input := &cognitoidentityprovider.ListUserPoolsInput{ - MaxResults: aws.Int64(50), + MaxResults: aws.Int32(50), } + conn := client.CognitoIDPClient(ctx) + sweepResources := make([]sweep.Sweepable, 0) + + pages := cognitoidentityprovider.NewListUserPoolsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Cognito User Pool Domain sweep for %s: %s", region, err) + return nil + } - err = 
conn.ListUserPoolsPagesWithContext(ctx, input, func(resp *cognitoidentityprovider.ListUserPoolsOutput, lastPage bool) bool { - if len(resp.UserPools) == 0 { - log.Print("[DEBUG] No Cognito user pools (i.e. domains) to sweep") - return false + if err != nil { + return fmt.Errorf("error listing Cognito User Pools (%s): %w", region, err) } - for _, u := range resp.UserPools { - output, err := conn.DescribeUserPoolWithContext(ctx, &cognitoidentityprovider.DescribeUserPoolInput{ - UserPoolId: u.Id, - }) + for _, v := range page.UserPools { + userPoolID := aws.ToString(v.Id) + userPool, err := findUserPoolByID(ctx, conn, userPoolID) + if err != nil { - log.Printf("[ERROR] Failed describing Cognito user pool (%s): %s", aws.StringValue(u.Name), err) + log.Printf("[ERROR] Reading Cognito User Pool (%s): %s", userPoolID, err) continue } - if output.UserPool != nil && output.UserPool.Domain != nil { - domain := aws.StringValue(output.UserPool.Domain) - - log.Printf("[INFO] Deleting Cognito user pool domain: %s", domain) - _, err := conn.DeleteUserPoolDomainWithContext(ctx, &cognitoidentityprovider.DeleteUserPoolDomainInput{ - Domain: output.UserPool.Domain, - UserPoolId: u.Id, - }) - if err != nil { - log.Printf("[ERROR] Failed deleting Cognito user pool domain (%s): %s", domain, err) - } + + if domain := aws.ToString(userPool.Domain); domain != "" { + r := resourceUserPoolDomain() + d := r.Data(nil) + d.SetId(domain) + d.Set(names.AttrUserPoolID, userPoolID) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } } - return !lastPage - }) + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) if err != nil { - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Cognito User Pool Domain sweep for %s: %s", region, err) - return nil - } - return fmt.Errorf("Error retrieving Cognito User Pools: %s", err) + return fmt.Errorf("error sweeping Cognito User Pool Domains (%s): %w", region, err) } return nil @@ -86,40 +88,40 @@ func 
sweepUserPools(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error getting client: %w", err) + return fmt.Errorf("Error getting client: %s", err) } - conn := client.CognitoIDPConn(ctx) - input := &cognitoidentityprovider.ListUserPoolsInput{ - MaxResults: aws.Int64(50), + MaxResults: aws.Int32(50), } + conn := client.CognitoIDPClient(ctx) + sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListUserPoolsPagesWithContext(ctx, input, func(resp *cognitoidentityprovider.ListUserPoolsOutput, lastPage bool) bool { - if len(resp.UserPools) == 0 { - log.Print("[DEBUG] No Cognito User Pools to sweep") - return false + pages := cognitoidentityprovider.NewListUserPoolsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Cognito User Pool sweep for %s: %s", region, err) + return nil } - for _, userPool := range resp.UserPools { - name := aws.StringValue(userPool.Name) + if err != nil { + return fmt.Errorf("error listing Cognito User Pools (%s): %w", region, err) + } - log.Printf("[INFO] Deleting Cognito User Pool: %s", name) - _, err := conn.DeleteUserPoolWithContext(ctx, &cognitoidentityprovider.DeleteUserPoolInput{ - UserPoolId: userPool.Id, - }) - if err != nil { - log.Printf("[ERROR] Failed deleting Cognito User Pool (%s): %s", name, err) - } + for _, v := range page.UserPools { + r := resourceUserPool() + d := r.Data(nil) + d.SetId(aws.ToString(v.Id)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - return !lastPage - }) + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) if err != nil { - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Cognito User Pool sweep for %s: %s", region, err) - return nil - } - return fmt.Errorf("Error retrieving Cognito User Pools: %w", err) + return fmt.Errorf("error sweeping Cognito 
User Pools (%s): %w", region, err) } return nil diff --git a/internal/service/cognitoidp/tags_gen.go b/internal/service/cognitoidp/tags_gen.go index 7f201d4ac34..c587721d368 100644 --- a/internal/service/cognitoidp/tags_gen.go +++ b/internal/service/cognitoidp/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider/cognitoidentityprovideriface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists cognitoidp service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn cognitoidentityprovideriface.CognitoIdentityProviderAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *cognitoidentityprovider.Client, identifier string, optFns ...func(*cognitoidentityprovider.Options)) (tftags.KeyValueTags, error) { input := &cognitoidentityprovider.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn cognitoidentityprovideriface.CognitoIden // ListTags lists cognitoidp service tags and set them in Context. // It is called from outside this package. 
func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).CognitoIDPConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).CognitoIDPClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns cognitoidp service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from cognitoidp service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns cognitoidp service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets cognitoidp service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates cognitoidp service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn cognitoidentityprovideriface.CognitoIdentityProviderAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *cognitoidentityprovider.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*cognitoidentityprovider.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn cognitoidentityprovideriface.CognitoId if len(removedTags) > 0 { input := &cognitoidentityprovider.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn cognitoidentityprovideriface.CognitoId Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn cognitoidentityprovideriface.CognitoId // UpdateTags updates cognitoidp service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).CognitoIDPConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).CognitoIDPClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/cognitoidp/user.go b/internal/service/cognitoidp/user.go index e6433bd5c36..e77d05c7045 100644 --- a/internal/service/cognitoidp/user.go +++ b/internal/service/cognitoidp/user.go @@ -5,22 +5,26 @@ package cognitoidp import ( "context" - "errors" "fmt" "log" + "slices" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -34,13 +38,24 @@ func resourceUser() *schema.Resource { DeleteWithoutTimeout: resourceUserDelete, Importer: &schema.ResourceImporter{ - 
StateContext: resourceUserImport, + StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), userResourceIDSeparator) + + if len(parts) != 2 || parts[0] == "" || parts[1] == "" { + return nil, fmt.Errorf("unexpected format for ID (%[1]s), expected UserPoolID%[2]sUsername", d.Id(), userResourceIDSeparator) + } + + d.Set(names.AttrUserPoolID, parts[0]) + d.Set(names.AttrUsername, parts[1]) + + return []*schema.ResourceData{d}, nil + }, }, - // https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_AdminCreateUser.html Schema: map[string]*schema.Schema{ names.AttrAttributes: { - Type: schema.TypeMap, + Type: schema.TypeMap, + Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -51,24 +66,23 @@ func resourceUser() *schema.Resource { return false }, - Optional: true, }, "client_metadata": { Type: schema.TypeMap, - Elem: &schema.Schema{Type: schema.TypeString}, Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, names.AttrCreationDate: { Type: schema.TypeString, Computed: true, }, "desired_delivery_mediums": { - Type: schema.TypeSet, + Type: schema.TypeSet, + Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(cognitoidentityprovider.DeliveryMediumType_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.DeliveryMediumType](), }, - Optional: true, }, names.AttrEnabled: { Type: schema.TypeBool, @@ -84,32 +98,28 @@ func resourceUser() *schema.Resource { Computed: true, }, "message_action": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(cognitoidentityprovider.MessageActionType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.MessageActionType](), }, "mfa_setting_list": { - Type: schema.TypeSet, + Type: schema.TypeSet, + Computed: true, Elem: 
&schema.Schema{ Type: schema.TypeString, }, - Computed: true, + }, + names.AttrPassword: { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ValidateFunc: validation.StringLenBetween(6, 256), + ConflictsWith: []string{"temporary_password"}, }, "preferred_mfa_setting": { Type: schema.TypeString, Computed: true, }, - names.AttrUserPoolID: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - names.AttrUsername: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, names.AttrStatus: { Type: schema.TypeString, Computed: true, @@ -118,13 +128,6 @@ func resourceUser() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrPassword: { - Type: schema.TypeString, - Sensitive: true, - Optional: true, - ValidateFunc: validation.StringLenBetween(6, 256), - ConflictsWith: []string{"temporary_password"}, - }, "temporary_password": { Type: schema.TypeString, Sensitive: true, @@ -132,12 +135,23 @@ func resourceUser() *schema.Resource { ValidateFunc: validation.StringLenBetween(6, 256), ConflictsWith: []string{names.AttrPassword}, }, + names.AttrUserPoolID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + names.AttrUsername: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, "validation_data": { - Type: schema.TypeMap, + Type: schema.TypeMap, + Optional: true, Elem: &schema.Schema{ Type: schema.TypeString, }, - Optional: true, }, }, } @@ -145,82 +159,77 @@ func resourceUser() *schema.Resource { func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) + userPoolID := d.Get(names.AttrUserPoolID).(string) username := d.Get(names.AttrUsername).(string) - userPoolId := 
d.Get(names.AttrUserPoolID).(string) - - params := &cognitoidentityprovider.AdminCreateUserInput{ + id := userCreateResourceID(userPoolID, username) + input := &cognitoidentityprovider.AdminCreateUserInput{ Username: aws.String(username), - UserPoolId: aws.String(userPoolId), + UserPoolId: aws.String(userPoolID), } if v, ok := d.GetOk("client_metadata"); ok { - metadata := v.(map[string]interface{}) - params.ClientMetadata = expandUserClientMetadata(metadata) + input.ClientMetadata = flex.ExpandStringValueMap(v.(map[string]interface{})) } if v, ok := d.GetOk("desired_delivery_mediums"); ok { - mediums := v.(*schema.Set) - params.DesiredDeliveryMediums = expandUserDesiredDeliveryMediums(mediums) + input.DesiredDeliveryMediums = flex.ExpandStringyValueSet[awstypes.DeliveryMediumType](v.(*schema.Set)) } if v, ok := d.GetOk("force_alias_creation"); ok { - params.ForceAliasCreation = aws.Bool(v.(bool)) + input.ForceAliasCreation = v.(bool) } if v, ok := d.GetOk("message_action"); ok { - params.MessageAction = aws.String(v.(string)) + input.MessageAction = awstypes.MessageActionType(v.(string)) } - if v, ok := d.GetOk(names.AttrAttributes); ok { - attributes := v.(map[string]interface{}) - params.UserAttributes = expandAttribute(attributes) + if v, ok := d.GetOk("temporary_password"); ok { + input.TemporaryPassword = aws.String(v.(string)) } - if v, ok := d.GetOk("validation_data"); ok { - attributes := v.(map[string]interface{}) - // aws sdk uses the same type for both validation data and user attributes - // https://docs.aws.amazon.com/sdk-for-go/api/service/cognitoidentityprovider/#AdminCreateUserInput - params.ValidationData = expandAttribute(attributes) + if v, ok := d.GetOk(names.AttrAttributes); ok { + input.UserAttributes = expandAttributeTypes(v.(map[string]interface{})) } - if v, ok := d.GetOk("temporary_password"); ok { - params.TemporaryPassword = aws.String(v.(string)) + if v, ok := d.GetOk("validation_data"); ok { + input.ValidationData = 
expandAttributeTypes(v.(map[string]interface{})) } - log.Print("[DEBUG] Creating Cognito User") + _, err := conn.AdminCreateUser(ctx, input) - resp, err := conn.AdminCreateUserWithContext(ctx, params) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Cognito User (%s/%s): %s", userPoolId, username, err) + return sdkdiag.AppendErrorf(diags, "creating Cognito User (%s): %s", id, err) } - d.SetId(fmt.Sprintf("%s/%s", aws.StringValue(params.UserPoolId), aws.StringValue(resp.User.Username))) + d.SetId(id) if v := d.Get(names.AttrEnabled); !v.(bool) { - disableParams := &cognitoidentityprovider.AdminDisableUserInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), + input := &cognitoidentityprovider.AdminDisableUserInput{ + Username: aws.String(username), + UserPoolId: aws.String(userPoolID), } - _, err := conn.AdminDisableUserWithContext(ctx, disableParams) + _, err := conn.AdminDisableUser(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "disabling Cognito User (%s): %s", d.Id(), err) } } if v, ok := d.GetOk(names.AttrPassword); ok { - setPasswordParams := &cognitoidentityprovider.AdminSetUserPasswordInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), + input := &cognitoidentityprovider.AdminSetUserPasswordInput{ Password: aws.String(v.(string)), - Permanent: aws.Bool(true), + Permanent: true, + Username: aws.String(username), + UserPoolId: aws.String(userPoolID), } - _, err := conn.AdminSetUserPasswordWithContext(ctx, setPasswordParams) + _, err := conn.AdminSetUserPassword(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "setting Cognito User's password (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "setting Cognito User (%s) password: %s", d.Id(), err) } } @@ -229,97 +238,102 @@ func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta 
interf func resourceUserRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - user, err := findUserByTwoPartKey(ctx, conn, d.Get(names.AttrUserPoolID).(string), d.Get(names.AttrUsername).(string)) + userPoolID, username := d.Get(names.AttrUserPoolID).(string), d.Get(names.AttrUsername).(string) + user, err := findUserByTwoPartKey(ctx, conn, userPoolID, username) if !d.IsNewResource() && tfresource.NotFound(err) { - create.LogNotFoundRemoveState(names.CognitoIDP, create.ErrActionReading, ResNameUser, d.Get(names.AttrUsername).(string)) + log.Printf("[WARN] Cognito User %s not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return create.AppendDiagError(diags, names.CognitoIDP, create.ErrActionReading, ResNameUser, d.Get(names.AttrUsername).(string), err) + return sdkdiag.AppendErrorf(diags, "reading Cognito User (%s): %s", d.Id(), err) } - if err := d.Set(names.AttrAttributes, flattenUserAttributes(user.UserAttributes)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting user attributes (%s): %s", d.Id(), err) + if err := d.Set(names.AttrAttributes, flattenAttributeTypes(user.UserAttributes)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting attributes: %s", err) } - + d.Set(names.AttrCreationDate, user.UserCreateDate.Format(time.RFC3339)) + d.Set(names.AttrEnabled, user.Enabled) + d.Set("last_modified_date", user.UserLastModifiedDate.Format(time.RFC3339)) if err := d.Set("mfa_setting_list", user.UserMFASettingList); err != nil { - return sdkdiag.AppendErrorf(diags, "setting user's mfa settings (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "setting mfa_setting_list: %s", err) } - d.Set("preferred_mfa_setting", user.PreferredMfaSetting) d.Set(names.AttrStatus, user.UserStatus) - d.Set(names.AttrEnabled, user.Enabled) - 
d.Set(names.AttrCreationDate, user.UserCreateDate.Format(time.RFC3339)) - d.Set("last_modified_date", user.UserLastModifiedDate.Format(time.RFC3339)) - d.Set("sub", retrieveUserSub(user.UserAttributes)) + d.Set("sub", flattenUserSub(user.UserAttributes)) + d.Set(names.AttrUserPoolID, userPoolID) + d.Set(names.AttrUsername, username) return diags } func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - log.Println("[DEBUG] Updating Cognito User") + userPoolID, username := d.Get(names.AttrUserPoolID).(string), d.Get(names.AttrUsername).(string) if d.HasChange(names.AttrAttributes) { - old, new := d.GetChange(names.AttrAttributes) - - upd, del := computeUserAttributesUpdate(old, new) + o, n := d.GetChange(names.AttrAttributes) + upd, del := expandUpdateUserAttributes(o.(map[string]interface{}), n.(map[string]interface{})) if len(upd) > 0 { - params := &cognitoidentityprovider.AdminUpdateUserAttributesInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), - UserAttributes: expandAttribute(upd), + input := &cognitoidentityprovider.AdminUpdateUserAttributesInput{ + Username: aws.String(username), + UserAttributes: expandAttributeTypes(upd), + UserPoolId: aws.String(userPoolID), } if v, ok := d.GetOk("client_metadata"); ok { - metadata := v.(map[string]interface{}) - params.ClientMetadata = expandUserClientMetadata(metadata) + input.ClientMetadata = flex.ExpandStringValueMap(v.(map[string]interface{})) } - _, err := conn.AdminUpdateUserAttributesWithContext(ctx, params) + _, err := conn.AdminUpdateUserAttributes(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Cognito User Attributes (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating Cognito User (%s) attributes: 
%s", d.Id(), err) } } + if len(del) > 0 { - params := &cognitoidentityprovider.AdminDeleteUserAttributesInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), - UserAttributeNames: expandUserAttributesDelete(del), + input := &cognitoidentityprovider.AdminDeleteUserAttributesInput{ + Username: aws.String(username), + UserAttributeNames: tfslices.ApplyToAll(del, normalizeUserAttributeKey), + UserPoolId: aws.String(userPoolID), } - _, err := conn.AdminDeleteUserAttributesWithContext(ctx, params) + + _, err := conn.AdminDeleteUserAttributes(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Cognito User Attributes (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting Cognito User (%s) attributes: %s", d.Id(), err) } } } if d.HasChange(names.AttrEnabled) { - enabled := d.Get(names.AttrEnabled).(bool) - - if enabled { - enableParams := &cognitoidentityprovider.AdminEnableUserInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), + if d.Get(names.AttrEnabled).(bool) { + input := &cognitoidentityprovider.AdminEnableUserInput{ + Username: aws.String(username), + UserPoolId: aws.String(userPoolID), } - _, err := conn.AdminEnableUserWithContext(ctx, enableParams) + + _, err := conn.AdminEnableUser(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "enabling Cognito User (%s): %s", d.Id(), err) } } else { - disableParams := &cognitoidentityprovider.AdminDisableUserInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), + input := &cognitoidentityprovider.AdminDisableUserInput{ + Username: aws.String(username), + UserPoolId: aws.String(userPoolID), } - _, err := conn.AdminDisableUserWithContext(ctx, disableParams) + + _, err := conn.AdminDisableUser(ctx, input) + if err != nil { return 
sdkdiag.AppendErrorf(diags, "disabling Cognito User (%s): %s", d.Id(), err) } @@ -327,19 +341,18 @@ func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta interf } if d.HasChange("temporary_password") { - password := d.Get("temporary_password").(string) - - if password != "" { - setPasswordParams := &cognitoidentityprovider.AdminSetUserPasswordInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), - Password: aws.String(password), - Permanent: aws.Bool(false), + if v := d.Get("temporary_password").(string); v != "" { + input := &cognitoidentityprovider.AdminSetUserPasswordInput{ + Password: aws.String(v), + Permanent: false, + Username: aws.String(username), + UserPoolId: aws.String(userPoolID), } - _, err := conn.AdminSetUserPasswordWithContext(ctx, setPasswordParams) + _, err := conn.AdminSetUserPassword(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "changing Cognito User's temporary password (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "setting Cognito User (%s) password: %s", d.Id(), err) } } else { d.Set("temporary_password", nil) @@ -347,19 +360,18 @@ func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta interf } if d.HasChange(names.AttrPassword) { - password := d.Get(names.AttrPassword).(string) - - if password != "" { - setPasswordParams := &cognitoidentityprovider.AdminSetUserPasswordInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), - Password: aws.String(password), - Permanent: aws.Bool(true), + if v := d.Get(names.AttrPassword).(string); v != "" { + input := &cognitoidentityprovider.AdminSetUserPasswordInput{ + Password: aws.String(v), + Permanent: true, + Username: aws.String(username), + UserPoolId: aws.String(userPoolID), } - _, err := conn.AdminSetUserPasswordWithContext(ctx, setPasswordParams) + _, err := 
conn.AdminSetUserPassword(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "changing Cognito User's password (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "setting Cognito User (%s) password: %s", d.Id(), err) } } else { d.Set(names.AttrPassword, nil) @@ -371,15 +383,17 @@ func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta interf func resourceUserDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) + + userPoolID, username := d.Get(names.AttrUserPoolID).(string), d.Get(names.AttrUsername).(string) log.Printf("[DEBUG] Deleting Cognito User: %s", d.Id()) - _, err := conn.AdminDeleteUserWithContext(ctx, &cognitoidentityprovider.AdminDeleteUserInput{ - Username: aws.String(d.Get(names.AttrUsername).(string)), - UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), + _, err := conn.AdminDeleteUser(ctx, &cognitoidentityprovider.AdminDeleteUserInput{ + Username: aws.String(username), + UserPoolId: aws.String(userPoolID), }) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeUserNotFoundException, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.UserNotFoundException](err) || errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -390,27 +404,26 @@ func resourceUserDelete(ctx context.Context, d *schema.ResourceData, meta interf return diags } -func resourceUserImport(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - idSplit := strings.Split(d.Id(), "/") - if len(idSplit) != 2 { - return nil, errors.New("error importing Cognito User. 
Must specify user_pool_id/username") - } - userPoolId := idSplit[0] - name := idSplit[1] - d.Set(names.AttrUserPoolID, userPoolId) - d.Set(names.AttrUsername, name) - return []*schema.ResourceData{d}, nil +const userResourceIDSeparator = "/" + +func userCreateResourceID(userPoolID, username string) string { + parts := []string{userPoolID, username} + id := strings.Join(parts, userResourceIDSeparator) + + return id } -func findUserByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, userPoolID, username string) (*cognitoidentityprovider.AdminGetUserOutput, error) { +// No userParseResourceID as pre-v5.56.0, the ID wasn't parsed -- user_pool_id and username attribute were used directly. + +func findUserByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID, username string) (*cognitoidentityprovider.AdminGetUserOutput, error) { input := &cognitoidentityprovider.AdminGetUserInput{ Username: aws.String(username), UserPoolId: aws.String(userPoolID), } - output, err := conn.AdminGetUserWithContext(ctx, input) + output, err := conn.AdminGetUser(ctx, input) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeUserNotFoundException, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.UserNotFoundException](err) || errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -428,51 +441,33 @@ func findUserByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.Cog return output, nil } -func expandAttribute(tfMap map[string]interface{}) []*cognitoidentityprovider.AttributeType { +func expandAttributeTypes(tfMap map[string]interface{}) []awstypes.AttributeType { if len(tfMap) == 0 { return nil } - apiList := make([]*cognitoidentityprovider.AttributeType, 0, len(tfMap)) + apiObjects := make([]awstypes.AttributeType, 0, len(tfMap)) for k, v := range tfMap { - if 
!UserAttributeKeyMatchesStandardAttribute(k) && !strings.HasPrefix(k, "custom:") { - k = fmt.Sprintf("custom:%v", k) - } - apiList = append(apiList, &cognitoidentityprovider.AttributeType{ - Name: aws.String(k), + apiObjects = append(apiObjects, awstypes.AttributeType{ + Name: aws.String(normalizeUserAttributeKey(k)), Value: aws.String(v.(string)), }) } - return apiList + return apiObjects } -func expandUserAttributesDelete(input []*string) []*string { - result := make([]*string, 0, len(input)) - - for _, v := range input { - if !UserAttributeKeyMatchesStandardAttribute(*v) && !strings.HasPrefix(*v, "custom:") { - formattedV := fmt.Sprintf("custom:%v", *v) - result = append(result, &formattedV) - } else { - result = append(result, v) - } - } - - return result -} - -func flattenUserAttributes(apiList []*cognitoidentityprovider.AttributeType) map[string]interface{} { +func flattenAttributeTypes(apiObjects []awstypes.AttributeType) map[string]interface{} { tfMap := make(map[string]interface{}) - for _, apiAttribute := range apiList { - if apiAttribute.Name != nil { - if UserAttributeKeyMatchesStandardAttribute(*apiAttribute.Name) { - tfMap[aws.StringValue(apiAttribute.Name)] = aws.StringValue(apiAttribute.Value) + for _, apiObject := range apiObjects { + if apiObject.Name != nil { + if k, v := aws.ToString(apiObject.Name), aws.ToString(apiObject.Value); userAttributeKeyMatchesStandardAttribute(k) { + tfMap[k] = v } else { - name := strings.TrimPrefix(strings.TrimPrefix(aws.StringValue(apiAttribute.Name), "dev:"), "custom:") - tfMap[name] = aws.StringValue(apiAttribute.Value) + k := strings.TrimPrefix(strings.TrimPrefix(k, attributeDevPrefix), attributeCustomPrefix) + tfMap[k] = v } } } @@ -480,18 +475,22 @@ func flattenUserAttributes(apiList []*cognitoidentityprovider.AttributeType) map return tfMap } -// computeUserAttributesUpdate computes which user attributes should be updated and which ones should be deleted. 
-// We should do it like this because we cannot set a list of user attributes in cognito. -// We can either perfor update or delete operation -func computeUserAttributesUpdate(old interface{}, new interface{}) (map[string]interface{}, []*string) { - oldMap := old.(map[string]interface{}) - newMap := new.(map[string]interface{}) +func flattenUserSub(apiObjects []awstypes.AttributeType) *string { + for _, apiObject := range apiObjects { + if aws.ToString(apiObject.Name) == "sub" { + return apiObject.Value + } + } + return nil +} + +func expandUpdateUserAttributes(oldMap, newMap map[string]interface{}) (map[string]interface{}, []string) { upd := make(map[string]interface{}) for k, v := range newMap { - if oldV, ok := oldMap[k]; ok { - if oldV.(string) != v.(string) { + if old, ok := oldMap[k]; ok { + if old.(string) != v.(string) { upd[k] = v } delete(oldMap, k) @@ -500,50 +499,26 @@ func computeUserAttributesUpdate(old interface{}, new interface{}) (map[string]i } } - del := make([]*string, 0, len(oldMap)) - for k := range oldMap { - del = append(del, aws.String(k)) - } + del := tfmaps.Keys(oldMap) return upd, del } -func expandUserDesiredDeliveryMediums(tfSet *schema.Set) []*string { - apiList := []*string{} - - for _, elem := range tfSet.List() { - apiList = append(apiList, aws.String(elem.(string))) - } - - return apiList -} - -func retrieveUserSub(apiList []*cognitoidentityprovider.AttributeType) string { - for _, attr := range apiList { - if aws.StringValue(attr.Name) == "sub" { - return aws.StringValue(attr.Value) - } - } - - return "" -} +const ( + attributeCustomPrefix = "custom:" + attributeDevPrefix = "dev:" +) -// For ClientMetadata we only need expand since AWS doesn't store its value -func expandUserClientMetadata(tfMap map[string]interface{}) map[string]*string { - apiMap := map[string]*string{} - for k, v := range tfMap { - apiMap[k] = aws.String(v.(string)) +func normalizeUserAttributeKey(k string) string { + if 
!userAttributeKeyMatchesStandardAttribute(k) && !strings.HasPrefix(k, attributeCustomPrefix) { + return attributeCustomPrefix + k } - return apiMap + return k } -func UserAttributeKeyMatchesStandardAttribute(input string) bool { - if len(input) == 0 { - return false - } - - var standardAttributeKeys = []string{ +func userAttributeKeyMatchesStandardAttribute(k string) bool { + standardAttributeKeys := []string{ names.AttrAddress, "birthdate", names.AttrEmail, @@ -566,10 +541,5 @@ func UserAttributeKeyMatchesStandardAttribute(input string) bool { "zoneinfo", } - for _, attribute := range standardAttributeKeys { - if input == attribute { - return true - } - } - return false + return slices.Contains(standardAttributeKeys, k) } diff --git a/internal/service/cognitoidp/user_group.go b/internal/service/cognitoidp/user_group.go index c3539a6a907..dd1809b115e 100644 --- a/internal/service/cognitoidp/user_group.go +++ b/internal/service/cognitoidp/user_group.go @@ -5,19 +5,19 @@ package cognitoidp import ( "context" - "errors" "fmt" "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -33,7 +33,7 @@ func resourceUserGroup() 
*schema.Resource { DeleteWithoutTimeout: resourceUserGroupDelete, Importer: &schema.ResourceImporter{ - StateContext: resourceUserGroupImport, + StateContext: schema.ImportStatePassthroughContext, }, // https://docs.aws.amazon.com/cognito-user-identity-pools/latest/APIReference/API_CreateGroup.html @@ -70,7 +70,7 @@ func resourceUserGroup() *schema.Resource { func resourceUserGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) name := d.Get(names.AttrName).(string) input := &cognitoidentityprovider.CreateGroupInput{ @@ -83,27 +83,27 @@ func resourceUserGroupCreate(ctx context.Context, d *schema.ResourceData, meta i } if v, ok := d.GetOk("precedence"); ok { - input.Precedence = aws.Int64(int64(v.(int))) + input.Precedence = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk(names.AttrRoleARN); ok { input.RoleArn = aws.String(v.(string)) } - output, err := conn.CreateGroupWithContext(ctx, input) + output, err := conn.CreateGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Cognito User Group (%s): %s", name, err) } - d.SetId(userGroupCreateResourceID(aws.StringValue(output.Group.UserPoolId), aws.StringValue(output.Group.GroupName))) + d.SetId(userGroupCreateResourceID(aws.ToString(output.Group.UserPoolId), aws.ToString(output.Group.GroupName))) return append(diags, resourceUserGroupRead(ctx, d, meta)...) 
} func resourceUserGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID, groupName, err := userGroupParseResourceID(d.Id()) if err != nil { @@ -123,15 +123,17 @@ func resourceUserGroupRead(ctx context.Context, d *schema.ResourceData, meta int } d.Set(names.AttrDescription, group.Description) + d.Set(names.AttrName, groupName) d.Set("precedence", group.Precedence) d.Set(names.AttrRoleARN, group.RoleArn) + d.Set(names.AttrUserPoolID, userPoolID) return diags } func resourceUserGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID, groupName, err := userGroupParseResourceID(d.Id()) if err != nil { @@ -148,14 +150,14 @@ func resourceUserGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i } if d.HasChange("precedence") { - input.Precedence = aws.Int64(int64(d.Get("precedence").(int))) + input.Precedence = aws.Int32(int32(d.Get("precedence").(int))) } if d.HasChange(names.AttrRoleARN) { input.RoleArn = aws.String(d.Get(names.AttrRoleARN).(string)) } - _, err = conn.UpdateGroupWithContext(ctx, input) + _, err = conn.UpdateGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Cognito User Group (%s): %s", d.Id(), err) @@ -166,7 +168,7 @@ func resourceUserGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceUserGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID, groupName, err := userGroupParseResourceID(d.Id()) if err != nil { @@ -174,12 +176,12 @@ func 
resourceUserGroupDelete(ctx context.Context, d *schema.ResourceData, meta i } log.Printf("[DEBUG] Deleting Cognito User Group: %s", d.Id()) - _, err = conn.DeleteGroupWithContext(ctx, &cognitoidentityprovider.DeleteGroupInput{ + _, err = conn.DeleteGroup(ctx, &cognitoidentityprovider.DeleteGroupInput{ GroupName: aws.String(groupName), UserPoolId: aws.String(userPoolID), }) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -190,18 +192,6 @@ func resourceUserGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func resourceUserGroupImport(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - parts := strings.SplitN(d.Id(), "/", 2) - if len(parts) != 2 { - return nil, errors.New("Error importing Cognito User Group. Must specify user_pool_id/group_name") - } - - d.Set(names.AttrUserPoolID, parts[0]) - d.Set(names.AttrName, parts[1]) - - return []*schema.ResourceData{d}, nil -} - const userGroupResourceIDSeparator = "/" func userGroupCreateResourceID(userPoolID, groupName string) string { @@ -218,18 +208,18 @@ func userGroupParseResourceID(id string) (string, string, error) { return parts[0], parts[1], nil } - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected USERPOOLID%[2]sGROUPNAME", id, userGroupResourceIDSeparator) + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected UserPoolID%[2]sGroupName", id, userGroupResourceIDSeparator) } -func findGroupByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, userPoolID, groupName string) (*cognitoidentityprovider.GroupType, error) { +func findGroupByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID, groupName string) (*awstypes.GroupType, error) { input := &cognitoidentityprovider.GetGroupInput{ GroupName: aws.String(groupName), 
UserPoolId: aws.String(userPoolID), } - output, err := conn.GetGroupWithContext(ctx, input) + output, err := conn.GetGroup(ctx, input) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/cognitoidp/user_group_data_source.go b/internal/service/cognitoidp/user_group_data_source.go index 5d08455aa76..f22c29d26e9 100644 --- a/internal/service/cognitoidp/user_group_data_source.go +++ b/internal/service/cognitoidp/user_group_data_source.go @@ -5,16 +5,13 @@ package cognitoidp import ( "context" + "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" "github.com/hashicorp/terraform-plugin-framework/datasource" "github.com/hashicorp/terraform-plugin-framework/datasource/schema" "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-provider-aws/internal/create" - intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,15 +20,11 @@ func newUserGroupDataSource(context.Context) (datasource.DataSourceWithConfigure return &userGroupDataSource{}, nil } -const ( - DSNameUserGroup = "User Group Data Source" -) - type userGroupDataSource struct { framework.DataSourceWithConfigure } -func (d *userGroupDataSource) Metadata(_ context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { +func (*userGroupDataSource) Metadata(_ context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { response.TypeName = "aws_cognito_user_group" } @@ -59,57 +52,37 @@ func (d 
*userGroupDataSource) Schema(ctx context.Context, request datasource.Sch } func (d *userGroupDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { - var data dataSourceDataSourceUserGroupData - + var data userGroupDataSourceModel response.Diagnostics.Append(request.Config.Get(ctx, &data)...) if response.Diagnostics.HasError() { return } - parts := []string{ - data.Name.ValueString(), - data.UserPoolID.ValueString(), - } - partCount := 2 - id, err := intflex.FlattenResourceId(parts, partCount, false) - if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.CognitoIDP, create.ErrActionFlatteningResourceId, DSNameUserGroup, data.Name.String(), err), - err.Error(), - ) - return - } - data.ID = types.StringValue(id) + conn := d.Meta().CognitoIDPClient(ctx) + + id := userGroupCreateResourceID(data.UserPoolID.ValueString(), data.GroupName.ValueString()) + group, err := findGroupByTwoPartKey(ctx, conn, data.UserPoolID.ValueString(), data.GroupName.ValueString()) - params := &cognitoidentityprovider.GetGroupInput{ - GroupName: data.Name.ValueStringPointer(), - UserPoolId: data.UserPoolID.ValueStringPointer(), - } - // 🌱 For the person who migrates to sdkv2: - // this should work by just updating the client, and removing the WithContext method. - conn := d.Meta().CognitoIDPConn(ctx) - resp, err := conn.GetGroupWithContext(ctx, params) if err != nil { - response.Diagnostics.AddError( - create.ProblemStandardMessage(names.CognitoIDP, create.ErrActionReading, DSNameUserGroup, data.ID.String(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("reading Cognito User Group (%s)", id), err.Error()) + return } - response.Diagnostics.Append(flex.Flatten(ctx, resp.Group, &data)...) + response.Diagnostics.Append(fwflex.Flatten(ctx, group, &data)...) 
if response.Diagnostics.HasError() { return } - data.Name = types.StringValue(aws.StringValue(resp.Group.GroupName)) + + data.ID = fwflex.StringValueToFramework(ctx, id) response.Diagnostics.Append(response.State.Set(ctx, &data)...) } -type dataSourceDataSourceUserGroupData struct { +type userGroupDataSourceModel struct { Description types.String `tfsdk:"description"` + GroupName types.String `tfsdk:"name"` ID types.String `tfsdk:"id"` - Name types.String `tfsdk:"name"` Precedence types.Int64 `tfsdk:"precedence"` RoleARN types.String `tfsdk:"role_arn"` UserPoolID types.String `tfsdk:"user_pool_id"` diff --git a/internal/service/cognitoidp/user_group_test.go b/internal/service/cognitoidp/user_group_test.go index 9de1911179f..ee56f997522 100644 --- a/internal/service/cognitoidp/user_group_test.go +++ b/internal/service/cognitoidp/user_group_test.go @@ -161,7 +161,7 @@ func testAccCheckUserGroupExists(ctx context.Context, n string) resource.TestChe return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) _, err := tfcognitoidp.FindGroupByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes[names.AttrName]) @@ -171,7 +171,7 @@ func testAccCheckUserGroupExists(ctx context.Context, n string) resource.TestChe func testAccCheckUserGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_user_group" { @@ -260,22 +260,31 @@ resource "aws_cognito_user_pool" "test" { name = %[1]q } +resource "aws_cognito_identity_pool" "test" { + identity_pool_name = %[1]q + allow_unauthenticated_identities = false +} + resource "aws_iam_role" "test1" { name = "%[1]s-1" 
assume_role_policy = < 0 { - input.EmailConfiguration = expandUserPoolEmailConfig(v.([]interface{})) + input.AutoVerifiedAttributes = flex.ExpandStringyValueSet[awstypes.VerifiedAttributeType](v.(*schema.Set)) } if v, ok := d.GetOk(names.AttrDeletionProtection); ok { - input.DeletionProtection = aws.String(v.(string)) + input.DeletionProtection = awstypes.DeletionProtectionType(v.(string)) } if v, ok := d.GetOk("device_configuration"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - input.DeviceConfiguration = expandUserPoolDeviceConfiguration(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.DeviceConfiguration = expandDeviceConfigurationType(v) } } + if v, ok := d.GetOk("email_configuration"); ok && len(v.([]interface{})) > 0 { + input.EmailConfiguration = expandEmailConfigurationType(v.([]interface{})) + } + if v, ok := d.GetOk("email_verification_subject"); ok { input.EmailVerificationSubject = aws.String(v.(string)) } @@ -696,27 +692,21 @@ func resourceUserPoolCreate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("lambda_config"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - input.LambdaConfig = expandUserPoolLambdaConfig(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.LambdaConfig = expandLambdaConfigType(v) } } if v, ok := d.GetOk("password_policy"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - policies := &cognitoidentityprovider.UserPoolPolicyType{} - policies.PasswordPolicy = expandUserPoolPasswordPolicy(config) - input.Policies = policies + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.Policies = &awstypes.UserPoolPolicyType{ + PasswordPolicy: expandPasswordPolicyType(v), + } } } if v, 
ok := d.GetOk(names.AttrSchema); ok { - input.Schema = expandUserPoolSchema(v.(*schema.Set).List()) + input.Schema = expandSchemaAttributeTypes(v.(*schema.Set).List()) } // For backwards compatibility, include this outside of MFA configuration @@ -728,7 +718,7 @@ func resourceUserPoolCreate(ctx context.Context, d *schema.ResourceData, meta in // Include the SMS configuration outside of MFA configuration since it // can be used for user verification. if v, ok := d.GetOk("sms_configuration"); ok { - input.SmsConfiguration = expandSMSConfiguration(v.([]interface{})) + input.SmsConfiguration = expandSMSConfigurationType(v.([]interface{})) } if v, ok := d.GetOk("sms_verification_message"); ok { @@ -736,70 +726,57 @@ func resourceUserPoolCreate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("username_attributes"); ok { - input.UsernameAttributes = flex.ExpandStringSet(v.(*schema.Set)) + input.UsernameAttributes = flex.ExpandStringyValueSet[awstypes.UsernameAttributeType](v.(*schema.Set)) } if v, ok := d.GetOk("username_configuration"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - input.UsernameConfiguration = expandUserPoolUsernameConfiguration(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.UsernameConfiguration = expandUsernameConfigurationType(v) } } if v, ok := d.GetOk("user_attribute_update_settings"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - input.UserAttributeUpdateSettings = expandUserPoolUserAttributeUpdateSettings(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.UserAttributeUpdateSettings = expandUserAttributeUpdateSettingsType(v) } } if v, ok := d.GetOk("user_pool_add_ons"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok { - userPoolAddons := 
&cognitoidentityprovider.UserPoolAddOnsType{} + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.UserPoolAddOns = &awstypes.UserPoolAddOnsType{} - if v, ok := config["advanced_security_mode"]; ok && v.(string) != "" { - userPoolAddons.AdvancedSecurityMode = aws.String(v.(string)) + if v, ok := v["advanced_security_mode"]; ok && v.(string) != "" { + input.UserPoolAddOns.AdvancedSecurityMode = awstypes.AdvancedSecurityModeType(v.(string)) } - input.UserPoolAddOns = userPoolAddons } } if v, ok := d.GetOk("verification_message_template"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - input.VerificationMessageTemplate = expandUserPoolVerificationMessageTemplate(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.VerificationMessageTemplate = expandVerificationMessageTemplateType(v) } } outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (any, error) { - return conn.CreateUserPoolWithContext(ctx, input) + return conn.CreateUserPool(ctx, input) }, userPoolErrorRetryable) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Cognito User Pool (%s): %s", name, err) } - d.SetId(aws.StringValue(outputRaw.(*cognitoidentityprovider.CreateUserPoolOutput).UserPool.Id)) + d.SetId(aws.ToString(outputRaw.(*cognitoidentityprovider.CreateUserPoolOutput).UserPool.Id)) - if v := d.Get("mfa_configuration").(string); v != cognitoidentityprovider.UserPoolMfaTypeOff { + if v := awstypes.UserPoolMfaType(d.Get("mfa_configuration").(string)); v != awstypes.UserPoolMfaTypeOff { input := &cognitoidentityprovider.SetUserPoolMfaConfigInput{ - MfaConfiguration: aws.String(v), - SoftwareTokenMfaConfiguration: expandSoftwareTokenMFAConfiguration(d.Get("software_token_mfa_configuration").([]interface{})), + MfaConfiguration: v, + SoftwareTokenMfaConfiguration: 
expandSoftwareTokenMFAConfigType(d.Get("software_token_mfa_configuration").([]interface{})), UserPoolId: aws.String(d.Id()), } if v := d.Get("sms_configuration").([]interface{}); len(v) > 0 && v[0] != nil { - input.SmsMfaConfiguration = &cognitoidentityprovider.SmsMfaConfigType{ - SmsConfiguration: expandSMSConfiguration(v), + input.SmsMfaConfiguration = &awstypes.SmsMfaConfigType{ + SmsConfiguration: expandSMSConfigurationType(v), } if v, ok := d.GetOk("sms_authentication_message"); ok { @@ -808,7 +785,7 @@ func resourceUserPoolCreate(ctx context.Context, d *schema.ResourceData, meta in } _, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (any, error) { - return conn.SetUserPoolMfaConfigWithContext(ctx, input) + return conn.SetUserPoolMfaConfig(ctx, input) }, userPoolErrorRetryable) if err != nil { @@ -821,7 +798,7 @@ func resourceUserPoolCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceUserPoolRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPool, err := findUserPoolByID(ctx, conn, d.Id()) @@ -835,79 +812,75 @@ func resourceUserPoolRead(ctx context.Context, d *schema.ResourceData, meta inte return sdkdiag.AppendErrorf(diags, "reading Cognito User Pool (%s): %s", d.Id(), err) } - if err := d.Set("account_recovery_setting", flattenUserPoolAccountRecoverySettingConfig(userPool.AccountRecoverySetting)); err != nil { + if err := d.Set("account_recovery_setting", flattenAccountRecoverySettingType(userPool.AccountRecoverySetting)); err != nil { return sdkdiag.AppendErrorf(diags, "setting account_recovery_setting: %s", err) } - if err := d.Set("admin_create_user_config", flattenUserPoolAdminCreateUserConfig(userPool.AdminCreateUserConfig)); err != nil { + if err := d.Set("admin_create_user_config", 
flattenAdminCreateUserConfigType(userPool.AdminCreateUserConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting admin_create_user_config: %s", err) } - if userPool.AliasAttributes != nil { - d.Set("alias_attributes", aws.StringValueSlice(userPool.AliasAttributes)) + if userPool.AliasAttributes != nil { // nosemgrep:ci.helper-schema-ResourceData-Set-extraneous-nil-check + d.Set("alias_attributes", userPool.AliasAttributes) } d.Set(names.AttrARN, userPool.Arn) - d.Set("auto_verified_attributes", aws.StringValueSlice(userPool.AutoVerifiedAttributes)) + d.Set("auto_verified_attributes", userPool.AutoVerifiedAttributes) d.Set(names.AttrCreationDate, userPool.CreationDate.Format(time.RFC3339)) d.Set("custom_domain", userPool.CustomDomain) d.Set(names.AttrDeletionProtection, userPool.DeletionProtection) - if err := d.Set("device_configuration", flattenUserPoolDeviceConfiguration(userPool.DeviceConfiguration)); err != nil { + if err := d.Set("device_configuration", flattenDeviceConfigurationType(userPool.DeviceConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting device_configuration: %s", err) } d.Set(names.AttrDomain, userPool.Domain) - if err := d.Set("email_configuration", flattenUserPoolEmailConfiguration(userPool.EmailConfiguration)); err != nil { + if err := d.Set("email_configuration", flattenEmailConfigurationType(userPool.EmailConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting email_configuration: %s", err) } d.Set("email_verification_subject", userPool.EmailVerificationSubject) d.Set("email_verification_message", userPool.EmailVerificationMessage) d.Set(names.AttrEndpoint, fmt.Sprintf("%s/%s", meta.(*conns.AWSClient).RegionalHostname(ctx, "cognito-idp"), d.Id())) d.Set("estimated_number_of_users", userPool.EstimatedNumberOfUsers) - if err := d.Set("lambda_config", flattenUserPoolLambdaConfig(userPool.LambdaConfig)); err != nil { + if err := d.Set("lambda_config", 
flattenLambdaConfigType(userPool.LambdaConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting lambda_config: %s", err) } d.Set("last_modified_date", userPool.LastModifiedDate.Format(time.RFC3339)) d.Set(names.AttrName, userPool.Name) - if err := d.Set("password_policy", flattenUserPoolPasswordPolicy(userPool.Policies.PasswordPolicy)); err != nil { + if err := d.Set("password_policy", flattenPasswordPolicyType(userPool.Policies.PasswordPolicy)); err != nil { return sdkdiag.AppendErrorf(diags, "setting password_policy: %s", err) } var configuredSchema []interface{} if v, ok := d.GetOk(names.AttrSchema); ok { configuredSchema = v.(*schema.Set).List() } - if err := d.Set(names.AttrSchema, flattenUserPoolSchema(expandUserPoolSchema(configuredSchema), userPool.SchemaAttributes)); err != nil { + if err := d.Set(names.AttrSchema, flattenSchemaAttributeTypes(expandSchemaAttributeTypes(configuredSchema), userPool.SchemaAttributes)); err != nil { return sdkdiag.AppendErrorf(diags, "setting schema: %s", err) } d.Set("sms_authentication_message", userPool.SmsAuthenticationMessage) - if err := d.Set("sms_configuration", flattenSMSConfiguration(userPool.SmsConfiguration)); err != nil { + if err := d.Set("sms_configuration", flattenSMSConfigurationType(userPool.SmsConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting sms_configuration: %s", err) } d.Set("sms_verification_message", userPool.SmsVerificationMessage) - if err := d.Set("user_attribute_update_settings", flattenUserPoolUserAttributeUpdateSettings(userPool.UserAttributeUpdateSettings)); err != nil { + if err := d.Set("user_attribute_update_settings", flattenUserAttributeUpdateSettingsType(userPool.UserAttributeUpdateSettings)); err != nil { return sdkdiag.AppendErrorf(diags, "setting user_attribute_update_settings: %s", err) } - if err := d.Set("user_pool_add_ons", flattenUserPoolUserPoolAddOns(userPool.UserPoolAddOns)); err != nil { + if err := d.Set("user_pool_add_ons", 
flattenUserPoolAddOnsType(userPool.UserPoolAddOns)); err != nil { return sdkdiag.AppendErrorf(diags, "setting user_pool_add_ons: %s", err) } - d.Set("username_attributes", flex.FlattenStringSet(userPool.UsernameAttributes)) - if err := d.Set("username_configuration", flattenUserPoolUsernameConfiguration(userPool.UsernameConfiguration)); err != nil { + d.Set("username_attributes", userPool.UsernameAttributes) + if err := d.Set("username_configuration", flattenUsernameConfigurationType(userPool.UsernameConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting username_configuration: %s", err) } - if err := d.Set("verification_message_template", flattenUserPoolVerificationMessageTemplate(userPool.VerificationMessageTemplate)); err != nil { + if err := d.Set("verification_message_template", flattenVerificationMessageTemplateType(userPool.VerificationMessageTemplate)); err != nil { return sdkdiag.AppendErrorf(diags, "setting verification_message_template: %s", err) } setTagsOut(ctx, userPool.UserPoolTags) - input := &cognitoidentityprovider.GetUserPoolMfaConfigInput{ - UserPoolId: aws.String(d.Id()), - } - - output, err := conn.GetUserPoolMfaConfigWithContext(ctx, input) + output, err := findUserPoolMFAConfigByID(ctx, conn, d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Cognito User Pool (%s) MFA configuration: %s", d.Id(), err) } d.Set("mfa_configuration", output.MfaConfiguration) - if err := d.Set("software_token_mfa_configuration", flattenSoftwareTokenMFAConfiguration(output.SoftwareTokenMfaConfiguration)); err != nil { + if err := d.Set("software_token_mfa_configuration", flattenSoftwareTokenMFAConfigType(output.SoftwareTokenMfaConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting software_token_mfa_configuration: %s", err) } @@ -916,7 +889,7 @@ func resourceUserPoolRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta 
interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) // MFA updates. if d.HasChanges( @@ -925,19 +898,19 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in "sms_configuration", "software_token_mfa_configuration", ) { - mfaConfiguration := d.Get("mfa_configuration").(string) + mfaConfiguration := awstypes.UserPoolMfaType(d.Get("mfa_configuration").(string)) input := &cognitoidentityprovider.SetUserPoolMfaConfigInput{ - MfaConfiguration: aws.String(mfaConfiguration), - SoftwareTokenMfaConfiguration: expandSoftwareTokenMFAConfiguration(d.Get("software_token_mfa_configuration").([]interface{})), + MfaConfiguration: mfaConfiguration, + SoftwareTokenMfaConfiguration: expandSoftwareTokenMFAConfigType(d.Get("software_token_mfa_configuration").([]interface{})), UserPoolId: aws.String(d.Id()), } // Since SMS configuration applies to both verification and MFA, only include if MFA is enabled. // Otherwise, the API will return the following error: // InvalidParameterException: Invalid MFA configuration given, can't turn off MFA and configure an MFA together. 
- if v := d.Get("sms_configuration").([]interface{}); len(v) > 0 && v[0] != nil && mfaConfiguration != cognitoidentityprovider.UserPoolMfaTypeOff { - input.SmsMfaConfiguration = &cognitoidentityprovider.SmsMfaConfigType{ - SmsConfiguration: expandSMSConfiguration(v), + if v := d.Get("sms_configuration").([]interface{}); len(v) > 0 && v[0] != nil && mfaConfiguration != awstypes.UserPoolMfaTypeOff { + input.SmsMfaConfiguration = &awstypes.SmsMfaConfigType{ + SmsConfiguration: expandSMSConfigurationType(v), } if v, ok := d.GetOk("sms_authentication_message"); ok { @@ -946,7 +919,7 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in } _, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (any, error) { - return conn.SetUserPoolMfaConfigWithContext(ctx, input) + return conn.SetUserPoolMfaConfig(ctx, input) }, userPoolErrorRetryable) if err != nil { @@ -984,39 +957,33 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("account_recovery_setting"); ok { - if config, ok := v.([]interface{})[0].(map[string]interface{}); ok { - input.AccountRecoverySetting = expandUserPoolAccountRecoverySettingConfig(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok { + input.AccountRecoverySetting = expandAccountRecoverySettingType(v) } } if v, ok := d.GetOk("admin_create_user_config"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - input.AdminCreateUserConfig = expandUserPoolAdminCreateUserConfig(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.AdminCreateUserConfig = expandAdminCreateUserConfigType(v) } } if v, ok := d.GetOk("auto_verified_attributes"); ok { - input.AutoVerifiedAttributes = flex.ExpandStringSet(v.(*schema.Set)) + input.AutoVerifiedAttributes = flex.ExpandStringyValueSet[awstypes.VerifiedAttributeType](v.(*schema.Set)) } if v, ok := 
d.GetOk(names.AttrDeletionProtection); ok { - input.DeletionProtection = aws.String(v.(string)) + input.DeletionProtection = awstypes.DeletionProtectionType(v.(string)) } if v, ok := d.GetOk("device_configuration"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - input.DeviceConfiguration = expandUserPoolDeviceConfiguration(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.DeviceConfiguration = expandDeviceConfigurationType(v) } } if v, ok := d.GetOk("email_configuration"); ok && len(v.([]interface{})) > 0 { - input.EmailConfiguration = expandUserPoolEmailConfig(v.([]interface{})) + input.EmailConfiguration = expandEmailConfigurationType(v.([]interface{})) } if v, ok := d.GetOk("email_verification_subject"); ok { @@ -1028,33 +995,36 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("lambda_config"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - if ok && config != nil { + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { if d.HasChange("lambda_config.0.pre_token_generation") { - config["pre_token_generation_config"].([]interface{})[0].(map[string]interface{})["lambda_arn"] = d.Get("lambda_config.0.pre_token_generation") + preTokenGeneration := d.Get("lambda_config.0.pre_token_generation") + if tfList, ok := v["pre_token_generation_config"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { + v["pre_token_generation_config"].([]interface{})[0].(map[string]interface{})["lambda_arn"] = preTokenGeneration + } else { + v["pre_token_generation_config"] = []interface{}{map[string]interface{}{ + "lambda_arn": preTokenGeneration, + "lambda_version": string(awstypes.PreTokenGenerationLambdaVersionTypeV10), // A guess... 
+ }} + } } if d.HasChange("lambda_config.0.pre_token_generation_config.0.lambda_arn") { - config["pre_token_generation"] = d.Get("lambda_config.0.pre_token_generation_config.0.lambda_arn") + v["pre_token_generation"] = d.Get("lambda_config.0.pre_token_generation_config.0.lambda_arn") } - input.LambdaConfig = expandUserPoolLambdaConfig(config) + input.LambdaConfig = expandLambdaConfigType(v) } } if v, ok := d.GetOk("mfa_configuration"); ok { - input.MfaConfiguration = aws.String(v.(string)) + input.MfaConfiguration = awstypes.UserPoolMfaType(v.(string)) } if v, ok := d.GetOk("password_policy"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - policies := &cognitoidentityprovider.UserPoolPolicyType{} - policies.PasswordPolicy = expandUserPoolPasswordPolicy(config) - input.Policies = policies + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.Policies = &awstypes.UserPoolPolicyType{ + PasswordPolicy: expandPasswordPolicyType(v), + } } } @@ -1063,7 +1033,7 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("sms_configuration"); ok { - input.SmsConfiguration = expandSMSConfiguration(v.([]interface{})) + input.SmsConfiguration = expandSMSConfigurationType(v.([]interface{})) } if v, ok := d.GetOk("sms_verification_message"); ok { @@ -1071,57 +1041,47 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("user_attribute_update_settings"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - input.UserAttributeUpdateSettings = expandUserPoolUserAttributeUpdateSettings(config) + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.UserAttributeUpdateSettings = expandUserAttributeUpdateSettingsType(v) } } if d.HasChange("user_attribute_update_settings") && 
input.UserAttributeUpdateSettings == nil { // An empty array must be sent to disable this setting if previously enabled. A nil // UserAttibutesUpdateSetting param will result in no modifications. - input.UserAttributeUpdateSettings = &cognitoidentityprovider.UserAttributeUpdateSettingsType{ - AttributesRequireVerificationBeforeUpdate: []*string{}, + input.UserAttributeUpdateSettings = &awstypes.UserAttributeUpdateSettingsType{ + AttributesRequireVerificationBeforeUpdate: []awstypes.VerifiedAttributeType{}, } } if v, ok := d.GetOk("user_pool_add_ons"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if ok && config != nil { - userPoolAddons := &cognitoidentityprovider.UserPoolAddOnsType{} + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + input.UserPoolAddOns = &awstypes.UserPoolAddOnsType{} - if v, ok := config["advanced_security_mode"]; ok && v.(string) != "" { - userPoolAddons.AdvancedSecurityMode = aws.String(v.(string)) + if v, ok := v["advanced_security_mode"]; ok && v.(string) != "" { + input.UserPoolAddOns.AdvancedSecurityMode = awstypes.AdvancedSecurityModeType(v.(string)) } - input.UserPoolAddOns = userPoolAddons } } if v, ok := d.GetOk("verification_message_template"); ok { - configs := v.([]interface{}) - config, ok := configs[0].(map[string]interface{}) - - if d.HasChange("email_verification_message") { - config["email_message"] = d.Get("email_verification_message") - } - if d.HasChange("email_verification_subject") { - config["email_subject"] = d.Get("email_verification_subject") - } - if d.HasChange("sms_verification_message") { - config["sms_message"] = d.Get("sms_verification_message") - } + if v, ok := v.([]interface{})[0].(map[string]interface{}); ok && v != nil { + if d.HasChange("email_verification_message") { + v["email_message"] = d.Get("email_verification_message") + } + if d.HasChange("email_verification_subject") { + v["email_subject"] = 
d.Get("email_verification_subject") + } + if d.HasChange("sms_verification_message") { + v["sms_message"] = d.Get("sms_verification_message") + } - if ok && config != nil { - input.VerificationMessageTemplate = expandUserPoolVerificationMessageTemplate(config) + input.VerificationMessageTemplate = expandVerificationMessageTemplateType(v) } } _, err := tfresource.RetryWhen(ctx, propagationTimeout, func() (any, error) { - return conn.UpdateUserPoolWithContext(ctx, input) + return conn.UpdateUserPool(ctx, input) }, func(err error) (bool, error) { if ok, err := userPoolErrorRetryable(err); ok { @@ -1129,8 +1089,8 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in } switch { - case tfawserr.ErrMessageContains(err, cognitoidentityprovider.ErrCodeInvalidParameterException, "Please use TemporaryPasswordValidityDays in PasswordPolicy instead of UnusedAccountValidityDays") && input.AdminCreateUserConfig.UnusedAccountValidityDays != nil: - input.AdminCreateUserConfig.UnusedAccountValidityDays = nil + case errs.IsAErrorMessageContains[*awstypes.InvalidParameterException](err, "Please use TemporaryPasswordValidityDays in PasswordPolicy instead of UnusedAccountValidityDays") && input.AdminCreateUserConfig.UnusedAccountValidityDays != 0: + input.AdminCreateUserConfig.UnusedAccountValidityDays = 0 return true, err default: @@ -1149,11 +1109,11 @@ func resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in if os.Difference(ns).Len() == 0 { input := &cognitoidentityprovider.AddCustomAttributesInput{ - CustomAttributes: expandUserPoolSchema(ns.Difference(os).List()), + CustomAttributes: expandSchemaAttributeTypes(ns.Difference(os).List()), UserPoolId: aws.String(d.Id()), } - _, err := conn.AddCustomAttributesWithContext(ctx, input) + _, err := conn.AddCustomAttributes(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "adding Cognito User Pool (%s) custom attributes: %s", d.Id(), err) @@ -1168,14 +1128,14 @@ func 
resourceUserPoolUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceUserPoolDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) log.Printf("[DEBUG] Deleting Cognito User Pool: %s", d.Id()) - _, err := conn.DeleteUserPoolWithContext(ctx, &cognitoidentityprovider.DeleteUserPoolInput{ + _, err := conn.DeleteUserPool(ctx, &cognitoidentityprovider.DeleteUserPoolInput{ UserPoolId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -1189,8 +1149,8 @@ func resourceUserPoolDelete(ctx context.Context, d *schema.ResourceData, meta in // IAM roles & policies can take some time to propagate and be attached to the User Pool. func userPoolErrorRetryable(err error) (bool, error) { switch { - case tfawserr.ErrMessageContains(err, cognitoidentityprovider.ErrCodeInvalidSmsRoleTrustRelationshipException, "Role does not have a trust relationship allowing Cognito to assume the role"), - tfawserr.ErrMessageContains(err, cognitoidentityprovider.ErrCodeInvalidSmsRoleAccessPolicyException, "Role does not have permission to publish with SNS"): + case errs.IsAErrorMessageContains[*awstypes.InvalidSmsRoleTrustRelationshipException](err, "Role does not have a trust relationship allowing Cognito to assume the role"), + errs.IsAErrorMessageContains[*awstypes.InvalidSmsRoleAccessPolicyException](err, "Role does not have permission to publish with SNS"): return true, err default: @@ -1198,14 +1158,14 @@ func userPoolErrorRetryable(err error) (bool, error) { } } -func findUserPoolByID(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, id string) (*cognitoidentityprovider.UserPoolType, error) { +func findUserPoolByID(ctx context.Context, conn 
*cognitoidentityprovider.Client, id string) (*awstypes.UserPoolType, error) { input := &cognitoidentityprovider.DescribeUserPoolInput{ UserPoolId: aws.String(id), } - output, err := conn.DescribeUserPoolWithContext(ctx, input) + output, err := conn.DescribeUserPool(ctx, input) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -1223,14 +1183,38 @@ func findUserPoolByID(ctx context.Context, conn *cognitoidentityprovider.Cognito return output.UserPool, nil } -func expandSMSConfiguration(tfList []interface{}) *cognitoidentityprovider.SmsConfigurationType { +func findUserPoolMFAConfigByID(ctx context.Context, conn *cognitoidentityprovider.Client, id string) (*cognitoidentityprovider.GetUserPoolMfaConfigOutput, error) { + input := &cognitoidentityprovider.GetUserPoolMfaConfigInput{ + UserPoolId: aws.String(id), + } + + output, err := conn.GetUserPoolMfaConfig(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func expandSMSConfigurationType(tfList []interface{}) *awstypes.SmsConfigurationType { if len(tfList) == 0 || tfList[0] == nil { return nil } tfMap := tfList[0].(map[string]interface{}) - - apiObject := &cognitoidentityprovider.SmsConfigurationType{} + apiObject := &awstypes.SmsConfigurationType{} if v, ok := tfMap[names.AttrExternalID].(string); ok && v != "" { apiObject.ExternalId = aws.String(v) @@ -1247,23 +1231,22 @@ func expandSMSConfiguration(tfList []interface{}) *cognitoidentityprovider.SmsCo return apiObject } -func expandSoftwareTokenMFAConfiguration(tfList []interface{}) *cognitoidentityprovider.SoftwareTokenMfaConfigType { +func 
expandSoftwareTokenMFAConfigType(tfList []interface{}) *awstypes.SoftwareTokenMfaConfigType { if len(tfList) == 0 || tfList[0] == nil { return nil } tfMap := tfList[0].(map[string]interface{}) - - apiObject := &cognitoidentityprovider.SoftwareTokenMfaConfigType{} + apiObject := &awstypes.SoftwareTokenMfaConfigType{} if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObject.Enabled = aws.Bool(v) + apiObject.Enabled = v } return apiObject } -func flattenSMSConfiguration(apiObject *cognitoidentityprovider.SmsConfigurationType) []interface{} { +func flattenSMSConfigurationType(apiObject *awstypes.SmsConfigurationType) []interface{} { if apiObject == nil { return nil } @@ -1271,1054 +1254,1013 @@ func flattenSMSConfiguration(apiObject *cognitoidentityprovider.SmsConfiguration tfMap := map[string]interface{}{} if v := apiObject.ExternalId; v != nil { - tfMap[names.AttrExternalID] = aws.StringValue(v) + tfMap[names.AttrExternalID] = aws.ToString(v) } if v := apiObject.SnsCallerArn; v != nil { - tfMap["sns_caller_arn"] = aws.StringValue(v) + tfMap["sns_caller_arn"] = aws.ToString(v) } if v := apiObject.SnsRegion; v != nil { - tfMap["sns_region"] = aws.StringValue(v) + tfMap["sns_region"] = aws.ToString(v) } return []interface{}{tfMap} } -func flattenSoftwareTokenMFAConfiguration(apiObject *cognitoidentityprovider.SoftwareTokenMfaConfigType) []interface{} { +func flattenSoftwareTokenMFAConfigType(apiObject *awstypes.SoftwareTokenMfaConfigType) []interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Enabled; v != nil { - tfMap[names.AttrEnabled] = aws.BoolValue(v) + tfMap := map[string]interface{}{ + names.AttrEnabled: apiObject.Enabled, } return []interface{}{tfMap} } -func expandUserPoolAccountRecoverySettingConfig(config map[string]interface{}) *cognitoidentityprovider.AccountRecoverySettingType { - if len(config) == 0 { +func expandAccountRecoverySettingType(tfMap map[string]interface{}) 
*awstypes.AccountRecoverySettingType { + if len(tfMap) == 0 { return nil } - configs := &cognitoidentityprovider.AccountRecoverySettingType{} + apiObjects := make([]awstypes.RecoveryOptionType, 0) - mechs := make([]*cognitoidentityprovider.RecoveryOptionType, 0) + if v, ok := tfMap["recovery_mechanism"]; ok { + for _, tfMapRaw := range v.(*schema.Set).List() { + tfMap := tfMapRaw.(map[string]interface{}) + apiObject := awstypes.RecoveryOptionType{} - if v, ok := config["recovery_mechanism"]; ok { - data := v.(*schema.Set).List() - - for _, m := range data { - param := m.(map[string]interface{}) - opt := &cognitoidentityprovider.RecoveryOptionType{} - - if v, ok := param[names.AttrName]; ok { - opt.Name = aws.String(v.(string)) + if v, ok := tfMap[names.AttrName]; ok { + apiObject.Name = awstypes.RecoveryOptionNameType(v.(string)) } - if v, ok := param[names.AttrPriority]; ok { - opt.Priority = aws.Int64(int64(v.(int))) + if v, ok := tfMap[names.AttrPriority]; ok { + apiObject.Priority = aws.Int32(int32(v.(int))) } - mechs = append(mechs, opt) + apiObjects = append(apiObjects, apiObject) } } - configs.RecoveryMechanisms = mechs + apiObject := &awstypes.AccountRecoverySettingType{ + RecoveryMechanisms: apiObjects, + } - return configs + return apiObject } -func flattenUserPoolAccountRecoverySettingConfig(config *cognitoidentityprovider.AccountRecoverySettingType) []interface{} { - if config == nil || len(config.RecoveryMechanisms) == 0 { +func flattenAccountRecoverySettingType(apiObject *awstypes.AccountRecoverySettingType) []interface{} { + if apiObject == nil || len(apiObject.RecoveryMechanisms) == 0 { return nil } - settings := map[string]interface{}{} - - mechanisms := make([]map[string]interface{}, 0) + tfList := make([]map[string]interface{}, 0) - for _, conf := range config.RecoveryMechanisms { - mech := map[string]interface{}{ - names.AttrName: aws.StringValue(conf.Name), - names.AttrPriority: aws.Int64Value(conf.Priority), + for _, apiObject := range 
apiObject.RecoveryMechanisms { + tfMap := map[string]interface{}{ + names.AttrName: apiObject.Name, + names.AttrPriority: aws.ToInt32(apiObject.Priority), } - mechanisms = append(mechanisms, mech) + + tfList = append(tfList, tfMap) } - settings["recovery_mechanism"] = mechanisms + tfMap := map[string]interface{}{ + "recovery_mechanism": tfList, + } - return []interface{}{settings} + return []interface{}{tfMap} } -func flattenUserPoolEmailConfiguration(s *cognitoidentityprovider.EmailConfigurationType) []map[string]interface{} { - m := make(map[string]interface{}) - - if s == nil { +func flattenEmailConfigurationType(apiObject *awstypes.EmailConfigurationType) []interface{} { + if apiObject == nil { return nil } - if s.ReplyToEmailAddress != nil { - m["reply_to_email_address"] = aws.StringValue(s.ReplyToEmailAddress) - } + tfMap := make(map[string]interface{}) - if s.From != nil { - m["from_email_address"] = aws.StringValue(s.From) + if apiObject.ConfigurationSet != nil { + tfMap["configuration_set"] = aws.ToString(apiObject.ConfigurationSet) } - if s.SourceArn != nil { - m["source_arn"] = aws.StringValue(s.SourceArn) + tfMap["email_sending_account"] = apiObject.EmailSendingAccount + + if apiObject.From != nil { + tfMap["from_email_address"] = aws.ToString(apiObject.From) } - if s.EmailSendingAccount != nil { - m["email_sending_account"] = aws.StringValue(s.EmailSendingAccount) + if apiObject.ReplyToEmailAddress != nil { + tfMap["reply_to_email_address"] = aws.ToString(apiObject.ReplyToEmailAddress) } - if s.ConfigurationSet != nil { - m["configuration_set"] = aws.StringValue(s.ConfigurationSet) + if apiObject.SourceArn != nil { + tfMap["source_arn"] = aws.ToString(apiObject.SourceArn) } - if len(m) > 0 { - return []map[string]interface{}{m} + if len(tfMap) > 0 { + return []interface{}{tfMap} } - return []map[string]interface{}{} + return []interface{}{} } -func expandUserPoolAdminCreateUserConfig(config map[string]interface{}) 
*cognitoidentityprovider.AdminCreateUserConfigType { - configs := &cognitoidentityprovider.AdminCreateUserConfigType{} +func expandAdminCreateUserConfigType(tfMap map[string]interface{}) *awstypes.AdminCreateUserConfigType { + apiObject := &awstypes.AdminCreateUserConfigType{} - if v, ok := config["allow_admin_create_user_only"]; ok { - configs.AllowAdminCreateUserOnly = aws.Bool(v.(bool)) + if v, ok := tfMap["allow_admin_create_user_only"]; ok { + apiObject.AllowAdminCreateUserOnly = v.(bool) } - if v, ok := config["invite_message_template"]; ok { - data := v.([]interface{}) - - if len(data) > 0 { - m, ok := data[0].(map[string]interface{}) - - if ok { - imt := &cognitoidentityprovider.MessageTemplateType{} + if v, ok := tfMap["invite_message_template"]; ok { + if tfList := v.([]interface{}); len(tfList) > 0 { + if tfMap, ok := tfList[0].(map[string]interface{}); ok { + imt := &awstypes.MessageTemplateType{} - if v, ok := m["email_message"]; ok { + if v, ok := tfMap["email_message"]; ok { imt.EmailMessage = aws.String(v.(string)) } - if v, ok := m["email_subject"]; ok { + if v, ok := tfMap["email_subject"]; ok { imt.EmailSubject = aws.String(v.(string)) } - if v, ok := m["sms_message"]; ok { + if v, ok := tfMap["sms_message"]; ok { imt.SMSMessage = aws.String(v.(string)) } - configs.InviteMessageTemplate = imt + apiObject.InviteMessageTemplate = imt } } } - return configs + return apiObject } -func flattenUserPoolAdminCreateUserConfig(s *cognitoidentityprovider.AdminCreateUserConfigType) []map[string]interface{} { - config := map[string]interface{}{} - - if s == nil { +func flattenAdminCreateUserConfigType(apiObject *awstypes.AdminCreateUserConfigType) []interface{} { + if apiObject == nil { return nil } - if s.AllowAdminCreateUserOnly != nil { - config["allow_admin_create_user_only"] = aws.BoolValue(s.AllowAdminCreateUserOnly) + tfMap := map[string]interface{}{ + "allow_admin_create_user_only": apiObject.AllowAdminCreateUserOnly, } - if s.InviteMessageTemplate != 
nil { - subconfig := map[string]interface{}{} + if apiObject := apiObject.InviteMessageTemplate; apiObject != nil { + imt := map[string]interface{}{} - if s.InviteMessageTemplate.EmailMessage != nil { - subconfig["email_message"] = aws.StringValue(s.InviteMessageTemplate.EmailMessage) + if apiObject.EmailMessage != nil { + imt["email_message"] = aws.ToString(apiObject.EmailMessage) } - if s.InviteMessageTemplate.EmailSubject != nil { - subconfig["email_subject"] = aws.StringValue(s.InviteMessageTemplate.EmailSubject) + if apiObject.EmailSubject != nil { + imt["email_subject"] = aws.ToString(apiObject.EmailSubject) } - if s.InviteMessageTemplate.SMSMessage != nil { - subconfig["sms_message"] = aws.StringValue(s.InviteMessageTemplate.SMSMessage) + if apiObject.SMSMessage != nil { + imt["sms_message"] = aws.ToString(apiObject.SMSMessage) } - if len(subconfig) > 0 { - config["invite_message_template"] = []map[string]interface{}{subconfig} + if len(imt) > 0 { + tfMap["invite_message_template"] = []map[string]interface{}{imt} } } - return []map[string]interface{}{config} + return []interface{}{tfMap} } -func expandUserPoolDeviceConfiguration(config map[string]interface{}) *cognitoidentityprovider.DeviceConfigurationType { - configs := &cognitoidentityprovider.DeviceConfigurationType{} +func expandDeviceConfigurationType(tfMap map[string]interface{}) *awstypes.DeviceConfigurationType { + apiObject := &awstypes.DeviceConfigurationType{} - if v, ok := config["challenge_required_on_new_device"]; ok { - configs.ChallengeRequiredOnNewDevice = aws.Bool(v.(bool)) + if v, ok := tfMap["challenge_required_on_new_device"]; ok { + apiObject.ChallengeRequiredOnNewDevice = v.(bool) } - if v, ok := config["device_only_remembered_on_user_prompt"]; ok { - configs.DeviceOnlyRememberedOnUserPrompt = aws.Bool(v.(bool)) + if v, ok := tfMap["device_only_remembered_on_user_prompt"]; ok { + apiObject.DeviceOnlyRememberedOnUserPrompt = v.(bool) } - return configs + return apiObject } -func 
expandUserPoolLambdaConfig(config map[string]interface{}) *cognitoidentityprovider.LambdaConfigType { - configs := &cognitoidentityprovider.LambdaConfigType{} +func expandLambdaConfigType(tfMap map[string]interface{}) *awstypes.LambdaConfigType { + apiObject := &awstypes.LambdaConfigType{} - if v, ok := config["create_auth_challenge"]; ok && v.(string) != "" { - configs.CreateAuthChallenge = aws.String(v.(string)) + if v, ok := tfMap["create_auth_challenge"]; ok && v.(string) != "" { + apiObject.CreateAuthChallenge = aws.String(v.(string)) } - if v, ok := config["custom_message"]; ok && v.(string) != "" { - configs.CustomMessage = aws.String(v.(string)) + if v, ok := tfMap["custom_email_sender"].([]interface{}); ok && len(v) > 0 { + if v, ok := v[0].(map[string]interface{}); ok && v != nil { + apiObject.CustomEmailSender = expandCustomEmailLambdaVersionConfigType(v) + } } - if v, ok := config["define_auth_challenge"]; ok && v.(string) != "" { - configs.DefineAuthChallenge = aws.String(v.(string)) + if v, ok := tfMap["custom_message"]; ok && v.(string) != "" { + apiObject.CustomMessage = aws.String(v.(string)) } - if v, ok := config["post_authentication"]; ok && v.(string) != "" { - configs.PostAuthentication = aws.String(v.(string)) + if v, ok := tfMap["custom_sms_sender"].([]interface{}); ok && len(v) > 0 { + if v, ok := v[0].(map[string]interface{}); ok && v != nil { + apiObject.CustomSMSSender = expandCustomSMSLambdaVersionConfigType(v) + } } - if v, ok := config["post_confirmation"]; ok && v.(string) != "" { - configs.PostConfirmation = aws.String(v.(string)) + if v, ok := tfMap["define_auth_challenge"]; ok && v.(string) != "" { + apiObject.DefineAuthChallenge = aws.String(v.(string)) } - if v, ok := config["pre_authentication"]; ok && v.(string) != "" { - configs.PreAuthentication = aws.String(v.(string)) + if v, ok := tfMap[names.AttrKMSKeyID]; ok && v.(string) != "" { + apiObject.KMSKeyID = aws.String(v.(string)) } - if v, ok := config["pre_sign_up"]; ok && 
v.(string) != "" { - configs.PreSignUp = aws.String(v.(string)) + if v, ok := tfMap["post_authentication"]; ok && v.(string) != "" { + apiObject.PostAuthentication = aws.String(v.(string)) } - if v, ok := config["pre_token_generation"]; ok && v.(string) != "" { - configs.PreTokenGeneration = aws.String(v.(string)) + if v, ok := tfMap["post_confirmation"]; ok && v.(string) != "" { + apiObject.PostConfirmation = aws.String(v.(string)) } - if v, ok := config["pre_token_generation_config"].([]interface{}); ok && len(v) > 0 { - s, sok := v[0].(map[string]interface{}) - if sok && s != nil { - configs.PreTokenGenerationConfig = expandedUserPoolPreGenerationConfig(s) - } + if v, ok := tfMap["pre_authentication"]; ok && v.(string) != "" { + apiObject.PreAuthentication = aws.String(v.(string)) } - if v, ok := config["user_migration"]; ok && v.(string) != "" { - configs.UserMigration = aws.String(v.(string)) + if v, ok := tfMap["pre_sign_up"]; ok && v.(string) != "" { + apiObject.PreSignUp = aws.String(v.(string)) } - if v, ok := config["verify_auth_challenge_response"]; ok && v.(string) != "" { - configs.VerifyAuthChallengeResponse = aws.String(v.(string)) + if v, ok := tfMap["pre_token_generation"]; ok && v.(string) != "" { + apiObject.PreTokenGeneration = aws.String(v.(string)) } - if v, ok := config[names.AttrKMSKeyID]; ok && v.(string) != "" { - configs.KMSKeyID = aws.String(v.(string)) + if v, ok := tfMap["pre_token_generation_config"].([]interface{}); ok && len(v) > 0 { + if v, ok := v[0].(map[string]interface{}); ok && v != nil { + apiObject.PreTokenGenerationConfig = expandPreTokenGenerationVersionConfigType(v) + } } - if v, ok := config["custom_sms_sender"].([]interface{}); ok && len(v) > 0 { - s, sok := v[0].(map[string]interface{}) - if sok && s != nil { - configs.CustomSMSSender = expandUserPoolCustomSMSSender(s) - } + if v, ok := tfMap["user_migration"]; ok && v.(string) != "" { + apiObject.UserMigration = aws.String(v.(string)) } - if v, ok := 
config["custom_email_sender"].([]interface{}); ok && len(v) > 0 { - s, sok := v[0].(map[string]interface{}) - if sok && s != nil { - configs.CustomEmailSender = expandUserPoolCustomEmailSender(s) - } + if v, ok := tfMap["verify_auth_challenge_response"]; ok && v.(string) != "" { + apiObject.VerifyAuthChallengeResponse = aws.String(v.(string)) } - return configs + return apiObject } -func flattenUserPoolLambdaConfig(s *cognitoidentityprovider.LambdaConfigType) []map[string]interface{} { - m := map[string]interface{}{} - if s == nil { +func flattenLambdaConfigType(apiObject *awstypes.LambdaConfigType) []interface{} { + if apiObject == nil { return nil } - if s.CreateAuthChallenge != nil { - m["create_auth_challenge"] = aws.StringValue(s.CreateAuthChallenge) + tfMap := map[string]interface{}{} + + if apiObject.CreateAuthChallenge != nil { + tfMap["create_auth_challenge"] = aws.ToString(apiObject.CreateAuthChallenge) } - if s.CustomMessage != nil { - m["custom_message"] = aws.StringValue(s.CustomMessage) + if apiObject.CustomEmailSender != nil { + tfMap["custom_email_sender"] = flattenCustomEmailLambdaVersionConfigType(apiObject.CustomEmailSender) } - if s.DefineAuthChallenge != nil { - m["define_auth_challenge"] = aws.StringValue(s.DefineAuthChallenge) + if apiObject.CustomMessage != nil { + tfMap["custom_message"] = aws.ToString(apiObject.CustomMessage) } - if s.PostAuthentication != nil { - m["post_authentication"] = aws.StringValue(s.PostAuthentication) + if apiObject.CustomSMSSender != nil { + tfMap["custom_sms_sender"] = flattenCustomSMSLambdaVersionConfigType(apiObject.CustomSMSSender) } - if s.PostConfirmation != nil { - m["post_confirmation"] = aws.StringValue(s.PostConfirmation) + if apiObject.DefineAuthChallenge != nil { + tfMap["define_auth_challenge"] = aws.ToString(apiObject.DefineAuthChallenge) } - if s.PreAuthentication != nil { - m["pre_authentication"] = aws.StringValue(s.PreAuthentication) + if apiObject.KMSKeyID != nil { + tfMap[names.AttrKMSKeyID] 
= aws.ToString(apiObject.KMSKeyID) } - if s.PreSignUp != nil { - m["pre_sign_up"] = aws.StringValue(s.PreSignUp) + if apiObject.PostAuthentication != nil { + tfMap["post_authentication"] = aws.ToString(apiObject.PostAuthentication) } - if s.PreTokenGeneration != nil { - m["pre_token_generation"] = aws.StringValue(s.PreTokenGeneration) + if apiObject.PostConfirmation != nil { + tfMap["post_confirmation"] = aws.ToString(apiObject.PostConfirmation) } - if s.PreTokenGenerationConfig != nil { - m["pre_token_generation_config"] = flattenUserPoolPreTokenGenerationConfig(s.PreTokenGenerationConfig) + if apiObject.PreAuthentication != nil { + tfMap["pre_authentication"] = aws.ToString(apiObject.PreAuthentication) } - if s.UserMigration != nil { - m["user_migration"] = aws.StringValue(s.UserMigration) + if apiObject.PreSignUp != nil { + tfMap["pre_sign_up"] = aws.ToString(apiObject.PreSignUp) } - if s.VerifyAuthChallengeResponse != nil { - m["verify_auth_challenge_response"] = aws.StringValue(s.VerifyAuthChallengeResponse) + if apiObject.PreTokenGeneration != nil { + tfMap["pre_token_generation"] = aws.ToString(apiObject.PreTokenGeneration) } - if s.KMSKeyID != nil { - m[names.AttrKMSKeyID] = aws.StringValue(s.KMSKeyID) + if apiObject.PreTokenGenerationConfig != nil { + tfMap["pre_token_generation_config"] = flattenPreTokenGenerationVersionConfigType(apiObject.PreTokenGenerationConfig) } - if s.CustomSMSSender != nil { - m["custom_sms_sender"] = flattenUserPoolCustomSMSSender(s.CustomSMSSender) + if apiObject.UserMigration != nil { + tfMap["user_migration"] = aws.ToString(apiObject.UserMigration) } - if s.CustomEmailSender != nil { - m["custom_email_sender"] = flattenUserPoolCustomEmailSender(s.CustomEmailSender) + if apiObject.VerifyAuthChallengeResponse != nil { + tfMap["verify_auth_challenge_response"] = aws.ToString(apiObject.VerifyAuthChallengeResponse) } - if len(m) > 0 { - return []map[string]interface{}{m} + if len(tfMap) > 0 { + return []interface{}{tfMap} } - 
return []map[string]interface{}{} + return []interface{}{} } -func expandUserPoolPasswordPolicy(config map[string]interface{}) *cognitoidentityprovider.PasswordPolicyType { - configs := &cognitoidentityprovider.PasswordPolicyType{} +func expandPasswordPolicyType(tfMap map[string]interface{}) *awstypes.PasswordPolicyType { + apiObject := &awstypes.PasswordPolicyType{} - if v, ok := config["minimum_length"]; ok { - configs.MinimumLength = aws.Int64(int64(v.(int))) + if v, ok := tfMap["minimum_length"]; ok { + apiObject.MinimumLength = aws.Int32(int32(v.(int))) } - if v, ok := config["require_lowercase"]; ok { - configs.RequireLowercase = aws.Bool(v.(bool)) + if v, ok := tfMap["require_lowercase"]; ok { + apiObject.RequireLowercase = v.(bool) } - if v, ok := config["require_numbers"]; ok { - configs.RequireNumbers = aws.Bool(v.(bool)) + if v, ok := tfMap["require_numbers"]; ok { + apiObject.RequireNumbers = v.(bool) } - if v, ok := config["require_symbols"]; ok { - configs.RequireSymbols = aws.Bool(v.(bool)) + if v, ok := tfMap["require_symbols"]; ok { + apiObject.RequireSymbols = v.(bool) } - if v, ok := config["require_uppercase"]; ok { - configs.RequireUppercase = aws.Bool(v.(bool)) + if v, ok := tfMap["require_uppercase"]; ok { + apiObject.RequireUppercase = v.(bool) } - if v, ok := config["temporary_password_validity_days"]; ok { - configs.TemporaryPasswordValidityDays = aws.Int64(int64(v.(int))) + if v, ok := tfMap["temporary_password_validity_days"]; ok { + apiObject.TemporaryPasswordValidityDays = int32(v.(int)) } - return configs + return apiObject } -func flattenUserPoolUserPoolAddOns(s *cognitoidentityprovider.UserPoolAddOnsType) []map[string]interface{} { - config := make(map[string]interface{}) - - if s == nil { - return []map[string]interface{}{} +func flattenUserPoolAddOnsType(apiObject *awstypes.UserPoolAddOnsType) []interface{} { + if apiObject == nil { + return []interface{}{} } - if s.AdvancedSecurityMode != nil { - config["advanced_security_mode"] 
= aws.StringValue(s.AdvancedSecurityMode) - } + tfMap := make(map[string]interface{}) - return []map[string]interface{}{config} + tfMap["advanced_security_mode"] = apiObject.AdvancedSecurityMode + + return []interface{}{tfMap} } -func expandUserPoolSchema(inputs []interface{}) []*cognitoidentityprovider.SchemaAttributeType { - configs := make([]*cognitoidentityprovider.SchemaAttributeType, len(inputs)) +func expandSchemaAttributeTypes(tfList []interface{}) []awstypes.SchemaAttributeType { + apiObjects := make([]awstypes.SchemaAttributeType, len(tfList)) - for i, input := range inputs { - param := input.(map[string]interface{}) - config := &cognitoidentityprovider.SchemaAttributeType{} + for i, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]interface{}) + apiObject := awstypes.SchemaAttributeType{} - if v, ok := param["attribute_data_type"]; ok { - config.AttributeDataType = aws.String(v.(string)) + if v, ok := tfMap["attribute_data_type"]; ok { + apiObject.AttributeDataType = awstypes.AttributeDataType(v.(string)) } - if v, ok := param["developer_only_attribute"]; ok { - config.DeveloperOnlyAttribute = aws.Bool(v.(bool)) + if v, ok := tfMap["developer_only_attribute"]; ok { + apiObject.DeveloperOnlyAttribute = aws.Bool(v.(bool)) } - if v, ok := param["mutable"]; ok { - config.Mutable = aws.Bool(v.(bool)) + if v, ok := tfMap["mutable"]; ok { + apiObject.Mutable = aws.Bool(v.(bool)) } - if v, ok := param[names.AttrName]; ok { - config.Name = aws.String(v.(string)) + if v, ok := tfMap[names.AttrName]; ok { + apiObject.Name = aws.String(v.(string)) } - if v, ok := param["required"]; ok { - config.Required = aws.Bool(v.(bool)) - } - - if v, ok := param["number_attribute_constraints"]; ok { - data := v.([]interface{}) - - if len(data) > 0 { - m, ok := data[0].(map[string]interface{}) - if ok { - numberAttributeConstraintsType := &cognitoidentityprovider.NumberAttributeConstraintsType{} + if v, ok := tfMap["number_attribute_constraints"]; ok { + if tfList := 
v.([]interface{}); len(tfList) > 0 { + if tfMap, ok := tfList[0].(map[string]interface{}); ok { + nact := &awstypes.NumberAttributeConstraintsType{} - if v, ok := m["min_value"]; ok && v.(string) != "" { - numberAttributeConstraintsType.MinValue = aws.String(v.(string)) + if v, ok := tfMap["max_value"]; ok && v.(string) != "" { + nact.MaxValue = aws.String(v.(string)) } - if v, ok := m["max_value"]; ok && v.(string) != "" { - numberAttributeConstraintsType.MaxValue = aws.String(v.(string)) + if v, ok := tfMap["min_value"]; ok && v.(string) != "" { + nact.MinValue = aws.String(v.(string)) } - config.NumberAttributeConstraints = numberAttributeConstraintsType + apiObject.NumberAttributeConstraints = nact } } } - if v, ok := param["string_attribute_constraints"]; ok { - data := v.([]interface{}) + if v, ok := tfMap["required"]; ok { + apiObject.Required = aws.Bool(v.(bool)) + } - if len(data) > 0 { - m, _ := data[0].(map[string]interface{}) - if ok { - stringAttributeConstraintsType := &cognitoidentityprovider.StringAttributeConstraintsType{} + if v, ok := tfMap["string_attribute_constraints"]; ok { + if tfList := v.([]interface{}); len(tfList) > 0 { + if tfMap, ok := tfList[0].(map[string]interface{}); ok { + sact := &awstypes.StringAttributeConstraintsType{} - if l, ok := m["min_length"]; ok && l.(string) != "" { - stringAttributeConstraintsType.MinLength = aws.String(l.(string)) + if v, ok := tfMap["max_length"]; ok && v.(string) != "" { + sact.MaxLength = aws.String(v.(string)) } - if l, ok := m["max_length"]; ok && l.(string) != "" { - stringAttributeConstraintsType.MaxLength = aws.String(l.(string)) + if v, ok := tfMap["min_length"]; ok && v.(string) != "" { + sact.MinLength = aws.String(v.(string)) } - config.StringAttributeConstraints = stringAttributeConstraintsType + apiObject.StringAttributeConstraints = sact } } } - configs[i] = config + apiObjects[i] = apiObject } - return configs + return apiObjects } -func flattenUserPoolSchema(configuredAttributes, 
inputs []*cognitoidentityprovider.SchemaAttributeType) []map[string]interface{} { - values := make([]map[string]interface{}, 0) - - for _, input := range inputs { - if input == nil { - continue - } +func flattenSchemaAttributeTypes(configuredAttributes, apiObjects []awstypes.SchemaAttributeType) []interface{} { + tfList := make([]interface{}, 0) + for _, apiObject := range apiObjects { // The API returns all standard attributes // https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#cognito-user-pools-standard-attributes // Ignore setting them in state if they are unconfigured to prevent a huge and unexpected diff configured := false for _, configuredAttribute := range configuredAttributes { - if reflect.DeepEqual(input, configuredAttribute) { + if reflect.DeepEqual(apiObject, configuredAttribute) { configured = true } } if !configured { - if UserPoolSchemaAttributeMatchesStandardAttribute(input) { + if userPoolSchemaAttributeMatchesStandardAttribute(&apiObject) { continue } + // When adding a Cognito Identity Provider, the API will automatically add an "identities" attribute - identitiesAttribute := cognitoidentityprovider.SchemaAttributeType{ - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + identitiesAttribute := awstypes.SchemaAttributeType{ + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("identities"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{}, + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{}, } - if reflect.DeepEqual(*input, identitiesAttribute) { + + if reflect.DeepEqual(apiObject, identitiesAttribute) { continue } } - var value = map[string]interface{}{ - "attribute_data_type": aws.StringValue(input.AttributeDataType), - "developer_only_attribute": aws.BoolValue(input.DeveloperOnlyAttribute), - 
"mutable": aws.BoolValue(input.Mutable), - names.AttrName: strings.TrimPrefix(strings.TrimPrefix(aws.StringValue(input.Name), "dev:"), "custom:"), - "required": aws.BoolValue(input.Required), + var tfMap = map[string]interface{}{ + "attribute_data_type": apiObject.AttributeDataType, + "developer_only_attribute": aws.ToBool(apiObject.DeveloperOnlyAttribute), + "mutable": aws.ToBool(apiObject.Mutable), + names.AttrName: strings.TrimPrefix(strings.TrimPrefix(aws.ToString(apiObject.Name), attributeDevPrefix), attributeCustomPrefix), + "required": aws.ToBool(apiObject.Required), } - if input.NumberAttributeConstraints != nil { - subvalue := make(map[string]interface{}) + if apiObject.NumberAttributeConstraints != nil { + nact := make(map[string]interface{}) - if input.NumberAttributeConstraints.MinValue != nil { - subvalue["min_value"] = aws.StringValue(input.NumberAttributeConstraints.MinValue) + if apiObject.NumberAttributeConstraints.MaxValue != nil { + nact["max_value"] = aws.ToString(apiObject.NumberAttributeConstraints.MaxValue) } - if input.NumberAttributeConstraints.MaxValue != nil { - subvalue["max_value"] = aws.StringValue(input.NumberAttributeConstraints.MaxValue) + if apiObject.NumberAttributeConstraints.MinValue != nil { + nact["min_value"] = aws.ToString(apiObject.NumberAttributeConstraints.MinValue) } - value["number_attribute_constraints"] = []map[string]interface{}{subvalue} + tfMap["number_attribute_constraints"] = []interface{}{nact} } - if input.StringAttributeConstraints != nil && !skipFlatteningStringAttributeContraints(configuredAttributes, input) { - subvalue := make(map[string]interface{}) + if apiObject.StringAttributeConstraints != nil && !skipFlatteningStringAttributeContraints(configuredAttributes, &apiObject) { + sact := make(map[string]interface{}) - if input.StringAttributeConstraints.MinLength != nil { - subvalue["min_length"] = aws.StringValue(input.StringAttributeConstraints.MinLength) + if 
apiObject.StringAttributeConstraints.MaxLength != nil { + sact["max_length"] = aws.ToString(apiObject.StringAttributeConstraints.MaxLength) } - if input.StringAttributeConstraints.MaxLength != nil { - subvalue["max_length"] = aws.StringValue(input.StringAttributeConstraints.MaxLength) + if apiObject.StringAttributeConstraints.MinLength != nil { + sact["min_length"] = aws.ToString(apiObject.StringAttributeConstraints.MinLength) } - value["string_attribute_constraints"] = []map[string]interface{}{subvalue} + tfMap["string_attribute_constraints"] = []interface{}{sact} } - values = append(values, value) + tfList = append(tfList, tfMap) } - return values + return tfList } -func expandUserPoolUsernameConfiguration(config map[string]interface{}) *cognitoidentityprovider.UsernameConfigurationType { - usernameConfigurationType := &cognitoidentityprovider.UsernameConfigurationType{ - CaseSensitive: aws.Bool(config["case_sensitive"].(bool)), +func expandUsernameConfigurationType(tfMap map[string]interface{}) *awstypes.UsernameConfigurationType { + apiObject := &awstypes.UsernameConfigurationType{ + CaseSensitive: aws.Bool(tfMap["case_sensitive"].(bool)), } - return usernameConfigurationType + return apiObject } -func flattenUserPoolUsernameConfiguration(u *cognitoidentityprovider.UsernameConfigurationType) []map[string]interface{} { - m := map[string]interface{}{} - - if u == nil { +func flattenUsernameConfigurationType(apiObject *awstypes.UsernameConfigurationType) []interface{} { + if apiObject == nil { return nil } - m["case_sensitive"] = aws.BoolValue(u.CaseSensitive) + tfMap := map[string]interface{}{} + + tfMap["case_sensitive"] = aws.ToBool(apiObject.CaseSensitive) - return []map[string]interface{}{m} + return []interface{}{tfMap} } -func expandUserPoolVerificationMessageTemplate(config map[string]interface{}) *cognitoidentityprovider.VerificationMessageTemplateType { - verificationMessageTemplateType := &cognitoidentityprovider.VerificationMessageTemplateType{} +func 
expandVerificationMessageTemplateType(tfMap map[string]interface{}) *awstypes.VerificationMessageTemplateType { + apiObject := &awstypes.VerificationMessageTemplateType{} - if v, ok := config["default_email_option"]; ok && v.(string) != "" { - verificationMessageTemplateType.DefaultEmailOption = aws.String(v.(string)) + if v, ok := tfMap["default_email_option"]; ok && v.(string) != "" { + apiObject.DefaultEmailOption = awstypes.DefaultEmailOptionType(v.(string)) } - if v, ok := config["email_message"]; ok && v.(string) != "" { - verificationMessageTemplateType.EmailMessage = aws.String(v.(string)) + if v, ok := tfMap["email_message"]; ok && v.(string) != "" { + apiObject.EmailMessage = aws.String(v.(string)) } - if v, ok := config["email_message_by_link"]; ok && v.(string) != "" { - verificationMessageTemplateType.EmailMessageByLink = aws.String(v.(string)) + if v, ok := tfMap["email_message_by_link"]; ok && v.(string) != "" { + apiObject.EmailMessageByLink = aws.String(v.(string)) } - if v, ok := config["email_subject"]; ok && v.(string) != "" { - verificationMessageTemplateType.EmailSubject = aws.String(v.(string)) + if v, ok := tfMap["email_subject"]; ok && v.(string) != "" { + apiObject.EmailSubject = aws.String(v.(string)) } - if v, ok := config["email_subject_by_link"]; ok && v.(string) != "" { - verificationMessageTemplateType.EmailSubjectByLink = aws.String(v.(string)) + if v, ok := tfMap["email_subject_by_link"]; ok && v.(string) != "" { + apiObject.EmailSubjectByLink = aws.String(v.(string)) } - if v, ok := config["sms_message"]; ok && v.(string) != "" { - verificationMessageTemplateType.SmsMessage = aws.String(v.(string)) + if v, ok := tfMap["sms_message"]; ok && v.(string) != "" { + apiObject.SmsMessage = aws.String(v.(string)) } - return verificationMessageTemplateType + return apiObject } -func flattenUserPoolVerificationMessageTemplate(s *cognitoidentityprovider.VerificationMessageTemplateType) []map[string]interface{} { - m := 
map[string]interface{}{} - - if s == nil { +func flattenVerificationMessageTemplateType(apiObject *awstypes.VerificationMessageTemplateType) []interface{} { + if apiObject == nil { return nil } - if s.DefaultEmailOption != nil { - m["default_email_option"] = aws.StringValue(s.DefaultEmailOption) + tfMap := map[string]interface{}{ + "default_email_option": apiObject.DefaultEmailOption, } - if s.EmailMessage != nil { - m["email_message"] = aws.StringValue(s.EmailMessage) + if apiObject.EmailMessage != nil { + tfMap["email_message"] = aws.ToString(apiObject.EmailMessage) } - if s.EmailMessageByLink != nil { - m["email_message_by_link"] = aws.StringValue(s.EmailMessageByLink) + if apiObject.EmailMessageByLink != nil { + tfMap["email_message_by_link"] = aws.ToString(apiObject.EmailMessageByLink) } - if s.EmailSubject != nil { - m["email_subject"] = aws.StringValue(s.EmailSubject) + if apiObject.EmailSubject != nil { + tfMap["email_subject"] = aws.ToString(apiObject.EmailSubject) } - if s.EmailSubjectByLink != nil { - m["email_subject_by_link"] = aws.StringValue(s.EmailSubjectByLink) + if apiObject.EmailSubjectByLink != nil { + tfMap["email_subject_by_link"] = aws.ToString(apiObject.EmailSubjectByLink) } - if s.SmsMessage != nil { - m["sms_message"] = aws.StringValue(s.SmsMessage) + if apiObject.SmsMessage != nil { + tfMap["sms_message"] = aws.ToString(apiObject.SmsMessage) } - if len(m) > 0 { - return []map[string]interface{}{m} + if len(tfMap) > 0 { + return []interface{}{tfMap} } - return []map[string]interface{}{} + return []interface{}{} } -func flattenUserPoolDeviceConfiguration(s *cognitoidentityprovider.DeviceConfigurationType) []map[string]interface{} { - config := map[string]interface{}{} +func flattenDeviceConfigurationType(apiObject *awstypes.DeviceConfigurationType) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{ + "challenge_required_on_new_device": apiObject.ChallengeRequiredOnNewDevice, + 
"device_only_remembered_on_user_prompt": apiObject.DeviceOnlyRememberedOnUserPrompt, + } - if s == nil { + return []interface{}{tfMap} +} + +func flattenPasswordPolicyType(apiObject *awstypes.PasswordPolicyType) []interface{} { + if apiObject == nil { return nil } - if s.ChallengeRequiredOnNewDevice != nil { - config["challenge_required_on_new_device"] = aws.BoolValue(s.ChallengeRequiredOnNewDevice) + tfMap := map[string]interface{}{ + "require_lowercase": apiObject.RequireLowercase, + "require_numbers": apiObject.RequireNumbers, + "require_symbols": apiObject.RequireSymbols, + "require_uppercase": apiObject.RequireUppercase, + "temporary_password_validity_days": apiObject.TemporaryPasswordValidityDays, } - if s.DeviceOnlyRememberedOnUserPrompt != nil { - config["device_only_remembered_on_user_prompt"] = aws.BoolValue(s.DeviceOnlyRememberedOnUserPrompt) + if apiObject.MinimumLength != nil { + tfMap["minimum_length"] = aws.ToInt32(apiObject.MinimumLength) } - return []map[string]interface{}{config} + if len(tfMap) > 0 { + return []interface{}{tfMap} + } + + return []interface{}{} } -func flattenUserPoolPasswordPolicy(s *cognitoidentityprovider.PasswordPolicyType) []map[string]interface{} { - m := map[string]interface{}{} +func expandPreTokenGenerationVersionConfigType(tfMap map[string]interface{}) *awstypes.PreTokenGenerationVersionConfigType { + apiObject := &awstypes.PreTokenGenerationVersionConfigType{ + LambdaArn: aws.String(tfMap["lambda_arn"].(string)), + LambdaVersion: awstypes.PreTokenGenerationLambdaVersionType(tfMap["lambda_version"].(string)), + } - if s == nil { + return apiObject +} + +func flattenPreTokenGenerationVersionConfigType(apiObject *awstypes.PreTokenGenerationVersionConfigType) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + tfMap["lambda_arn"] = aws.ToString(apiObject.LambdaArn) + tfMap["lambda_version"] = apiObject.LambdaVersion + + return []interface{}{tfMap} +} + +func 
expandCustomSMSLambdaVersionConfigType(tfMap map[string]interface{}) *awstypes.CustomSMSLambdaVersionConfigType { + apiObject := &awstypes.CustomSMSLambdaVersionConfigType{ + LambdaArn: aws.String(tfMap["lambda_arn"].(string)), + LambdaVersion: awstypes.CustomSMSSenderLambdaVersionType(tfMap["lambda_version"].(string)), + } + + return apiObject +} + +func flattenCustomSMSLambdaVersionConfigType(apiObject *awstypes.CustomSMSLambdaVersionConfigType) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + tfMap["lambda_arn"] = aws.ToString(apiObject.LambdaArn) + tfMap["lambda_version"] = apiObject.LambdaVersion + + return []interface{}{tfMap} +} + +func expandCustomEmailLambdaVersionConfigType(tfMap map[string]interface{}) *awstypes.CustomEmailLambdaVersionConfigType { + apiObject := &awstypes.CustomEmailLambdaVersionConfigType{ + LambdaArn: aws.String(tfMap["lambda_arn"].(string)), + LambdaVersion: awstypes.CustomEmailSenderLambdaVersionType(tfMap["lambda_version"].(string)), + } + + return apiObject +} + +func flattenCustomEmailLambdaVersionConfigType(apiObject *awstypes.CustomEmailLambdaVersionConfigType) []interface{} { + if apiObject == nil { return nil } - if s.MinimumLength != nil { - m["minimum_length"] = aws.Int64Value(s.MinimumLength) + tfMap := map[string]interface{}{} + + tfMap["lambda_arn"] = aws.ToString(apiObject.LambdaArn) + tfMap["lambda_version"] = apiObject.LambdaVersion + + return []interface{}{tfMap} +} + +func expandEmailConfigurationType(tfList []interface{}) *awstypes.EmailConfigurationType { + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.EmailConfigurationType{} + + if v, ok := tfMap["configuration_set"]; ok && v.(string) != "" { + apiObject.ConfigurationSet = aws.String(v.(string)) + } + + if v, ok := tfMap["email_sending_account"]; ok && v.(string) != "" { + apiObject.EmailSendingAccount = awstypes.EmailSendingAccountType(v.(string)) } - if s.RequireLowercase != nil { - 
m["require_lowercase"] = aws.BoolValue(s.RequireLowercase) + if v, ok := tfMap["from_email_address"]; ok && v.(string) != "" { + apiObject.From = aws.String(v.(string)) } - if s.RequireNumbers != nil { - m["require_numbers"] = aws.BoolValue(s.RequireNumbers) + if v, ok := tfMap["reply_to_email_address"]; ok && v.(string) != "" { + apiObject.ReplyToEmailAddress = aws.String(v.(string)) } - if s.RequireSymbols != nil { - m["require_symbols"] = aws.BoolValue(s.RequireSymbols) + if v, ok := tfMap["source_arn"]; ok && v.(string) != "" { + apiObject.SourceArn = aws.String(v.(string)) } - if s.RequireUppercase != nil { - m["require_uppercase"] = aws.BoolValue(s.RequireUppercase) + return apiObject +} + +func expandUserAttributeUpdateSettingsType(tfMap map[string]interface{}) *awstypes.UserAttributeUpdateSettingsType { + apiObject := &awstypes.UserAttributeUpdateSettingsType{} + + if v, ok := tfMap["attributes_require_verification_before_update"]; ok { + apiObject.AttributesRequireVerificationBeforeUpdate = flex.ExpandStringyValueSet[awstypes.VerifiedAttributeType](v.(*schema.Set)) } - if s.TemporaryPasswordValidityDays != nil { - m["temporary_password_validity_days"] = aws.Int64Value(s.TemporaryPasswordValidityDays) + return apiObject +} + +func flattenUserAttributeUpdateSettingsType(apiObject *awstypes.UserAttributeUpdateSettingsType) []interface{} { + if apiObject == nil { + return nil } - if len(m) > 0 { - return []map[string]interface{}{m} + // If this setting is enabled then disabled, the API returns a nested empty slice instead of nil + if len(apiObject.AttributesRequireVerificationBeforeUpdate) == 0 { + return nil + } + + tfMap := map[string]interface{}{} + tfMap["attributes_require_verification_before_update"] = apiObject.AttributesRequireVerificationBeforeUpdate + + return []interface{}{tfMap} +} + +// skipFlatteningStringAttributeContraints returns true when all of the schema arguments +// match an existing configured attribute, except an empty 
"string_attribute_constraints" block. +// In this situation the Describe API returns default constraint values, and a persistent diff +// would be present if written to state. +func skipFlatteningStringAttributeContraints(configuredAttributes []awstypes.SchemaAttributeType, apiObject *awstypes.SchemaAttributeType) bool { + for _, configuredAttribute := range configuredAttributes { + // Root elements are all equal + if reflect.DeepEqual(apiObject.AttributeDataType, configuredAttribute.AttributeDataType) && + reflect.DeepEqual(apiObject.DeveloperOnlyAttribute, configuredAttribute.DeveloperOnlyAttribute) && + reflect.DeepEqual(apiObject.Mutable, configuredAttribute.Mutable) && + reflect.DeepEqual(apiObject.Name, configuredAttribute.Name) && + reflect.DeepEqual(apiObject.Required, configuredAttribute.Required) && + // The configured "string_attribute_constraints" object is empty, but the returned value is not + (configuredAttribute.AttributeDataType == awstypes.AttributeDataTypeString && + configuredAttribute.StringAttributeConstraints == nil && + apiObject.StringAttributeConstraints != nil) { + return true + } } - return []map[string]interface{}{} + return false } -func UserPoolSchemaAttributeMatchesStandardAttribute(input *cognitoidentityprovider.SchemaAttributeType) bool { - if input == nil { +func userPoolSchemaAttributeMatchesStandardAttribute(apiObject *awstypes.SchemaAttributeType) bool { + if apiObject == nil { return false } // All standard attributes always returned by API // https://docs.aws.amazon.com/cognito/latest/developerguide/user-pool-settings-attributes.html#cognito-user-pools-standard-attributes - var standardAttributes = []cognitoidentityprovider.SchemaAttributeType{ + var standardAttributes = []awstypes.SchemaAttributeType{ { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: 
aws.String(names.AttrAddress), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("birthdate"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("10"), MinLength: aws.String("10"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String(names.AttrEmail), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeBoolean), + AttributeDataType: awstypes.AttributeDataTypeBoolean, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("email_verified"), Required: aws.Bool(false), }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), - Name: aws.String("gender"), + Name: aws.String("family_name"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: 
aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), - Name: aws.String("given_name"), + Name: aws.String("gender"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), - Name: aws.String("family_name"), + Name: aws.String("given_name"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("locale"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("middle_name"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: 
&awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String(names.AttrName), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("nickname"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("phone_number"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeBoolean), + AttributeDataType: awstypes.AttributeDataTypeBoolean, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("phone_number_verified"), Required: aws.Bool(false), }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, 
DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("picture"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("preferred_username"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String(names.AttrProfile), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(false), Name: aws.String("sub"), Required: aws.Bool(true), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("1"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeNumber), + AttributeDataType: awstypes.AttributeDataTypeNumber, DeveloperOnlyAttribute: aws.Bool(false), 
Mutable: aws.Bool(true), Name: aws.String("updated_at"), - NumberAttributeConstraints: &cognitoidentityprovider.NumberAttributeConstraintsType{ + NumberAttributeConstraints: &awstypes.NumberAttributeConstraintsType{ MinValue: aws.String("0"), }, Required: aws.Bool(false), }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("website"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, { - AttributeDataType: aws.String(cognitoidentityprovider.AttributeDataTypeString), + AttributeDataType: awstypes.AttributeDataTypeString, DeveloperOnlyAttribute: aws.Bool(false), Mutable: aws.Bool(true), Name: aws.String("zoneinfo"), Required: aws.Bool(false), - StringAttributeConstraints: &cognitoidentityprovider.StringAttributeConstraintsType{ + StringAttributeConstraints: &awstypes.StringAttributeConstraintsType{ MaxLength: aws.String("2048"), MinLength: aws.String("0"), }, }, } + for _, standardAttribute := range standardAttributes { - if reflect.DeepEqual(*input, standardAttribute) { + if reflect.DeepEqual(*apiObject, standardAttribute) { return true } } - return false -} - -func expandedUserPoolPreGenerationConfig(config map[string]interface{}) *cognitoidentityprovider.PreTokenGenerationVersionConfigType { - preTokenGenerationConfig := &cognitoidentityprovider.PreTokenGenerationVersionConfigType{ - LambdaArn: aws.String(config["lambda_arn"].(string)), - LambdaVersion: aws.String(config["lambda_version"].(string)), - } - - return preTokenGenerationConfig -} - -func flattenUserPoolPreTokenGenerationConfig(u *cognitoidentityprovider.PreTokenGenerationVersionConfigType) []map[string]interface{} { - m := 
map[string]interface{}{} - - if u == nil { - return nil - } - - m["lambda_arn"] = aws.StringValue(u.LambdaArn) - m["lambda_version"] = aws.StringValue(u.LambdaVersion) - - return []map[string]interface{}{m} -} - -func expandUserPoolCustomSMSSender(config map[string]interface{}) *cognitoidentityprovider.CustomSMSLambdaVersionConfigType { - usernameConfigurationType := &cognitoidentityprovider.CustomSMSLambdaVersionConfigType{ - LambdaArn: aws.String(config["lambda_arn"].(string)), - LambdaVersion: aws.String(config["lambda_version"].(string)), - } - - return usernameConfigurationType -} - -func flattenUserPoolCustomSMSSender(u *cognitoidentityprovider.CustomSMSLambdaVersionConfigType) []map[string]interface{} { - m := map[string]interface{}{} - - if u == nil { - return nil - } - - m["lambda_arn"] = aws.StringValue(u.LambdaArn) - m["lambda_version"] = aws.StringValue(u.LambdaVersion) - - return []map[string]interface{}{m} -} - -func expandUserPoolCustomEmailSender(config map[string]interface{}) *cognitoidentityprovider.CustomEmailLambdaVersionConfigType { - usernameConfigurationType := &cognitoidentityprovider.CustomEmailLambdaVersionConfigType{ - LambdaArn: aws.String(config["lambda_arn"].(string)), - LambdaVersion: aws.String(config["lambda_version"].(string)), - } - - return usernameConfigurationType -} - -func flattenUserPoolCustomEmailSender(u *cognitoidentityprovider.CustomEmailLambdaVersionConfigType) []map[string]interface{} { - m := map[string]interface{}{} - - if u == nil { - return nil - } - - m["lambda_arn"] = aws.StringValue(u.LambdaArn) - m["lambda_version"] = aws.StringValue(u.LambdaVersion) - - return []map[string]interface{}{m} -} - -func expandUserPoolEmailConfig(emailConfig []interface{}) *cognitoidentityprovider.EmailConfigurationType { - config := emailConfig[0].(map[string]interface{}) - - emailConfigurationType := &cognitoidentityprovider.EmailConfigurationType{} - - if v, ok := config["reply_to_email_address"]; ok && v.(string) != "" { - 
emailConfigurationType.ReplyToEmailAddress = aws.String(v.(string)) - } - - if v, ok := config["source_arn"]; ok && v.(string) != "" { - emailConfigurationType.SourceArn = aws.String(v.(string)) - } - - if v, ok := config["from_email_address"]; ok && v.(string) != "" { - emailConfigurationType.From = aws.String(v.(string)) - } - - if v, ok := config["email_sending_account"]; ok && v.(string) != "" { - emailConfigurationType.EmailSendingAccount = aws.String(v.(string)) - } - - if v, ok := config["configuration_set"]; ok && v.(string) != "" { - emailConfigurationType.ConfigurationSet = aws.String(v.(string)) - } - - return emailConfigurationType -} - -func expandUserPoolUserAttributeUpdateSettings(config map[string]interface{}) *cognitoidentityprovider.UserAttributeUpdateSettingsType { - userAttributeUpdateSettings := &cognitoidentityprovider.UserAttributeUpdateSettingsType{} - if v, ok := config["attributes_require_verification_before_update"]; ok { - userAttributeUpdateSettings.AttributesRequireVerificationBeforeUpdate = flex.ExpandStringSet(v.(*schema.Set)) - } - - return userAttributeUpdateSettings -} - -func flattenUserPoolUserAttributeUpdateSettings(u *cognitoidentityprovider.UserAttributeUpdateSettingsType) []map[string]interface{} { - if u == nil { - return nil - } - // If this setting is enabled then disabled, the API returns a nested empty slice instead of nil - if len(u.AttributesRequireVerificationBeforeUpdate) == 0 { - return nil - } - m := map[string]interface{}{} - m["attributes_require_verification_before_update"] = flex.FlattenStringSet(u.AttributesRequireVerificationBeforeUpdate) - - return []map[string]interface{}{m} -} - -// skipFlatteningStringAttributeContraints returns true when all of the schema arguments -// match an existing configured attribute, except an empty "string_attribute_constraints" block. -// In this situation the Describe API returns default constraint values, and a persistent diff -// would be present if written to state. 
-func skipFlatteningStringAttributeContraints(configuredAttributes []*cognitoidentityprovider.SchemaAttributeType, input *cognitoidentityprovider.SchemaAttributeType) bool { - skip := false - for _, configuredAttribute := range configuredAttributes { - // Root elements are all equal - if reflect.DeepEqual(input.AttributeDataType, configuredAttribute.AttributeDataType) && - reflect.DeepEqual(input.DeveloperOnlyAttribute, configuredAttribute.DeveloperOnlyAttribute) && - reflect.DeepEqual(input.Mutable, configuredAttribute.Mutable) && - reflect.DeepEqual(input.Name, configuredAttribute.Name) && - reflect.DeepEqual(input.Required, configuredAttribute.Required) && - // The configured "string_attribute_constraints" object is empty, but the returned value is not - (aws.StringValue(configuredAttribute.AttributeDataType) == cognitoidentityprovider.AttributeDataTypeString && - configuredAttribute.StringAttributeConstraints == nil && - input.StringAttributeConstraints != nil) { - skip = true - } - } - return skip + return false } diff --git a/internal/service/cognitoidp/user_pool_client.go b/internal/service/cognitoidp/user_pool_client.go index f0c12bea016..32923df0454 100644 --- a/internal/service/cognitoidp/user_pool_client.go +++ b/internal/service/cognitoidp/user_pool_client.go @@ -9,9 +9,9 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-framework-validators/helpers/validatordiag" "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" @@ -26,19 +26,25 @@ import ( 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-log/tflog" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) +var ( + timeUnitsType = fwtypes.StringEnumType[awstypes.TimeUnitsType]() +) + // @FrameworkResource(name="User Pool Client") func newUserPoolClientResource(context.Context) (resource.ResourceWithConfigure, error) { r := &userPoolClientResource{} @@ -50,7 +56,7 @@ type userPoolClientResource struct { framework.ResourceWithConfigure } -func (r *userPoolClientResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { +func (*userPoolClientResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { response.TypeName = "aws_cognito_user_pool_client" } @@ -72,7 +78,7 @@ func 
(r *userPoolClientResource) Schema(ctx context.Context, request resource.Sc Validators: []validator.Set{ setvalidator.SizeAtMost(3), setvalidator.ValueStringsAre( - stringvalidator.OneOf(cognitoidentityprovider.OAuthFlowType_Values()...), + enum.FrameworkValidate[awstypes.OAuthFlowType](), ), }, PlanModifiers: []planmodifier.Set{ @@ -156,7 +162,7 @@ func (r *userPoolClientResource) Schema(ctx context.Context, request resource.Sc Computed: true, Validators: []validator.Set{ setvalidator.ValueStringsAre( - stringvalidator.OneOf(cognitoidentityprovider.ExplicitAuthFlowsType_Values()...), + enum.FrameworkValidate[awstypes.ExplicitAuthFlowsType](), ), }, PlanModifiers: []planmodifier.Set{ @@ -196,11 +202,9 @@ func (r *userPoolClientResource) Schema(ctx context.Context, request resource.Sc Validators: userPoolClientNameValidator, }, "prevent_user_existence_errors": schema.StringAttribute{ - Optional: true, - Computed: true, - Validators: []validator.String{ - stringvalidator.OneOf(cognitoidentityprovider.PreventUserExistenceErrorTypes_Values()...), - }, + CustomType: fwtypes.StringEnumType[awstypes.PreventUserExistenceErrorTypes](), + Optional: true, + Computed: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.UseStateForUnknown(), }, @@ -300,28 +304,22 @@ func (r *userPoolClientResource) Schema(ctx context.Context, request resource.Sc NestedObject: schema.NestedBlockObject{ Attributes: map[string]schema.Attribute{ "access_token": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(cognitoidentityprovider.TimeUnitsTypeHours), - Validators: []validator.String{ - stringvalidator.OneOf(cognitoidentityprovider.TimeUnitsType_Values()...), - }, + CustomType: timeUnitsType, + Optional: true, + Computed: true, + Default: timeUnitsType.AttributeDefault(awstypes.TimeUnitsTypeHours), }, "id_token": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: 
stringdefault.StaticString(cognitoidentityprovider.TimeUnitsTypeHours), - Validators: []validator.String{ - stringvalidator.OneOf(cognitoidentityprovider.TimeUnitsType_Values()...), - }, + CustomType: timeUnitsType, + Optional: true, + Computed: true, + Default: timeUnitsType.AttributeDefault(awstypes.TimeUnitsTypeHours), }, "refresh_token": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(cognitoidentityprovider.TimeUnitsTypeDays), - Validators: []validator.String{ - stringvalidator.OneOf(cognitoidentityprovider.TimeUnitsType_Values()...), - }, + CustomType: timeUnitsType, + Optional: true, + Computed: true, + Default: timeUnitsType.AttributeDefault(awstypes.TimeUnitsTypeDays), }, }, }, @@ -333,7 +331,7 @@ func (r *userPoolClientResource) Schema(ctx context.Context, request resource.Sc } func (r *userPoolClientResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { - conn := r.Meta().CognitoIDPConn(ctx) + conn := r.Meta().CognitoIDPClient(ctx) var config resourceUserPoolClientData response.Diagnostics.Append(request.Config.Get(ctx, &config)...) 
@@ -352,7 +350,7 @@ func (r *userPoolClientResource) Create(ctx context.Context, request resource.Cr return } - resp, err := conn.CreateUserPoolClientWithContext(ctx, params) + resp, err := conn.CreateUserPoolClient(ctx, params) if err != nil { response.Diagnostics.AddError( fmt.Sprintf("creating Cognito User Pool Client (%s)", plan.Name.ValueString()), @@ -363,29 +361,29 @@ func (r *userPoolClientResource) Create(ctx context.Context, request resource.Cr poolClient := resp.UserPoolClient - config.AccessTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) - config.AllowedOauthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthFlows) - config.AllowedOauthFlowsUserPoolClient = flex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) - config.AllowedOauthScopes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthScopes) + config.AccessTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) + config.AllowedOauthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthFlows) + config.AllowedOauthFlowsUserPoolClient = fwflex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) + config.AllowedOauthScopes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthScopes) config.AnalyticsConfiguration = flattenAnaylticsConfiguration(ctx, poolClient.AnalyticsConfiguration) - config.AuthSessionValidity = flex.Int64ToFramework(ctx, poolClient.AuthSessionValidity) - config.CallbackUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.CallbackURLs) - config.ClientSecret = flex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) - config.DefaultRedirectUri = flex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) - config.EnablePropagateAdditionalUserContextData = flex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) - config.EnableTokenRevocation = flex.BoolToFramework(ctx, 
poolClient.EnableTokenRevocation) - config.ExplicitAuthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ExplicitAuthFlows) - config.ID = flex.StringToFramework(ctx, poolClient.ClientId) - config.IdTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) - config.LogoutUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.LogoutURLs) - config.Name = flex.StringToFramework(ctx, poolClient.ClientName) - config.PreventUserExistenceErrors = flex.StringToFrameworkLegacy(ctx, poolClient.PreventUserExistenceErrors) - config.ReadAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ReadAttributes) - config.RefreshTokenValidity = flex.Int64ToFramework(ctx, poolClient.RefreshTokenValidity) - config.SupportedIdentityProviders = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.SupportedIdentityProviders) + config.AuthSessionValidity = fwflex.Int32ToFramework(ctx, poolClient.AuthSessionValidity) + config.CallbackUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.CallbackURLs) + config.ClientSecret = fwflex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) + config.DefaultRedirectUri = fwflex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) + config.EnablePropagateAdditionalUserContextData = fwflex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) + config.EnableTokenRevocation = fwflex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) + config.ExplicitAuthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ExplicitAuthFlows) + config.ID = fwflex.StringToFramework(ctx, poolClient.ClientId) + config.IdTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) + config.LogoutUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.LogoutURLs) + config.Name = fwflex.StringToFramework(ctx, poolClient.ClientName) + config.PreventUserExistenceErrors = fwtypes.StringEnumValue(poolClient.PreventUserExistenceErrors) + 
config.ReadAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ReadAttributes) + config.RefreshTokenValidity = fwflex.Int32ValueToFramework(ctx, poolClient.RefreshTokenValidity) + config.SupportedIdentityProviders = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.SupportedIdentityProviders) config.TokenValidityUnits = flattenTokenValidityUnits(ctx, poolClient.TokenValidityUnits) - config.UserPoolID = flex.StringToFramework(ctx, poolClient.UserPoolId) - config.WriteAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.WriteAttributes) + config.UserPoolID = fwflex.StringToFramework(ctx, poolClient.UserPoolId) + config.WriteAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.WriteAttributes) if response.Diagnostics.HasError() { return @@ -401,47 +399,51 @@ func (r *userPoolClientResource) Read(ctx context.Context, request resource.Read return } - conn := r.Meta().CognitoIDPConn(ctx) + conn := r.Meta().CognitoIDPClient(ctx) + + poolClient, err := findUserPoolClientByTwoPartKey(ctx, conn, state.UserPoolID.ValueString(), state.ID.ValueString()) - poolClient, err := FindCognitoUserPoolClientByID(ctx, conn, state.UserPoolID.ValueString(), state.ID.ValueString()) if tfresource.NotFound(err) { - create.LogNotFoundRemoveState(names.CognitoIDP, create.ErrActionReading, ResNameUserPoolClient, state.ID.ValueString()) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) response.State.RemoveResource(ctx) + return } + if err != nil { - response.Diagnostics.Append(create.DiagErrorFramework(names.CognitoIDP, create.ErrActionReading, ResNameUserPoolClient, state.ID.ValueString(), err)) + response.Diagnostics.AddError(fmt.Sprintf("reading Cognito User Pool Client (%s)", state.ID.ValueString()), err.Error()) + return } - state.AccessTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) - state.AllowedOauthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, 
poolClient.AllowedOAuthFlows) - state.AllowedOauthFlowsUserPoolClient = flex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) - state.AllowedOauthScopes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthScopes) + state.AccessTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) + state.AllowedOauthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthFlows) + state.AllowedOauthFlowsUserPoolClient = fwflex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) + state.AllowedOauthScopes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthScopes) state.AnalyticsConfiguration = flattenAnaylticsConfiguration(ctx, poolClient.AnalyticsConfiguration) - state.AuthSessionValidity = flex.Int64ToFramework(ctx, poolClient.AuthSessionValidity) - state.CallbackUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.CallbackURLs) - state.ClientSecret = flex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) - state.DefaultRedirectUri = flex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) - state.EnablePropagateAdditionalUserContextData = flex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) - state.EnableTokenRevocation = flex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) - state.ExplicitAuthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ExplicitAuthFlows) - state.ID = flex.StringToFramework(ctx, poolClient.ClientId) - state.IdTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) - state.LogoutUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.LogoutURLs) - state.Name = flex.StringToFramework(ctx, poolClient.ClientName) - state.PreventUserExistenceErrors = flex.StringToFrameworkLegacy(ctx, poolClient.PreventUserExistenceErrors) - state.ReadAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ReadAttributes) - state.RefreshTokenValidity = 
flex.Int64ToFramework(ctx, poolClient.RefreshTokenValidity) - state.SupportedIdentityProviders = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.SupportedIdentityProviders) + state.AuthSessionValidity = fwflex.Int32ToFramework(ctx, poolClient.AuthSessionValidity) + state.CallbackUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.CallbackURLs) + state.ClientSecret = fwflex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) + state.DefaultRedirectUri = fwflex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) + state.EnablePropagateAdditionalUserContextData = fwflex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) + state.EnableTokenRevocation = fwflex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) + state.ExplicitAuthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ExplicitAuthFlows) + state.ID = fwflex.StringToFramework(ctx, poolClient.ClientId) + state.IdTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) + state.LogoutUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.LogoutURLs) + state.Name = fwflex.StringToFramework(ctx, poolClient.ClientName) + state.PreventUserExistenceErrors = fwtypes.StringEnumValue(poolClient.PreventUserExistenceErrors) + state.ReadAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ReadAttributes) + state.RefreshTokenValidity = fwflex.Int32ValueToFramework(ctx, poolClient.RefreshTokenValidity) + state.SupportedIdentityProviders = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.SupportedIdentityProviders) if state.TokenValidityUnits.IsNull() && isDefaultTokenValidityUnits(poolClient.TokenValidityUnits) { elemType := fwtypes.NewObjectTypeOf[tokenValidityUnits](ctx).ObjectType state.TokenValidityUnits = types.ListNull(elemType) } else { state.TokenValidityUnits = flattenTokenValidityUnits(ctx, poolClient.TokenValidityUnits) } - state.UserPoolID = flex.StringToFramework(ctx, 
poolClient.UserPoolId) - state.WriteAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.WriteAttributes) + state.UserPoolID = fwflex.StringToFramework(ctx, poolClient.UserPoolId) + state.WriteAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.WriteAttributes) if response.Diagnostics.HasError() { return @@ -469,7 +471,7 @@ func (r *userPoolClientResource) Update(ctx context.Context, request resource.Up return } - conn := r.Meta().CognitoIDPConn(ctx) + conn := r.Meta().CognitoIDPClient(ctx) params := plan.updateInput(ctx, &response.Diagnostics) if response.Diagnostics.HasError() { @@ -477,14 +479,17 @@ func (r *userPoolClientResource) Update(ctx context.Context, request resource.Up } // If removing `token_validity_units`, reset to defaults if !state.TokenValidityUnits.IsNull() && plan.TokenValidityUnits.IsNull() { - params.TokenValidityUnits.AccessToken = aws.String(cognitoidentityprovider.TimeUnitsTypeHours) - params.TokenValidityUnits.IdToken = aws.String(cognitoidentityprovider.TimeUnitsTypeHours) - params.TokenValidityUnits.RefreshToken = aws.String(cognitoidentityprovider.TimeUnitsTypeDays) + params.TokenValidityUnits.AccessToken = awstypes.TimeUnitsTypeHours + params.TokenValidityUnits.IdToken = awstypes.TimeUnitsTypeHours + params.TokenValidityUnits.RefreshToken = awstypes.TimeUnitsTypeDays } - output, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.UpdateUserPoolClientWithContext(ctx, params) - }, cognitoidentityprovider.ErrCodeConcurrentModificationException) + const ( + timeout = 2 * time.Minute + ) + output, err := tfresource.RetryWhenIsA[*awstypes.ConcurrentModificationException](ctx, timeout, func() (interface{}, error) { + return conn.UpdateUserPoolClient(ctx, params) + }) if err != nil { response.Diagnostics.AddError( fmt.Sprintf("updating Cognito User Pool Client (%s)", plan.ID.ValueString()), @@ -495,34 +500,34 @@ func (r *userPoolClientResource) 
Update(ctx context.Context, request resource.Up poolClient := output.(*cognitoidentityprovider.UpdateUserPoolClientOutput).UserPoolClient - config.AccessTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) - config.AllowedOauthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthFlows) - config.AllowedOauthFlowsUserPoolClient = flex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) - config.AllowedOauthScopes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.AllowedOAuthScopes) + config.AccessTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.AccessTokenValidity) + config.AllowedOauthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthFlows) + config.AllowedOauthFlowsUserPoolClient = fwflex.BoolToFramework(ctx, poolClient.AllowedOAuthFlowsUserPoolClient) + config.AllowedOauthScopes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.AllowedOAuthScopes) config.AnalyticsConfiguration = flattenAnaylticsConfiguration(ctx, poolClient.AnalyticsConfiguration) - config.AuthSessionValidity = flex.Int64ToFramework(ctx, poolClient.AuthSessionValidity) - config.CallbackUrls = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.CallbackURLs) - config.ClientSecret = flex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) - config.DefaultRedirectUri = flex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) - config.EnablePropagateAdditionalUserContextData = flex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) - config.EnableTokenRevocation = flex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) - config.ExplicitAuthFlows = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ExplicitAuthFlows) - config.ID = flex.StringToFramework(ctx, poolClient.ClientId) - config.IdTokenValidity = flex.Int64ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) - config.LogoutUrls = flex.FlattenFrameworkStringSetLegacy(ctx, 
poolClient.LogoutURLs) - config.Name = flex.StringToFramework(ctx, poolClient.ClientName) - config.PreventUserExistenceErrors = flex.StringToFrameworkLegacy(ctx, poolClient.PreventUserExistenceErrors) - config.ReadAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.ReadAttributes) - config.RefreshTokenValidity = flex.Int64ToFramework(ctx, poolClient.RefreshTokenValidity) - config.SupportedIdentityProviders = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.SupportedIdentityProviders) + config.AuthSessionValidity = fwflex.Int32ToFramework(ctx, poolClient.AuthSessionValidity) + config.CallbackUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.CallbackURLs) + config.ClientSecret = fwflex.StringToFrameworkLegacy(ctx, poolClient.ClientSecret) + config.DefaultRedirectUri = fwflex.StringToFrameworkLegacy(ctx, poolClient.DefaultRedirectURI) + config.EnablePropagateAdditionalUserContextData = fwflex.BoolToFramework(ctx, poolClient.EnablePropagateAdditionalUserContextData) + config.EnableTokenRevocation = fwflex.BoolToFramework(ctx, poolClient.EnableTokenRevocation) + config.ExplicitAuthFlows = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ExplicitAuthFlows) + config.ID = fwflex.StringToFramework(ctx, poolClient.ClientId) + config.IdTokenValidity = fwflex.Int32ToFrameworkLegacy(ctx, poolClient.IdTokenValidity) + config.LogoutUrls = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.LogoutURLs) + config.Name = fwflex.StringToFramework(ctx, poolClient.ClientName) + config.PreventUserExistenceErrors = fwtypes.StringEnumValue(poolClient.PreventUserExistenceErrors) + config.ReadAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.ReadAttributes) + config.RefreshTokenValidity = fwflex.Int32ValueToFramework(ctx, poolClient.RefreshTokenValidity) + config.SupportedIdentityProviders = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.SupportedIdentityProviders) if !state.TokenValidityUnits.IsNull() 
&& plan.TokenValidityUnits.IsNull() && isDefaultTokenValidityUnits(poolClient.TokenValidityUnits) { elemType := fwtypes.NewObjectTypeOf[tokenValidityUnits](ctx).ObjectType config.TokenValidityUnits = types.ListNull(elemType) } else { config.TokenValidityUnits = flattenTokenValidityUnits(ctx, poolClient.TokenValidityUnits) } - config.UserPoolID = flex.StringToFramework(ctx, poolClient.UserPoolId) - config.WriteAttributes = flex.FlattenFrameworkStringSetLegacy(ctx, poolClient.WriteAttributes) + config.UserPoolID = fwflex.StringToFramework(ctx, poolClient.UserPoolId) + config.WriteAttributes = fwflex.FlattenFrameworkStringValueSetLegacy(ctx, poolClient.WriteAttributes) if response.Diagnostics.HasError() { return @@ -545,10 +550,10 @@ func (r *userPoolClientResource) Delete(ctx context.Context, request resource.De names.AttrUserPoolID: state.UserPoolID.ValueString(), }) - conn := r.Meta().CognitoIDPConn(ctx) + conn := r.Meta().CognitoIDPClient(ctx) - _, err := conn.DeleteUserPoolClientWithContext(ctx, params) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + _, err := conn.DeleteUserPoolClient(ctx, params) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return } @@ -602,91 +607,117 @@ func (r *userPoolClientResource) ConfigValidators(ctx context.Context) []resourc } } +func findUserPoolClientByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID, clientID string) (*awstypes.UserPoolClientType, error) { + input := &cognitoidentityprovider.DescribeUserPoolClientInput{ + ClientId: aws.String(clientID), + UserPoolId: aws.String(userPoolID), + } + + output, err := conn.DescribeUserPoolClient(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.UserPoolClient == nil { + return nil, tfresource.NewEmptyResultError(input) + 
} + + return output.UserPoolClient, nil +} + type resourceUserPoolClientData struct { - AccessTokenValidity types.Int64 `tfsdk:"access_token_validity"` - AllowedOauthFlows types.Set `tfsdk:"allowed_oauth_flows"` - AllowedOauthFlowsUserPoolClient types.Bool `tfsdk:"allowed_oauth_flows_user_pool_client"` - AllowedOauthScopes types.Set `tfsdk:"allowed_oauth_scopes"` - AnalyticsConfiguration types.List `tfsdk:"analytics_configuration"` - AuthSessionValidity types.Int64 `tfsdk:"auth_session_validity"` - CallbackUrls types.Set `tfsdk:"callback_urls"` - ClientSecret types.String `tfsdk:"client_secret"` - DefaultRedirectUri types.String `tfsdk:"default_redirect_uri"` - EnablePropagateAdditionalUserContextData types.Bool `tfsdk:"enable_propagate_additional_user_context_data"` - EnableTokenRevocation types.Bool `tfsdk:"enable_token_revocation"` - ExplicitAuthFlows types.Set `tfsdk:"explicit_auth_flows"` - GenerateSecret types.Bool `tfsdk:"generate_secret"` - ID types.String `tfsdk:"id"` - IdTokenValidity types.Int64 `tfsdk:"id_token_validity"` - LogoutUrls types.Set `tfsdk:"logout_urls"` - Name types.String `tfsdk:"name"` - PreventUserExistenceErrors types.String `tfsdk:"prevent_user_existence_errors"` - ReadAttributes types.Set `tfsdk:"read_attributes"` - RefreshTokenValidity types.Int64 `tfsdk:"refresh_token_validity"` - SupportedIdentityProviders types.Set `tfsdk:"supported_identity_providers"` - TokenValidityUnits types.List `tfsdk:"token_validity_units"` - UserPoolID types.String `tfsdk:"user_pool_id"` - WriteAttributes types.Set `tfsdk:"write_attributes"` + AccessTokenValidity types.Int64 `tfsdk:"access_token_validity"` + AllowedOauthFlows types.Set `tfsdk:"allowed_oauth_flows"` + AllowedOauthFlowsUserPoolClient types.Bool `tfsdk:"allowed_oauth_flows_user_pool_client"` + AllowedOauthScopes types.Set `tfsdk:"allowed_oauth_scopes"` + AnalyticsConfiguration types.List `tfsdk:"analytics_configuration"` + AuthSessionValidity types.Int64 `tfsdk:"auth_session_validity"` + 
CallbackUrls types.Set `tfsdk:"callback_urls"` + ClientSecret types.String `tfsdk:"client_secret"` + DefaultRedirectUri types.String `tfsdk:"default_redirect_uri"` + EnablePropagateAdditionalUserContextData types.Bool `tfsdk:"enable_propagate_additional_user_context_data"` + EnableTokenRevocation types.Bool `tfsdk:"enable_token_revocation"` + ExplicitAuthFlows types.Set `tfsdk:"explicit_auth_flows"` + GenerateSecret types.Bool `tfsdk:"generate_secret"` + ID types.String `tfsdk:"id"` + IdTokenValidity types.Int64 `tfsdk:"id_token_validity"` + LogoutUrls types.Set `tfsdk:"logout_urls"` + Name types.String `tfsdk:"name"` + PreventUserExistenceErrors fwtypes.StringEnum[awstypes.PreventUserExistenceErrorTypes] `tfsdk:"prevent_user_existence_errors"` + ReadAttributes types.Set `tfsdk:"read_attributes"` + RefreshTokenValidity types.Int64 `tfsdk:"refresh_token_validity"` + SupportedIdentityProviders types.Set `tfsdk:"supported_identity_providers"` + TokenValidityUnits types.List `tfsdk:"token_validity_units"` + UserPoolID types.String `tfsdk:"user_pool_id"` + WriteAttributes types.Set `tfsdk:"write_attributes"` } func (data resourceUserPoolClientData) createInput(ctx context.Context, diags *diag.Diagnostics) *cognitoidentityprovider.CreateUserPoolClientInput { return &cognitoidentityprovider.CreateUserPoolClientInput{ - AccessTokenValidity: flex.Int64FromFrameworkLegacy(ctx, data.AccessTokenValidity), - AllowedOAuthFlows: flex.ExpandFrameworkStringSet(ctx, data.AllowedOauthFlows), - AllowedOAuthFlowsUserPoolClient: flex.BoolFromFramework(ctx, data.AllowedOauthFlowsUserPoolClient), - AllowedOAuthScopes: flex.ExpandFrameworkStringSet(ctx, data.AllowedOauthScopes), + AccessTokenValidity: fwflex.Int32FromFrameworkLegacy(ctx, data.AccessTokenValidity), + AllowedOAuthFlows: fwflex.ExpandFrameworkStringyValueSet[awstypes.OAuthFlowType](ctx, data.AllowedOauthFlows), + AllowedOAuthFlowsUserPoolClient: fwflex.BoolValueFromFramework(ctx, data.AllowedOauthFlowsUserPoolClient), + 
AllowedOAuthScopes: fwflex.ExpandFrameworkStringValueSet(ctx, data.AllowedOauthScopes), AnalyticsConfiguration: expandAnaylticsConfiguration(ctx, data.AnalyticsConfiguration, diags), - AuthSessionValidity: flex.Int64FromFramework(ctx, data.AuthSessionValidity), - CallbackURLs: flex.ExpandFrameworkStringSet(ctx, data.CallbackUrls), - ClientName: flex.StringFromFramework(ctx, data.Name), - DefaultRedirectURI: flex.StringFromFrameworkLegacy(ctx, data.DefaultRedirectUri), - EnablePropagateAdditionalUserContextData: flex.BoolFromFramework(ctx, data.EnablePropagateAdditionalUserContextData), - EnableTokenRevocation: flex.BoolFromFramework(ctx, data.EnableTokenRevocation), - ExplicitAuthFlows: flex.ExpandFrameworkStringSet(ctx, data.ExplicitAuthFlows), - GenerateSecret: flex.BoolFromFramework(ctx, data.GenerateSecret), - IdTokenValidity: flex.Int64FromFrameworkLegacy(ctx, data.IdTokenValidity), - LogoutURLs: flex.ExpandFrameworkStringSet(ctx, data.LogoutUrls), - PreventUserExistenceErrors: flex.StringFromFrameworkLegacy(ctx, data.PreventUserExistenceErrors), - ReadAttributes: flex.ExpandFrameworkStringSet(ctx, data.ReadAttributes), - RefreshTokenValidity: flex.Int64FromFramework(ctx, data.RefreshTokenValidity), - SupportedIdentityProviders: flex.ExpandFrameworkStringSet(ctx, data.SupportedIdentityProviders), + AuthSessionValidity: fwflex.Int32FromFramework(ctx, data.AuthSessionValidity), + CallbackURLs: fwflex.ExpandFrameworkStringValueSet(ctx, data.CallbackUrls), + ClientName: fwflex.StringFromFramework(ctx, data.Name), + DefaultRedirectURI: fwflex.StringFromFrameworkLegacy(ctx, data.DefaultRedirectUri), + EnablePropagateAdditionalUserContextData: fwflex.BoolFromFramework(ctx, data.EnablePropagateAdditionalUserContextData), + EnableTokenRevocation: fwflex.BoolFromFramework(ctx, data.EnableTokenRevocation), + ExplicitAuthFlows: fwflex.ExpandFrameworkStringyValueSet[awstypes.ExplicitAuthFlowsType](ctx, data.ExplicitAuthFlows), + GenerateSecret: 
fwflex.BoolValueFromFramework(ctx, data.GenerateSecret), + IdTokenValidity: fwflex.Int32FromFrameworkLegacy(ctx, data.IdTokenValidity), + LogoutURLs: fwflex.ExpandFrameworkStringValueSet(ctx, data.LogoutUrls), + PreventUserExistenceErrors: data.PreventUserExistenceErrors.ValueEnum(), + ReadAttributes: fwflex.ExpandFrameworkStringValueSet(ctx, data.ReadAttributes), + RefreshTokenValidity: fwflex.Int32ValueFromFramework(ctx, data.RefreshTokenValidity), + SupportedIdentityProviders: fwflex.ExpandFrameworkStringValueSet(ctx, data.SupportedIdentityProviders), TokenValidityUnits: expandTokenValidityUnits(ctx, data.TokenValidityUnits, diags), - UserPoolId: flex.StringFromFramework(ctx, data.UserPoolID), - WriteAttributes: flex.ExpandFrameworkStringSet(ctx, data.WriteAttributes), + UserPoolId: fwflex.StringFromFramework(ctx, data.UserPoolID), + WriteAttributes: fwflex.ExpandFrameworkStringValueSet(ctx, data.WriteAttributes), } } func (data resourceUserPoolClientData) updateInput(ctx context.Context, diags *diag.Diagnostics) *cognitoidentityprovider.UpdateUserPoolClientInput { return &cognitoidentityprovider.UpdateUserPoolClientInput{ - AccessTokenValidity: flex.Int64FromFrameworkLegacy(ctx, data.AccessTokenValidity), - AllowedOAuthFlows: flex.ExpandFrameworkStringSet(ctx, data.AllowedOauthFlows), - AllowedOAuthFlowsUserPoolClient: flex.BoolFromFramework(ctx, data.AllowedOauthFlowsUserPoolClient), - AllowedOAuthScopes: flex.ExpandFrameworkStringSet(ctx, data.AllowedOauthScopes), + AccessTokenValidity: fwflex.Int32FromFrameworkLegacy(ctx, data.AccessTokenValidity), + AllowedOAuthFlows: fwflex.ExpandFrameworkStringyValueSet[awstypes.OAuthFlowType](ctx, data.AllowedOauthFlows), + AllowedOAuthFlowsUserPoolClient: fwflex.BoolValueFromFramework(ctx, data.AllowedOauthFlowsUserPoolClient), + AllowedOAuthScopes: fwflex.ExpandFrameworkStringValueSet(ctx, data.AllowedOauthScopes), AnalyticsConfiguration: expandAnaylticsConfiguration(ctx, data.AnalyticsConfiguration, diags), - 
AuthSessionValidity: flex.Int64FromFramework(ctx, data.AuthSessionValidity), - CallbackURLs: flex.ExpandFrameworkStringSet(ctx, data.CallbackUrls), - ClientId: flex.StringFromFramework(ctx, data.ID), - ClientName: flex.StringFromFramework(ctx, data.Name), - DefaultRedirectURI: flex.StringFromFrameworkLegacy(ctx, data.DefaultRedirectUri), - EnablePropagateAdditionalUserContextData: flex.BoolFromFramework(ctx, data.EnablePropagateAdditionalUserContextData), - EnableTokenRevocation: flex.BoolFromFramework(ctx, data.EnableTokenRevocation), - ExplicitAuthFlows: flex.ExpandFrameworkStringSet(ctx, data.ExplicitAuthFlows), - IdTokenValidity: flex.Int64FromFrameworkLegacy(ctx, data.IdTokenValidity), - LogoutURLs: flex.ExpandFrameworkStringSet(ctx, data.LogoutUrls), - PreventUserExistenceErrors: flex.StringFromFrameworkLegacy(ctx, data.PreventUserExistenceErrors), - ReadAttributes: flex.ExpandFrameworkStringSet(ctx, data.ReadAttributes), - RefreshTokenValidity: flex.Int64FromFramework(ctx, data.RefreshTokenValidity), - SupportedIdentityProviders: flex.ExpandFrameworkStringSet(ctx, data.SupportedIdentityProviders), + AuthSessionValidity: fwflex.Int32FromFramework(ctx, data.AuthSessionValidity), + CallbackURLs: fwflex.ExpandFrameworkStringValueSet(ctx, data.CallbackUrls), + ClientId: fwflex.StringFromFramework(ctx, data.ID), + ClientName: fwflex.StringFromFramework(ctx, data.Name), + DefaultRedirectURI: fwflex.StringFromFrameworkLegacy(ctx, data.DefaultRedirectUri), + EnablePropagateAdditionalUserContextData: fwflex.BoolFromFramework(ctx, data.EnablePropagateAdditionalUserContextData), + EnableTokenRevocation: fwflex.BoolFromFramework(ctx, data.EnableTokenRevocation), + ExplicitAuthFlows: fwflex.ExpandFrameworkStringyValueSet[awstypes.ExplicitAuthFlowsType](ctx, data.ExplicitAuthFlows), + IdTokenValidity: fwflex.Int32FromFrameworkLegacy(ctx, data.IdTokenValidity), + LogoutURLs: fwflex.ExpandFrameworkStringValueSet(ctx, data.LogoutUrls), + PreventUserExistenceErrors: 
data.PreventUserExistenceErrors.ValueEnum(), + ReadAttributes: fwflex.ExpandFrameworkStringValueSet(ctx, data.ReadAttributes), + RefreshTokenValidity: fwflex.Int32ValueFromFramework(ctx, data.RefreshTokenValidity), + SupportedIdentityProviders: fwflex.ExpandFrameworkStringValueSet(ctx, data.SupportedIdentityProviders), TokenValidityUnits: expandTokenValidityUnits(ctx, data.TokenValidityUnits, diags), - UserPoolId: flex.StringFromFramework(ctx, data.UserPoolID), - WriteAttributes: flex.ExpandFrameworkStringSet(ctx, data.WriteAttributes), + UserPoolId: fwflex.StringFromFramework(ctx, data.UserPoolID), + WriteAttributes: fwflex.ExpandFrameworkStringValueSet(ctx, data.WriteAttributes), } } func (data resourceUserPoolClientData) deleteInput(ctx context.Context) *cognitoidentityprovider.DeleteUserPoolClientInput { return &cognitoidentityprovider.DeleteUserPoolClientInput{ - ClientId: flex.StringFromFramework(ctx, data.ID), - UserPoolId: flex.StringFromFramework(ctx, data.UserPoolID), + ClientId: fwflex.StringFromFramework(ctx, data.ID), + UserPoolId: fwflex.StringFromFramework(ctx, data.UserPoolID), } } @@ -698,22 +729,22 @@ type analyticsConfiguration struct { UserDataShared types.Bool `tfsdk:"user_data_shared"` } -func (ac *analyticsConfiguration) expand(ctx context.Context) *cognitoidentityprovider.AnalyticsConfigurationType { +func (ac *analyticsConfiguration) expand(ctx context.Context) *awstypes.AnalyticsConfigurationType { if ac == nil { return nil } - result := &cognitoidentityprovider.AnalyticsConfigurationType{ - ApplicationArn: flex.StringFromFramework(ctx, ac.ApplicationARN), - ApplicationId: flex.StringFromFramework(ctx, ac.ApplicationID), - ExternalId: flex.StringFromFramework(ctx, ac.ExternalID), - RoleArn: flex.StringFromFramework(ctx, ac.RoleARN), - UserDataShared: flex.BoolFromFramework(ctx, ac.UserDataShared), + result := &awstypes.AnalyticsConfigurationType{ + ApplicationArn: fwflex.StringFromFramework(ctx, ac.ApplicationARN), + ApplicationId: 
fwflex.StringFromFramework(ctx, ac.ApplicationID), + ExternalId: fwflex.StringFromFramework(ctx, ac.ExternalID), + RoleArn: fwflex.StringFromFramework(ctx, ac.RoleARN), + UserDataShared: fwflex.BoolValueFromFramework(ctx, ac.UserDataShared), } return result } -func expandAnaylticsConfiguration(ctx context.Context, list types.List, diags *diag.Diagnostics) *cognitoidentityprovider.AnalyticsConfigurationType { +func expandAnaylticsConfiguration(ctx context.Context, list types.List, diags *diag.Diagnostics) *awstypes.AnalyticsConfigurationType { var analytics []analyticsConfiguration diags.Append(list.ElementsAs(ctx, &analytics, false)...) if diags.HasError() { @@ -726,7 +757,7 @@ func expandAnaylticsConfiguration(ctx context.Context, list types.List, diags *d return nil } -func flattenAnaylticsConfiguration(ctx context.Context, ac *cognitoidentityprovider.AnalyticsConfigurationType) types.List { +func flattenAnaylticsConfiguration(ctx context.Context, ac *awstypes.AnalyticsConfigurationType) types.List { attributeTypes := fwtypes.AttributeTypesMust[analyticsConfiguration](ctx) elemType := types.ObjectType{AttrTypes: attributeTypes} @@ -735,11 +766,11 @@ func flattenAnaylticsConfiguration(ctx context.Context, ac *cognitoidentityprovi } attrs := map[string]attr.Value{} - attrs["application_arn"] = flex.StringToFrameworkARN(ctx, ac.ApplicationArn) - attrs[names.AttrApplicationID] = flex.StringToFramework(ctx, ac.ApplicationId) - attrs[names.AttrExternalID] = flex.StringToFramework(ctx, ac.ExternalId) - attrs[names.AttrRoleARN] = flex.StringToFrameworkARN(ctx, ac.RoleArn) - attrs["user_data_shared"] = flex.BoolToFramework(ctx, ac.UserDataShared) + attrs["application_arn"] = fwflex.StringToFrameworkARN(ctx, ac.ApplicationArn) + attrs[names.AttrApplicationID] = fwflex.StringToFramework(ctx, ac.ApplicationId) + attrs[names.AttrExternalID] = fwflex.StringToFramework(ctx, ac.ExternalId) + attrs[names.AttrRoleARN] = fwflex.StringToFrameworkARN(ctx, ac.RoleArn) + 
attrs["user_data_shared"] = types.BoolValue(ac.UserDataShared) val := types.ObjectValueMust(attributeTypes, attrs) @@ -747,28 +778,28 @@ func flattenAnaylticsConfiguration(ctx context.Context, ac *cognitoidentityprovi } type tokenValidityUnits struct { - AccessToken types.String `tfsdk:"access_token"` - IdToken types.String `tfsdk:"id_token"` - RefreshToken types.String `tfsdk:"refresh_token"` + AccessToken fwtypes.StringEnum[awstypes.TimeUnitsType] `tfsdk:"access_token"` + IdToken fwtypes.StringEnum[awstypes.TimeUnitsType] `tfsdk:"id_token"` + RefreshToken fwtypes.StringEnum[awstypes.TimeUnitsType] `tfsdk:"refresh_token"` } -func isDefaultTokenValidityUnits(tvu *cognitoidentityprovider.TokenValidityUnitsType) bool { +func isDefaultTokenValidityUnits(tvu *awstypes.TokenValidityUnitsType) bool { if tvu == nil { return false } - return aws.StringValue(tvu.AccessToken) == cognitoidentityprovider.TimeUnitsTypeHours && - aws.StringValue(tvu.IdToken) == cognitoidentityprovider.TimeUnitsTypeHours && - aws.StringValue(tvu.RefreshToken) == cognitoidentityprovider.TimeUnitsTypeDays + return tvu.AccessToken == awstypes.TimeUnitsTypeHours && + tvu.IdToken == awstypes.TimeUnitsTypeHours && + tvu.RefreshToken == awstypes.TimeUnitsTypeDays } -func (tvu *tokenValidityUnits) expand(ctx context.Context) *cognitoidentityprovider.TokenValidityUnitsType { +func (tvu *tokenValidityUnits) expand(context.Context) *awstypes.TokenValidityUnitsType { if tvu == nil { return nil } - return &cognitoidentityprovider.TokenValidityUnitsType{ - AccessToken: flex.StringFromFramework(ctx, tvu.AccessToken), - IdToken: flex.StringFromFramework(ctx, tvu.IdToken), - RefreshToken: flex.StringFromFramework(ctx, tvu.RefreshToken), + return &awstypes.TokenValidityUnitsType{ + AccessToken: tvu.AccessToken.ValueEnum(), + IdToken: tvu.IdToken.ValueEnum(), + RefreshToken: tvu.RefreshToken.ValueEnum(), } } @@ -785,25 +816,25 @@ func resolveTokenValidityUnits(ctx context.Context, list types.List, diags *diag 
return nil } -func expandTokenValidityUnits(ctx context.Context, list types.List, diags *diag.Diagnostics) *cognitoidentityprovider.TokenValidityUnitsType { +func expandTokenValidityUnits(ctx context.Context, list types.List, diags *diag.Diagnostics) *awstypes.TokenValidityUnitsType { if tvu := resolveTokenValidityUnits(ctx, list, diags); tvu != nil { return tvu.expand(ctx) } - return &cognitoidentityprovider.TokenValidityUnitsType{} + return &awstypes.TokenValidityUnitsType{} } -func flattenTokenValidityUnits(ctx context.Context, tvu *cognitoidentityprovider.TokenValidityUnitsType) types.List { +func flattenTokenValidityUnits(ctx context.Context, tvu *awstypes.TokenValidityUnitsType) types.List { attributeTypes := fwtypes.AttributeTypesMust[tokenValidityUnits](ctx) elemType := types.ObjectType{AttrTypes: attributeTypes} - if tvu == nil || (tvu.AccessToken == nil && tvu.IdToken == nil && tvu.RefreshToken == nil) { + if tvu == nil || (tvu.AccessToken == "" && tvu.IdToken == "" && tvu.RefreshToken == "") { return types.ListNull(elemType) } attrs := map[string]attr.Value{} - attrs["access_token"] = flex.StringToFramework(ctx, tvu.AccessToken) - attrs["id_token"] = flex.StringToFramework(ctx, tvu.IdToken) - attrs["refresh_token"] = flex.StringToFramework(ctx, tvu.RefreshToken) + attrs["access_token"] = fwtypes.StringEnumValue(tvu.AccessToken) + attrs["id_token"] = fwtypes.StringEnumValue(tvu.IdToken) + attrs["refresh_token"] = fwtypes.StringEnumValue(tvu.RefreshToken) val := types.ObjectValueMust(attributeTypes, attrs) @@ -821,8 +852,8 @@ func (v resourceUserPoolClientAccessTokenValidityValidator) ValidateResource(ctx func(rupcd resourceUserPoolClientData) types.Int64 { return rupcd.AccessTokenValidity }, - func(tvu *tokenValidityUnits) types.String { - return tvu.AccessToken + func(tvu *tokenValidityUnits) awstypes.TimeUnitsType { + return tvu.AccessToken.ValueEnum() }, ) } @@ -838,8 +869,8 @@ func (v resourceUserPoolClientIDTokenValidityValidator) 
ValidateResource(ctx con func(rupcd resourceUserPoolClientData) types.Int64 { return rupcd.IdTokenValidity }, - func(tvu *tokenValidityUnits) types.String { - return tvu.IdToken + func(tvu *tokenValidityUnits) awstypes.TimeUnitsType { + return tvu.IdToken.ValueEnum() }, ) } @@ -855,8 +886,8 @@ func (v resourceUserPoolClientRefreshTokenValidityValidator) ValidateResource(ct func(rupcd resourceUserPoolClientData) types.Int64 { return rupcd.RefreshTokenValidity }, - func(tvu *tokenValidityUnits) types.String { - return tvu.RefreshToken + func(tvu *tokenValidityUnits) awstypes.TimeUnitsType { + return tvu.RefreshToken.ValueEnum() }, ) } @@ -876,7 +907,7 @@ func (v resourceUserPoolClientValidityValidator) MarkdownDescription(_ context.C return fmt.Sprintf("must have a duration between %s and %s", v.min, v.max) } -func (v resourceUserPoolClientValidityValidator) validate(ctx context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse, valF func(resourceUserPoolClientData) types.Int64, unitF func(*tokenValidityUnits) types.String) { +func (v resourceUserPoolClientValidityValidator) validate(ctx context.Context, req resource.ValidateConfigRequest, resp *resource.ValidateConfigResponse, valF func(resourceUserPoolClientData) types.Int64, unitF func(*tokenValidityUnits) awstypes.TimeUnitsType) { var config resourceUserPoolClientData resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) 
if resp.Diagnostics.HasError() { @@ -889,25 +920,23 @@ func (v resourceUserPoolClientValidityValidator) validate(ctx context.Context, r return } - val := aws.Int64Value(flex.Int64FromFramework(ctx, x)) - var duration time.Duration units := resolveTokenValidityUnits(ctx, config.TokenValidityUnits, &resp.Diagnostics) if resp.Diagnostics.HasError() { return } - if units == nil { + if val := aws.ToInt64(fwflex.Int64FromFramework(ctx, x)); units == nil { duration = time.Duration(val * int64(v.defaultUnit)) } else { - switch aws.StringValue(flex.StringFromFramework(ctx, unitF(units))) { - case cognitoidentityprovider.TimeUnitsTypeSeconds: + switch unitF(units) { + case awstypes.TimeUnitsTypeSeconds: duration = time.Duration(val * int64(time.Second)) - case cognitoidentityprovider.TimeUnitsTypeMinutes: + case awstypes.TimeUnitsTypeMinutes: duration = time.Duration(val * int64(time.Minute)) - case cognitoidentityprovider.TimeUnitsTypeHours: + case awstypes.TimeUnitsTypeHours: duration = time.Duration(val * int64(time.Hour)) - case cognitoidentityprovider.TimeUnitsTypeDays: + case awstypes.TimeUnitsTypeDays: duration = time.Duration(val * 24 * int64(time.Hour)) } } diff --git a/internal/service/cognitoidp/user_pool_client_data_source.go b/internal/service/cognitoidp/user_pool_client_data_source.go index 1d257a5c713..9d538f4eb9b 100644 --- a/internal/service/cognitoidp/user_pool_client_data_source.go +++ b/internal/service/cognitoidp/user_pool_client_data_source.go @@ -6,13 +6,12 @@ package cognitoidp import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - 
"github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -185,99 +184,89 @@ func dataSourceUserPoolClient() *schema.Resource { func dataSourceUserPoolClientRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - clientId := d.Get(names.AttrClientID).(string) - d.SetId(clientId) - - userPoolClient, err := FindCognitoUserPoolClientByID(ctx, conn, d.Get(names.AttrUserPoolID).(string), d.Id()) + clientID := d.Get(names.AttrClientID).(string) + userPoolClient, err := findUserPoolClientByTwoPartKey(ctx, conn, d.Get(names.AttrUserPoolID).(string), clientID) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Cognito User Pool Client (%s): %s", clientId, err) + return sdkdiag.AppendErrorf(diags, "reading Cognito User Pool Client (%s): %s", clientID, err) } - d.Set(names.AttrUserPoolID, userPoolClient.UserPoolId) - d.Set(names.AttrName, userPoolClient.ClientName) - d.Set("explicit_auth_flows", flex.FlattenStringSet(userPoolClient.ExplicitAuthFlows)) - d.Set("read_attributes", flex.FlattenStringSet(userPoolClient.ReadAttributes)) - d.Set("write_attributes", flex.FlattenStringSet(userPoolClient.WriteAttributes)) - d.Set("refresh_token_validity", userPoolClient.RefreshTokenValidity) + d.SetId(clientID) d.Set("access_token_validity", userPoolClient.AccessTokenValidity) - d.Set("id_token_validity", userPoolClient.IdTokenValidity) - d.Set(names.AttrClientSecret, userPoolClient.ClientSecret) - d.Set("allowed_oauth_flows", flex.FlattenStringSet(userPoolClient.AllowedOAuthFlows)) + d.Set("allowed_oauth_flows", userPoolClient.AllowedOAuthFlows) d.Set("allowed_oauth_flows_user_pool_client", userPoolClient.AllowedOAuthFlowsUserPoolClient) - d.Set("allowed_oauth_scopes", flex.FlattenStringSet(userPoolClient.AllowedOAuthScopes)) - 
d.Set("callback_urls", flex.FlattenStringSet(userPoolClient.CallbackURLs)) - d.Set("default_redirect_uri", userPoolClient.DefaultRedirectURI) - d.Set("logout_urls", flex.FlattenStringSet(userPoolClient.LogoutURLs)) - d.Set("prevent_user_existence_errors", userPoolClient.PreventUserExistenceErrors) - d.Set("supported_identity_providers", flex.FlattenStringSet(userPoolClient.SupportedIdentityProviders)) - d.Set("enable_token_revocation", userPoolClient.EnableTokenRevocation) - d.Set("enable_propagate_additional_user_context_data", userPoolClient.EnablePropagateAdditionalUserContextData) - + d.Set("allowed_oauth_scopes", userPoolClient.AllowedOAuthScopes) if err := d.Set("analytics_configuration", flattenUserPoolClientAnalyticsConfig(userPoolClient.AnalyticsConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting analytics_configuration: %s", err) } - + d.Set("callback_urls", userPoolClient.CallbackURLs) + d.Set(names.AttrClientSecret, userPoolClient.ClientSecret) + d.Set("default_redirect_uri", userPoolClient.DefaultRedirectURI) + d.Set("enable_propagate_additional_user_context_data", userPoolClient.EnablePropagateAdditionalUserContextData) + d.Set("enable_token_revocation", userPoolClient.EnableTokenRevocation) + d.Set("explicit_auth_flows", userPoolClient.ExplicitAuthFlows) + d.Set("id_token_validity", userPoolClient.IdTokenValidity) + d.Set("logout_urls", userPoolClient.LogoutURLs) + d.Set(names.AttrName, userPoolClient.ClientName) + d.Set("prevent_user_existence_errors", userPoolClient.PreventUserExistenceErrors) + d.Set("read_attributes", userPoolClient.ReadAttributes) + d.Set("refresh_token_validity", userPoolClient.RefreshTokenValidity) + d.Set("supported_identity_providers", userPoolClient.SupportedIdentityProviders) if err := d.Set("token_validity_units", flattenUserPoolClientTokenValidityUnitsType(userPoolClient.TokenValidityUnits)); err != nil { return sdkdiag.AppendErrorf(diags, "setting token_validity_units: %s", err) } + 
d.Set(names.AttrUserPoolID, userPoolClient.UserPoolId) + d.Set("write_attributes", userPoolClient.WriteAttributes) + return diags } -func flattenUserPoolClientAnalyticsConfig(analyticsConfig *cognitoidentityprovider.AnalyticsConfigurationType) []interface{} { - if analyticsConfig == nil { +func flattenUserPoolClientAnalyticsConfig(apiObject *awstypes.AnalyticsConfigurationType) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "user_data_shared": aws.BoolValue(analyticsConfig.UserDataShared), + tfMap := map[string]interface{}{ + "user_data_shared": apiObject.UserDataShared, } - if analyticsConfig.ExternalId != nil { - m[names.AttrExternalID] = aws.StringValue(analyticsConfig.ExternalId) + if apiObject.ApplicationArn != nil { + tfMap["application_arn"] = aws.ToString(apiObject.ApplicationArn) } - if analyticsConfig.RoleArn != nil { - m[names.AttrRoleARN] = aws.StringValue(analyticsConfig.RoleArn) + if apiObject.ApplicationId != nil { + tfMap[names.AttrApplicationID] = aws.ToString(apiObject.ApplicationId) } - if analyticsConfig.ApplicationId != nil { - m[names.AttrApplicationID] = aws.StringValue(analyticsConfig.ApplicationId) + if apiObject.ExternalId != nil { + tfMap[names.AttrExternalID] = aws.ToString(apiObject.ExternalId) } - if analyticsConfig.ApplicationArn != nil { - m["application_arn"] = aws.StringValue(analyticsConfig.ApplicationArn) + if apiObject.RoleArn != nil { + tfMap[names.AttrRoleARN] = aws.ToString(apiObject.RoleArn) } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenUserPoolClientTokenValidityUnitsType(tokenValidityConfig *cognitoidentityprovider.TokenValidityUnitsType) []interface{} { - if tokenValidityConfig == nil { +func flattenUserPoolClientTokenValidityUnitsType(apiObject *awstypes.TokenValidityUnitsType) []interface{} { + if apiObject == nil { return nil } //tokenValidityConfig is never nil and if everything is empty it causes diffs - if tokenValidityConfig.IdToken == 
nil && tokenValidityConfig.AccessToken == nil && tokenValidityConfig.RefreshToken == nil { + if apiObject.IdToken == "" && apiObject.AccessToken == "" && apiObject.RefreshToken == "" { return nil } - m := map[string]interface{}{} - - if tokenValidityConfig.IdToken != nil { - m["id_token"] = aws.StringValue(tokenValidityConfig.IdToken) - } - - if tokenValidityConfig.AccessToken != nil { - m["access_token"] = aws.StringValue(tokenValidityConfig.AccessToken) - } - - if tokenValidityConfig.RefreshToken != nil { - m["refresh_token"] = aws.StringValue(tokenValidityConfig.RefreshToken) + tfMap := map[string]interface{}{ + "access_token": apiObject.AccessToken, + "id_token": apiObject.IdToken, + "refresh_token": apiObject.RefreshToken, } - return []interface{}{m} + return []interface{}{tfMap} } diff --git a/internal/service/cognitoidp/user_pool_client_data_source_test.go b/internal/service/cognitoidp/user_pool_client_data_source_test.go index 83fc61bd009..e3c8b80694b 100644 --- a/internal/service/cognitoidp/user_pool_client_data_source_test.go +++ b/internal/service/cognitoidp/user_pool_client_data_source_test.go @@ -6,7 +6,7 @@ package cognitoidp_test import ( "testing" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -15,7 +15,7 @@ import ( func TestAccCognitoIDPUserPoolClientDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "data.aws_cognito_user_pool_client.test" @@ -23,7 +23,6 @@ func TestAccCognitoIDPUserPoolClientDataSource_basic(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t); 
testAccPreCheckIdentityProvider(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckUserPoolClientDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccUserPoolClientDataSourceConfig_basic(rName), @@ -41,10 +40,10 @@ func TestAccCognitoIDPUserPoolClientDataSource_basic(t *testing.T) { } func testAccUserPoolClientDataSourceConfig_basic(rName string) string { - return testAccUserPoolClientConfig_basic(rName) + ` + return acctest.ConfigCompose(testAccUserPoolClientConfig_basic(rName), ` data "aws_cognito_user_pool_client" "test" { user_pool_id = aws_cognito_user_pool.test.id client_id = aws_cognito_user_pool_client.test.id } -` +`) } diff --git a/internal/service/cognitoidp/user_pool_client_test.go b/internal/service/cognitoidp/user_pool_client_test.go index 00c89447fe6..5f07f7aa681 100644 --- a/internal/service/cognitoidp/user_pool_client_test.go +++ b/internal/service/cognitoidp/user_pool_client_test.go @@ -5,13 +5,11 @@ package cognitoidp_test import ( "context" - "errors" "fmt" "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/aws/aws-sdk-go/service/pinpoint" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -24,7 +22,7 @@ import ( func TestAccCognitoIDPUserPoolClient_basic(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -76,7 +74,7 @@ func TestAccCognitoIDPUserPoolClient_basic(t *testing.T) { func TestAccCognitoIDPUserPoolClient_enableRevocation(t 
*testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -121,7 +119,7 @@ func TestAccCognitoIDPUserPoolClient_enableRevocation(t *testing.T) { func TestAccCognitoIDPUserPoolClient_accessTokenValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -176,11 +174,11 @@ func TestAccCognitoIDPUserPoolClient_accessTokenValidity_error(t *testing.T) { ExpectError: regexache.MustCompile(`Attribute access_token_validity must have a duration between 5m0s and\s+24h0m0s, got: 25h0m0s`), }, { - Config: testAccUserPoolClientConfig_accessTokenValidityUnit(rName, 2, cognitoidentityprovider.TimeUnitsTypeDays), + Config: testAccUserPoolClientConfig_accessTokenValidityUnit(rName, 2, string(awstypes.TimeUnitsTypeDays)), ExpectError: regexache.MustCompile(`Attribute access_token_validity must have a duration between 5m0s and\s+24h0m0s, got: 48h0m0s`), }, { - Config: testAccUserPoolClientConfig_accessTokenValidityUnit(rName, 4, cognitoidentityprovider.TimeUnitsTypeMinutes), + Config: testAccUserPoolClientConfig_accessTokenValidityUnit(rName, 4, string(awstypes.TimeUnitsTypeMinutes)), ExpectError: regexache.MustCompile(`Attribute access_token_validity must have a duration between 5m0s and\s+24h0m0s, got: 4m0s`), }, }, @@ -189,7 +187,7 @@ func TestAccCognitoIDPUserPoolClient_accessTokenValidity_error(t *testing.T) { func TestAccCognitoIDPUserPoolClient_idTokenValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_cognito_user_pool_client.test" @@ -244,11 +242,11 @@ func TestAccCognitoIDPUserPoolClient_idTokenValidity_error(t *testing.T) { ExpectError: regexache.MustCompile(`Attribute id_token_validity must have a duration between 5m0s and\s+24h0m0s,\s+got: 25h0m0s`), }, { - Config: testAccUserPoolClientConfig_idTokenValidityUnit(rName, 2, cognitoidentityprovider.TimeUnitsTypeDays), + Config: testAccUserPoolClientConfig_idTokenValidityUnit(rName, 2, string(awstypes.TimeUnitsTypeDays)), ExpectError: regexache.MustCompile(`Attribute id_token_validity must have a duration between 5m0s and\s+24h0m0s,\s+got: 48h0m0s`), }, { - Config: testAccUserPoolClientConfig_idTokenValidityUnit(rName, 4, cognitoidentityprovider.TimeUnitsTypeMinutes), + Config: testAccUserPoolClientConfig_idTokenValidityUnit(rName, 4, string(awstypes.TimeUnitsTypeMinutes)), ExpectError: regexache.MustCompile(`Attribute id_token_validity must have a duration between 5m0s and\s+24h0m0s,\s+got: 4m0s`), }, }, @@ -257,7 +255,7 @@ func TestAccCognitoIDPUserPoolClient_idTokenValidity_error(t *testing.T) { func TestAccCognitoIDPUserPoolClient_refreshTokenValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -312,7 +310,7 @@ func TestAccCognitoIDPUserPoolClient_refreshTokenValidity_error(t *testing.T) { ExpectError: regexache.MustCompile(`Attribute refresh_token_validity must have a duration between 1h0m0s and\s+87600h0m0s,\s+got: 87624h0m0s`), }, { - Config: testAccUserPoolClientConfig_refreshTokenValidityUnit(rName, 59, cognitoidentityprovider.TimeUnitsTypeMinutes), + Config: testAccUserPoolClientConfig_refreshTokenValidityUnit(rName, 59, string(awstypes.TimeUnitsTypeMinutes)), ExpectError: regexache.MustCompile(`Attribute refresh_token_validity must have a duration between 1h0m0s and\s+87600h0m0s,\s+got: 
59m0s`), }, }, @@ -321,7 +319,7 @@ func TestAccCognitoIDPUserPoolClient_refreshTokenValidity_error(t *testing.T) { func TestAccCognitoIDPUserPoolClient_tokenValidityUnits(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -382,7 +380,7 @@ func TestAccCognitoIDPUserPoolClient_tokenValidityUnits(t *testing.T) { func TestAccCognitoIDPUserPoolClient_tokenValidityUnits_explicitDefaults(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -408,7 +406,7 @@ func TestAccCognitoIDPUserPoolClient_tokenValidityUnits_explicitDefaults(t *test func TestAccCognitoIDPUserPoolClient_tokenValidityUnits_AccessToken(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -456,7 +454,7 @@ func TestAccCognitoIDPUserPoolClient_tokenValidityUnits_AccessToken(t *testing.T func TestAccCognitoIDPUserPoolClient_tokenValidityUnitsWTokenValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -506,7 +504,7 @@ func TestAccCognitoIDPUserPoolClient_tokenValidityUnitsWTokenValidity(t *testing func TestAccCognitoIDPUserPoolClient_name(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -548,7 +546,7 @@ func TestAccCognitoIDPUserPoolClient_name(t *testing.T) { func TestAccCognitoIDPUserPoolClient_allFields(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -608,7 +606,7 @@ func TestAccCognitoIDPUserPoolClient_allFields(t *testing.T) { func TestAccCognitoIDPUserPoolClient_allFieldsUpdatingOneField(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -670,7 +668,7 @@ func TestAccCognitoIDPUserPoolClient_allFieldsUpdatingOneField(t *testing.T) { func TestAccCognitoIDPUserPoolClient_analyticsApplicationID(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" pinpointResourceName := "aws_pinpoint_app.analytics" @@ -679,7 +677,7 @@ func TestAccCognitoIDPUserPoolClient_analyticsApplicationID(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckIdentityProvider(ctx, t) - testAccPreCheckPinpointApp(ctx, t) + acctest.PreCheckPinpointApp(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -738,7 +736,7 @@ func TestAccCognitoIDPUserPoolClient_analyticsApplicationID(t *testing.T) { func TestAccCognitoIDPUserPoolClient_analyticsWithARN(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client 
awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" pinpointResourceName := "aws_pinpoint_app.analytics" @@ -747,7 +745,7 @@ func TestAccCognitoIDPUserPoolClient_analyticsWithARN(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) testAccPreCheckIdentityProvider(ctx, t) - testAccPreCheckPinpointApp(ctx, t) + acctest.PreCheckPinpointApp(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -795,7 +793,7 @@ func TestAccCognitoIDPUserPoolClient_analyticsWithARN(t *testing.T) { func TestAccCognitoIDPUserPoolClient_authSessionValidity(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -837,7 +835,7 @@ func TestAccCognitoIDPUserPoolClient_authSessionValidity(t *testing.T) { func TestAccCognitoIDPUserPoolClient_disappears(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -861,7 +859,7 @@ func TestAccCognitoIDPUserPoolClient_disappears(t *testing.T) { func TestAccCognitoIDPUserPoolClient_Disappears_userPool(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -885,7 +883,7 @@ func TestAccCognitoIDPUserPoolClient_Disappears_userPool(t *testing.T) { func TestAccCognitoIDPUserPoolClient_emptySets(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var 
client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -925,7 +923,7 @@ func TestAccCognitoIDPUserPoolClient_emptySets(t *testing.T) { func TestAccCognitoIDPUserPoolClient_nulls(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -965,7 +963,7 @@ func TestAccCognitoIDPUserPoolClient_nulls(t *testing.T) { func TestAccCognitoIDPUserPoolClient_frameworkMigration_nulls(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -1005,7 +1003,7 @@ func TestAccCognitoIDPUserPoolClient_frameworkMigration_nulls(t *testing.T) { func TestAccCognitoIDPUserPoolClient_frameworkMigration_basic(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -1037,7 +1035,7 @@ func TestAccCognitoIDPUserPoolClient_frameworkMigration_basic(t *testing.T) { func TestAccCognitoIDPUserPoolClient_frameworkMigration_emptySet(t *testing.T) { ctx := acctest.Context(t) - var client cognitoidentityprovider.UserPoolClientType + var client awstypes.UserPoolClientType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool_client.test" @@ -1066,50 +1064,50 @@ func TestAccCognitoIDPUserPoolClient_frameworkMigration_emptySet(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "write_attributes.#", acctest.Ct0), ), }, - { - ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, - Config: testAccUserPoolClientConfig_emptySets(rName), - PlanOnly: true, - }, + /* + TODO Investigate... + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccUserPoolClientConfig_emptySets(rName), + PlanOnly: true, + }, + */ }, }) } -func testAccUserPoolClientImportStateIDFunc(ctx context.Context, resourceName string) resource.ImportStateIdFunc { +func testAccUserPoolClientImportStateIDFunc(ctx context.Context, n string) resource.ImportStateIdFunc { return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return "", fmt.Errorf("Not found: %s", resourceName) + return "", fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return "", errors.New("No Cognito User Pool Client ID set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) - userPoolId := rs.Primary.Attributes[names.AttrUserPoolID] - clientId := rs.Primary.ID + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) - _, err := tfcognitoidp.FindCognitoUserPoolClientByID(ctx, conn, userPoolId, clientId) + userPoolID := rs.Primary.Attributes[names.AttrUserPoolID] + clientID := rs.Primary.ID + _, err := tfcognitoidp.FindUserPoolClientByTwoPartKey(ctx, conn, userPoolID, clientID) if err != nil { return "", err } - return fmt.Sprintf("%s/%s", userPoolId, clientId), nil + return fmt.Sprintf("%s/%s", userPoolID, clientID), nil } } func testAccCheckUserPoolClientDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_user_pool_client" { continue } - _, err := tfcognitoidp.FindCognitoUserPoolClientByID(ctx, conn, 
rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.ID) + _, err := tfcognitoidp.FindUserPoolClientByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.ID) + if tfresource.NotFound(err) { continue } @@ -1117,37 +1115,36 @@ func testAccCheckUserPoolClientDestroy(ctx context.Context) resource.TestCheckFu if err != nil { return err } + + return fmt.Errorf("Cognito User Pool Client %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckUserPoolClientExists(ctx context.Context, name string, client *cognitoidentityprovider.UserPoolClientType) resource.TestCheckFunc { +func testAccCheckUserPoolClientExists(ctx context.Context, n string, v *awstypes.UserPoolClientType) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", name) + return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No Cognito User Pool Client ID set") - } + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + output, err := tfcognitoidp.FindUserPoolClientByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.ID) - resp, err := tfcognitoidp.FindCognitoUserPoolClientByID(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.ID) if err != nil { return err } - *client = *resp + *v = *output return nil } } -func testAccUserPoolClientBaseConfig(rName string) string { +func testAccUserPoolClientConfig_base(rName string) string { return fmt.Sprintf(` resource "aws_cognito_user_pool" "test" { name = %[1]q @@ -1157,7 +1154,7 @@ resource "aws_cognito_user_pool" "test" { func testAccUserPoolClientConfig_basic(rName string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource 
"aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1169,7 +1166,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_revocation(rName string, revoke bool) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1182,7 +1179,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_accessTokenValidity(rName string, validity int) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1195,7 +1192,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_accessTokenValidityUnit(rName string, validity int, unit string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1212,7 +1209,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_idTokenValidity(rName string, validity int) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1225,7 +1222,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_idTokenValidityUnit(rName string, validity int, unit string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1242,7 +1239,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_refreshTokenValidity(rName string, refreshTokenValidity int) 
string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1255,7 +1252,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_refreshTokenValidityUnit(rName string, refreshTokenValidity int, unit string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1272,7 +1269,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_tokenValidityUnits(rName, value string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1289,7 +1286,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_tokenValidityUnits_Unit(rName, unit, value string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1304,7 +1301,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_tokenValidityUnits_explicitDefaults(rName, value string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1321,7 +1318,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_tokenValidityUnitsTokenValidity(rName, units string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q 
@@ -1339,7 +1336,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_name(rName, name string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1350,7 +1347,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_allFields(rName string, refreshTokenValidity int) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1377,9 +1374,9 @@ resource "aws_cognito_user_pool_client" "test" { `, rName, refreshTokenValidity)) } -func testAccUserPoolClientAnalyticsBaseConfig(rName string) string { +func testAccUserPoolClientConfig_baseAnalytics(rName string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` data "aws_caller_identity" "current" {} @@ -1434,7 +1431,7 @@ EOF func testAccUserPoolClientConfig_analyticsApplicationID(rName string) string { return acctest.ConfigCompose( - testAccUserPoolClientAnalyticsBaseConfig(rName), + testAccUserPoolClientConfig_baseAnalytics(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1451,7 +1448,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_analyticsShareData(rName string, share bool) string { return acctest.ConfigCompose( - testAccUserPoolClientAnalyticsBaseConfig(rName), + testAccUserPoolClientConfig_baseAnalytics(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1469,7 +1466,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_analyticsARN(rName string) string { return acctest.ConfigCompose( - 
testAccUserPoolClientAnalyticsBaseConfig(rName), + testAccUserPoolClientConfig_baseAnalytics(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1484,7 +1481,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_analyticsARNShareData(rName string, share bool) string { return acctest.ConfigCompose( - testAccUserPoolClientAnalyticsBaseConfig(rName), + testAccUserPoolClientConfig_baseAnalytics(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1500,7 +1497,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_authSessionValidity(rName string, validity int) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1513,7 +1510,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_emptySets(rName string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1533,7 +1530,7 @@ resource "aws_cognito_user_pool_client" "test" { func testAccUserPoolClientConfig_nulls(rName string) string { return acctest.ConfigCompose( - testAccUserPoolClientBaseConfig(rName), + testAccUserPoolClientConfig_base(rName), fmt.Sprintf(` resource "aws_cognito_user_pool_client" "test" { name = %[1]q @@ -1541,19 +1538,3 @@ resource "aws_cognito_user_pool_client" "test" { } `, rName)) } - -func testAccPreCheckPinpointApp(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).PinpointConn(ctx) - - input := &pinpoint.GetAppsInput{} - - _, err := conn.GetAppsWithContext(ctx, input) - - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - - if err != nil { - 
t.Fatalf("unexpected PreCheck error: %s", err) - } -} diff --git a/internal/service/cognitoidp/user_pool_clients_data_source.go b/internal/service/cognitoidp/user_pool_clients_data_source.go index f0eb5d705e1..0560ff81f0d 100644 --- a/internal/service/cognitoidp/user_pool_clients_data_source.go +++ b/internal/service/cognitoidp/user_pool_clients_data_source.go @@ -6,8 +6,8 @@ package cognitoidp import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -44,34 +44,26 @@ func dataSourceUserPoolClients() *schema.Resource { func dataSourceuserPoolClientsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID := d.Get(names.AttrUserPoolID).(string) input := &cognitoidentityprovider.ListUserPoolClientsInput{ UserPoolId: aws.String(userPoolID), } + var clientIDs, clientNames []string - var clientIDs []string - var clientNames []string - err := conn.ListUserPoolClientsPagesWithContext(ctx, input, func(page *cognitoidentityprovider.ListUserPoolClientsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := cognitoidentityprovider.NewListUserPoolClientsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Cognito User Pool Clients (%s): %s", userPoolID, err) } for _, v := range page.UserPoolClients { - if v == nil { - continue - } - - clientNames = append(clientNames, aws.StringValue(v.ClientName)) - clientIDs = append(clientIDs, 
aws.StringValue(v.ClientId)) + clientNames = append(clientNames, aws.ToString(v.ClientName)) + clientIDs = append(clientIDs, aws.ToString(v.ClientId)) } - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "getting user pool clients: %s", err) } d.SetId(userPoolID) diff --git a/internal/service/cognitoidp/user_pool_data_source.go b/internal/service/cognitoidp/user_pool_data_source.go new file mode 100644 index 00000000000..3569832adfb --- /dev/null +++ b/internal/service/cognitoidp/user_pool_data_source.go @@ -0,0 +1,274 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package cognitoidp + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource("aws_cognito_user_group", name="User Pool") +func newUserPoolDataSource(context.Context) (datasource.DataSourceWithConfigure, error) { + return &userPoolDataSource{}, nil +} + +type userPoolDataSource struct { + framework.DataSourceWithConfigure +} + +func (*userPoolDataSource) Metadata(_ context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name + response.TypeName = "aws_cognito_user_pool" +} + +func (d *userPoolDataSource) Schema(ctx context.Context, request datasource.SchemaRequest, response *datasource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: 
map[string]schema.Attribute{ + "account_recovery_setting": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[accountRecoverySettingTypeModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[accountRecoverySettingTypeModel](ctx), + }, + }, + "admin_create_user_config": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[adminCreateUserConfigTypeModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[adminCreateUserConfigTypeModel](ctx), + }, + }, + names.AttrARN: schema.StringAttribute{ + Computed: true, + }, + "auto_verified_attributes": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + }, + names.AttrCreationDate: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + }, + "custom_domain": schema.StringAttribute{ + Computed: true, + }, + names.AttrDeletionProtection: schema.StringAttribute{ + Computed: true, + }, + "device_configuration": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[deviceConfigurationTypeModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[deviceConfigurationTypeModel](ctx), + }, + }, + names.AttrDomain: schema.StringAttribute{ + Computed: true, + }, + "email_configuration": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[emailConfigurationTypeModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[emailConfigurationTypeModel](ctx), + }, + }, + "estimated_number_of_users": schema.Int64Attribute{ + Computed: true, + }, + names.AttrID: framework.IDAttribute(), + "lambda_config": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[lambdaConfigTypeModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: 
fwtypes.AttributeTypesMust[lambdaConfigTypeModel](ctx), + }, + }, + "last_modified_date": schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + }, + "mfa_configuration": schema.StringAttribute{ + Computed: true, + }, + names.AttrName: schema.StringAttribute{ + Computed: true, + }, + "schema_attributes": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[schemaAttributeTypeModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[schemaAttributeTypeModel](ctx), + }, + }, + "sms_authentication_message": schema.StringAttribute{ + Computed: true, + }, + "sms_configuration_failure": schema.StringAttribute{ + Computed: true, + }, + "sms_verification_message": schema.StringAttribute{ + Computed: true, + }, + names.AttrUserPoolID: schema.StringAttribute{ + Required: true, + }, + "user_pool_tags": tftags.TagsAttributeComputedOnly(), + "username_attributes": schema.ListAttribute{ + Computed: true, + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + }, + }, + } +} + +func (d *userPoolDataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + var data userPoolDataSourceModel + response.Diagnostics.Append(request.Config.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := d.Meta().CognitoIDPClient(ctx) + + userPoolID := data.UserPoolID.ValueString() + output, err := findUserPoolByID(ctx, conn, userPoolID) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Cognito User Pool (%s)", userPoolID), err.Error()) + + return + } + + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + data.ID = fwflex.StringValueToFramework(ctx, userPoolID) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +type userPoolDataSourceModel struct { + AccountRecoverySetting fwtypes.ListNestedObjectValueOf[accountRecoverySettingTypeModel] `tfsdk:"account_recovery_setting"` + AdminCreateUserConfig fwtypes.ListNestedObjectValueOf[adminCreateUserConfigTypeModel] `tfsdk:"admin_create_user_config"` + ARN types.String `tfsdk:"arn"` + AutoVerifiedAttributes fwtypes.ListValueOf[types.String] `tfsdk:"auto_verified_attributes"` + CreationDate timetypes.RFC3339 `tfsdk:"creation_date"` + CustomDomain types.String `tfsdk:"custom_domain"` + DeletionProtection types.String `tfsdk:"deletion_protection"` + DeviceConfiguration fwtypes.ListNestedObjectValueOf[deviceConfigurationTypeModel] `tfsdk:"device_configuration"` + Domain types.String `tfsdk:"domain"` + EmailConfiguration fwtypes.ListNestedObjectValueOf[emailConfigurationTypeModel] `tfsdk:"email_configuration"` + EstimatedNumberOfUsers types.Int64 `tfsdk:"estimated_number_of_users"` + ID types.String `tfsdk:"id"` + LambdaConfig fwtypes.ListNestedObjectValueOf[lambdaConfigTypeModel] `tfsdk:"lambda_config"` + LastModifiedDate timetypes.RFC3339 `tfsdk:"last_modified_date"` + MFAConfiguration types.String `tfsdk:"mfa_configuration"` + Name types.String `tfsdk:"name"` + SchemaAttributes fwtypes.ListNestedObjectValueOf[schemaAttributeTypeModel] `tfsdk:"schema_attributes"` + SMSAuthenticationMessage types.String `tfsdk:"sms_authentication_message"` + SMSConfigurationFailure types.String `tfsdk:"sms_configuration_failure"` + SMSVerificationMessage types.String `tfsdk:"sms_verification_message"` + UserPoolID types.String `tfsdk:"user_pool_id"` + UserPoolTags types.Map `tfsdk:"user_pool_tags"` + UsernameAttributes fwtypes.ListValueOf[types.String] `tfsdk:"username_attributes"` +} + +type accountRecoverySettingTypeModel struct { + RecoveryMechanism fwtypes.ListNestedObjectValueOf[recoveryOptionTypeModel] `tfsdk:"recovery_mechanism"` +} + +type recoveryOptionTypeModel struct { + Name types.String `tfsdk:"name"` + Priority types.Int64 
`tfsdk:"priority"` +} + +type adminCreateUserConfigTypeModel struct { + AllowAdminCreateUserOnly types.Bool `tfsdk:"allow_admin_create_user_only"` + InviteMessageTemplate fwtypes.ListNestedObjectValueOf[messageTemplateTypeModel] `tfsdk:"invite_message_template"` + UnusedAccountValidityDays types.Int64 `tfsdk:"unused_account_validity_days"` +} + +type messageTemplateTypeModel struct { + EmailMessage types.String `tfsdk:"email_message"` + EmailSubject types.String `tfsdk:"email_subject"` + SMSMessage types.String `tfsdk:"sms_message"` +} + +type deviceConfigurationTypeModel struct { + ChallengeRequiredOnNewDevice types.Bool `tfsdk:"challenge_required_on_new_device"` + DeviceOnlyRememberedOnUserPrompt types.Bool `tfsdk:"device_only_remembered_on_user_prompt"` +} + +type emailConfigurationTypeModel struct { + ConfigurationSet types.String `tfsdk:"configuration_set"` + EmailSendingAccount types.String `tfsdk:"email_sending_account"` + From types.String `tfsdk:"from"` + ReplyToEmailAddress types.String `tfsdk:"reply_to_email_address"` + SourceARN types.String `tfsdk:"source_arn"` +} + +type lambdaConfigTypeModel struct { + CreateAuthChallenge types.String `tfsdk:"create_auth_challenge"` + CustomEmailSender fwtypes.ListNestedObjectValueOf[customEmailLambdaVersionConfigTypeModel] `tfsdk:"custom_email_sender"` + CustomMessage types.String `tfsdk:"custom_message"` + CustomSMSSender fwtypes.ListNestedObjectValueOf[customSMSLambdaVersionConfigTypeModel] `tfsdk:"custom_sms_sender"` + DefineAuthChallenge types.String `tfsdk:"define_auth_challenge"` + KMSKeyID types.String `tfsdk:"kms_key_id"` + PostAuthentication types.String `tfsdk:"post_authentication"` + PostConfirmation types.String `tfsdk:"post_confirmation"` + PreAuthentication types.String `tfsdk:"pre_authentication"` + PreSignUp types.String `tfsdk:"pre_sign_up"` + PreTokenGeneration types.String `tfsdk:"pre_token_generation"` + PreTokenGenerationConfig 
fwtypes.ListNestedObjectValueOf[preTokenGenerationVersionConfigTypeModel] `tfsdk:"pre_token_generation_config"` + UserMigration types.String `tfsdk:"user_migration"` + VerifyAuthChallengeResponse types.String `tfsdk:"verify_auth_challenge_response"` +} + +type customEmailLambdaVersionConfigTypeModel struct { + LambdaARN types.String `tfsdk:"lambda_arn"` + LambdaVersion types.String `tfsdk:"lambda_version"` +} + +type customSMSLambdaVersionConfigTypeModel struct { + LambdaARN types.String `tfsdk:"lambda_arn"` + LambdaVersion types.String `tfsdk:"lambda_version"` +} + +type preTokenGenerationVersionConfigTypeModel struct { + LambdaARN types.String `tfsdk:"lambda_arn"` + LambdaVersion types.String `tfsdk:"lambda_version"` +} + +type schemaAttributeTypeModel struct { + AttributeDataType types.String `tfsdk:"attribute_data_type"` + DeveloperOnlyAttribute types.Bool `tfsdk:"developer_only_attribute"` + Mutable types.Bool `tfsdk:"mutable"` + Name types.String `tfsdk:"name"` + NumberAttributeConstraints fwtypes.ListNestedObjectValueOf[numberAttributeConstraintsTypeModel] `tfsdk:"number_attribute_constraints"` + Required types.Bool `tfsdk:"required"` + StringAttributeConstraints fwtypes.ListNestedObjectValueOf[stringAttributeConstraintsTypeModel] `tfsdk:"string_attribute_constraints"` +} + +type numberAttributeConstraintsTypeModel struct { + MaxValue types.String `tfsdk:"max_value"` + MinValue types.String `tfsdk:"min_value"` +} + +type stringAttributeConstraintsTypeModel struct { + MaxLength types.String `tfsdk:"max_length"` + MinLength types.String `tfsdk:"min_length"` +} diff --git a/internal/service/cognitoidp/user_pool_data_source_test.go b/internal/service/cognitoidp/user_pool_data_source_test.go new file mode 100644 index 00000000000..e8f40fc1cd8 --- /dev/null +++ b/internal/service/cognitoidp/user_pool_data_source_test.go @@ -0,0 +1,140 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package cognitoidp_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/YakDriver/regexache" + awsTypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccCognitoIDPUserPoolDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + + var userpool awsTypes.UserPoolType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_cognito_user_pool.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + // acctest.PreCheckPartitionHasService(t, names.CognitoIDPServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccUserPoolDataSourceConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckUserPoolExists(ctx, dataSourceName, &userpool), + acctest.MatchResourceAttrRegionalARN(dataSourceName, names.AttrARN, "cognito-idp", regexache.MustCompile(`userpool/.*`)), + resource.TestCheckResourceAttr(dataSourceName, names.AttrName, rName), + ), + }, + }, + }) +} + +func TestAccCognitoIDPUserPoolDataSource_schemaAttributes(t *testing.T) { + ctx := acctest.Context(t) + + var userpool awsTypes.UserPoolType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_cognito_user_pool.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccUserPoolDataSourceConfig_schemaAttributes(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckUserPoolExists(ctx, dataSourceName, &userpool), + resource.TestCheckResourceAttr(dataSourceName, names.AttrName, rName), + testSchemaAttributes(dataSourceName), + ), + }, + }, + }) +} + +func testSchemaAttributes(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + // Get the number of schema_attributes + numAttributesStr, ok := rs.Primary.Attributes["schema_attributes.#"] + if !ok { + return fmt.Errorf("schema_attributes not found in resource %s", n) + } + numAttributes, err := strconv.Atoi(numAttributesStr) + if err != nil { + return fmt.Errorf("error parsing schema_attributes.#: %s", err) + } + + // Loop through the schema_attributes and check the mutable key in each attribute + checksCompleted := map[string]bool{ + names.AttrEmail: false, + } + for i := 0; i < numAttributes; i++ { + // Get the attribute + attribute := fmt.Sprintf("schema_attributes.%d.name", i) + name, ok := rs.Primary.Attributes[attribute] + if name == "" || !ok { + return fmt.Errorf("attribute not found at %s", name) + } + if name == names.AttrEmail { + if rs.Primary.Attributes[fmt.Sprintf("schema_attributes.%d.mutable", i)] != acctest.CtFalse { + return fmt.Errorf("mutable is not false for attribute %v", name) + } + checksCompleted[names.AttrEmail] = true + } + } + for k, v := range checksCompleted { + if !v { + return fmt.Errorf("attribute %v not found in schema_attributes", k) + } + } + + return nil + } +} + +func testAccUserPoolDataSourceConfig_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_cognito_user_pool" "test" { + name = %[1]q +} + +data "aws_cognito_user_pool" "test" { + 
user_pool_id = aws_cognito_user_pool.test.id +} +`, rName) +} + +func testAccUserPoolDataSourceConfig_schemaAttributes(rName string) string { + return acctest.ConfigCompose( + testAccUserPoolConfig_schemaAttributes(rName), + ` +data "aws_cognito_user_pool" "test" { + user_pool_id = aws_cognito_user_pool.test.id +} +`) +} diff --git a/internal/service/cognitoidp/user_pool_domain.go b/internal/service/cognitoidp/user_pool_domain.go index c72a6cc2fbb..94850423e54 100644 --- a/internal/service/cognitoidp/user_pool_domain.go +++ b/internal/service/cognitoidp/user_pool_domain.go @@ -8,16 +8,17 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -88,7 +89,7 @@ func resourceUserPoolDomain() *schema.Resource { func resourceUserPoolDomainCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := 
meta.(*conns.AWSClient).CognitoIDPClient(ctx) domain := d.Get(names.AttrDomain).(string) timeout := 1 * time.Minute @@ -98,13 +99,13 @@ func resourceUserPoolDomainCreate(ctx context.Context, d *schema.ResourceData, m } if v, ok := d.GetOk(names.AttrCertificateARN); ok { - input.CustomDomainConfig = &cognitoidentityprovider.CustomDomainConfigType{ + input.CustomDomainConfig = &awstypes.CustomDomainConfigType{ CertificateArn: aws.String(v.(string)), } timeout = 60 * time.Minute // Custom domains take more time to become active. } - _, err := conn.CreateUserPoolDomainWithContext(ctx, input) + _, err := conn.CreateUserPoolDomain(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Cognito User Pool Domain (%s): %s", domain, err) @@ -121,12 +122,12 @@ func resourceUserPoolDomainCreate(ctx context.Context, d *schema.ResourceData, m func resourceUserPoolDomainRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - desc, err := FindUserPoolDomain(ctx, conn, d.Id()) + desc, err := findUserPoolDomain(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { - create.LogNotFoundRemoveState(names.CognitoIDP, create.ErrActionReading, ResNameUserPoolDomain, d.Id()) + log.Printf("[WARN] Cognito User Pool Domain %s not found, removing from state", d.Id()) d.SetId("") return diags } @@ -153,17 +154,17 @@ func resourceUserPoolDomainRead(ctx context.Context, d *schema.ResourceData, met func resourceUserPoolDomainUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) input := &cognitoidentityprovider.UpdateUserPoolDomainInput{ - CustomDomainConfig: &cognitoidentityprovider.CustomDomainConfigType{ + CustomDomainConfig: 
&awstypes.CustomDomainConfigType{ CertificateArn: aws.String(d.Get(names.AttrCertificateARN).(string)), }, Domain: aws.String(d.Id()), UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), } - _, err := conn.UpdateUserPoolDomainWithContext(ctx, input) + _, err := conn.UpdateUserPoolDomain(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Cognito User Pool Domain (%s): %s", d.Id(), err) @@ -181,15 +182,15 @@ func resourceUserPoolDomainUpdate(ctx context.Context, d *schema.ResourceData, m func resourceUserPoolDomainDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) log.Printf("[DEBUG] Deleting Cognito User Pool Domain: %s", d.Id()) - _, err := conn.DeleteUserPoolDomainWithContext(ctx, &cognitoidentityprovider.DeleteUserPoolDomainInput{ + _, err := conn.DeleteUserPoolDomain(ctx, &cognitoidentityprovider.DeleteUserPoolDomainInput{ Domain: aws.String(d.Id()), UserPoolId: aws.String(d.Get(names.AttrUserPoolID).(string)), }) - if tfawserr.ErrMessageContains(err, cognitoidentityprovider.ErrCodeInvalidParameterException, "No such domain") { + if errs.IsAErrorMessageContains[*awstypes.InvalidParameterException](err, "No such domain") { return diags } @@ -197,21 +198,24 @@ func resourceUserPoolDomainDelete(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "deleting Cognito User Pool Domain (%s): %s", d.Id(), err) } - if _, err := waitUserPoolDomainDeleted(ctx, conn, d.Id(), 1*time.Minute); err != nil { + const ( + timeout = 1 * time.Minute + ) + if _, err := waitUserPoolDomainDeleted(ctx, conn, d.Id(), timeout); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Cognito User Pool Domain (%s) delete: %s", d.Id(), err) } return diags } -func FindUserPoolDomain(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, 
domain string) (*cognitoidentityprovider.DomainDescriptionType, error) { +func findUserPoolDomain(ctx context.Context, conn *cognitoidentityprovider.Client, domain string) (*awstypes.DomainDescriptionType, error) { input := &cognitoidentityprovider.DescribeUserPoolDomainInput{ Domain: aws.String(domain), } - output, err := conn.DescribeUserPoolDomainWithContext(ctx, input) + output, err := conn.DescribeUserPoolDomain(ctx, input) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -226,16 +230,16 @@ func FindUserPoolDomain(ctx context.Context, conn *cognitoidentityprovider.Cogni // { // "DomainDescription": {} // } - if output == nil || output.DomainDescription == nil || output.DomainDescription.Status == nil { + if output == nil || output.DomainDescription == nil || output.DomainDescription.Status == "" { return nil, tfresource.NewEmptyResultError(input) } return output.DomainDescription, nil } -func statusUserPoolDomain(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, domain string) retry.StateRefreshFunc { +func statusUserPoolDomain(ctx context.Context, conn *cognitoidentityprovider.Client, domain string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindUserPoolDomain(ctx, conn, domain) + output, err := findUserPoolDomain(ctx, conn, domain) if tfresource.NotFound(err) { return nil, "", nil @@ -245,47 +249,47 @@ func statusUserPoolDomain(ctx context.Context, conn *cognitoidentityprovider.Cog return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func waitUserPoolDomainCreated(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, domain string, timeout time.Duration) (*cognitoidentityprovider.DomainDescriptionType, error) { +func 
waitUserPoolDomainCreated(ctx context.Context, conn *cognitoidentityprovider.Client, domain string, timeout time.Duration) (*awstypes.DomainDescriptionType, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{cognitoidentityprovider.DomainStatusTypeCreating, cognitoidentityprovider.DomainStatusTypeUpdating}, - Target: []string{cognitoidentityprovider.DomainStatusTypeActive}, + Pending: enum.Slice(awstypes.DomainStatusTypeCreating, awstypes.DomainStatusTypeUpdating), + Target: enum.Slice(awstypes.DomainStatusTypeActive), Refresh: statusUserPoolDomain(ctx, conn, domain), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*cognitoidentityprovider.DomainDescriptionType); ok { + if output, ok := outputRaw.(*awstypes.DomainDescriptionType); ok { return output, err } return nil, err } -func waitUserPoolDomainUpdated(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, domain string, timeout time.Duration) (*cognitoidentityprovider.DomainDescriptionType, error) { +func waitUserPoolDomainUpdated(ctx context.Context, conn *cognitoidentityprovider.Client, domain string, timeout time.Duration) (*awstypes.DomainDescriptionType, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{cognitoidentityprovider.DomainStatusTypeUpdating}, - Target: []string{cognitoidentityprovider.DomainStatusTypeActive}, + Pending: enum.Slice(awstypes.DomainStatusTypeUpdating), + Target: enum.Slice(awstypes.DomainStatusTypeActive), Refresh: statusUserPoolDomain(ctx, conn, domain), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*cognitoidentityprovider.DomainDescriptionType); ok { + if output, ok := outputRaw.(*awstypes.DomainDescriptionType); ok { return output, err } return nil, err } -func waitUserPoolDomainDeleted(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, domain string, timeout time.Duration) 
(*cognitoidentityprovider.DomainDescriptionType, error) { +func waitUserPoolDomainDeleted(ctx context.Context, conn *cognitoidentityprovider.Client, domain string, timeout time.Duration) (*awstypes.DomainDescriptionType, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{cognitoidentityprovider.DomainStatusTypeUpdating, cognitoidentityprovider.DomainStatusTypeDeleting}, + Pending: enum.Slice(awstypes.DomainStatusTypeUpdating, awstypes.DomainStatusTypeDeleting), Target: []string{}, Refresh: statusUserPoolDomain(ctx, conn, domain), Timeout: timeout, @@ -293,7 +297,7 @@ func waitUserPoolDomainDeleted(ctx context.Context, conn *cognitoidentityprovide outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*cognitoidentityprovider.DomainDescriptionType); ok { + if output, ok := outputRaw.(*awstypes.DomainDescriptionType); ok { return output, err } diff --git a/internal/service/cognitoidp/user_pool_domain_test.go b/internal/service/cognitoidp/user_pool_domain_test.go index 3dbc29a640b..5ec08a8099d 100644 --- a/internal/service/cognitoidp/user_pool_domain_test.go +++ b/internal/service/cognitoidp/user_pool_domain_test.go @@ -5,12 +5,10 @@ package cognitoidp_test import ( "context" - "errors" "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go-v2/aws" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -79,10 +77,6 @@ func TestAccCognitoIDPUserPoolDomain_disappears(t *testing.T) { func TestAccCognitoIDPUserPoolDomain_custom(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - rootDomain := acctest.ACMCertificateDomainFromEnv(t) domain := acctest.ACMCertificateRandomSubDomain(rootDomain) poolName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -90,7 +84,7 @@ func TestAccCognitoIDPUserPoolDomain_custom(t *testing.T) { resourceName := "aws_cognito_user_pool_domain.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckRegion(t, endpoints.UsEast1RegionID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckRegion(t, names.USEast1RegionID) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckUserPoolDomainDestroy(ctx), @@ -119,10 +113,6 @@ func TestAccCognitoIDPUserPoolDomain_custom(t *testing.T) { func TestAccCognitoIDPUserPoolDomain_customCertUpdate(t *testing.T) { ctx := acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - rootDomain := acctest.ACMCertificateDomainFromEnv(t) domain := acctest.ACMCertificateRandomSubDomain(rootDomain) poolName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -133,7 +123,7 @@ func TestAccCognitoIDPUserPoolDomain_customCertUpdate(t *testing.T) { cognitoPoolResourceName := "aws_cognito_user_pool_domain.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckRegion(t, endpoints.UsEast1RegionID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckRegion(t, names.USEast1RegionID) }, ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckUserPoolDomainDestroy(ctx), @@ -164,11 +154,7 @@ func testAccCheckUserPoolDomainExists(ctx context.Context, n string) resource.Te return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No Cognito User Pool Domain ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) 
_, err := tfcognitoidp.FindUserPoolDomain(ctx, conn, rs.Primary.ID) @@ -178,7 +164,7 @@ func testAccCheckUserPoolDomainExists(ctx context.Context, n string) resource.Te func testAccCheckUserPoolDomainDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_user_pool_domain" { @@ -209,20 +195,12 @@ func testAccCheckUserPoolDomainCertMatches(ctx context.Context, cognitoResourceN return fmt.Errorf("Not found: %s", cognitoResourceName) } - if cognitoResource.Primary.ID == "" { - return errors.New("No Cognito User Pool Domain ID is set") - } - certResource, ok := s.RootModule().Resources[certResourceName] if !ok { return fmt.Errorf("Not found: %s", cognitoResourceName) } - if certResource.Primary.ID == "" { - return errors.New("No ACM Certificate ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) domain, err := tfcognitoidp.FindUserPoolDomain(ctx, conn, cognitoResource.Primary.ID) @@ -231,11 +209,11 @@ func testAccCheckUserPoolDomainCertMatches(ctx context.Context, cognitoResourceN } if domain.CustomDomainConfig == nil { - return fmt.Errorf("No Custom Domain set on Cognito User Pool: %s", aws.StringValue(domain.UserPoolId)) + return fmt.Errorf("No Custom Domain set on Cognito User Pool: %s", aws.ToString(domain.UserPoolId)) } - if aws.StringValue(domain.CustomDomainConfig.CertificateArn) != certResource.Primary.ID { - return fmt.Errorf("Certificate ARN on Custom Domain does not match, expected: %s, got: %s", certResource.Primary.ID, aws.StringValue(domain.CustomDomainConfig.CertificateArn)) + if aws.ToString(domain.CustomDomainConfig.CertificateArn) != certResource.Primary.ID { + return 
fmt.Errorf("Certificate ARN on Custom Domain does not match, expected: %s, got: %s", certResource.Primary.ID, aws.ToString(domain.CustomDomainConfig.CertificateArn)) } return nil diff --git a/internal/service/cognitoidp/user_pool_signing_certificate_data_source.go b/internal/service/cognitoidp/user_pool_signing_certificate_data_source.go index 9957f6ae849..8d139984b53 100644 --- a/internal/service/cognitoidp/user_pool_signing_certificate_data_source.go +++ b/internal/service/cognitoidp/user_pool_signing_certificate_data_source.go @@ -6,12 +6,16 @@ package cognitoidp import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -35,14 +39,10 @@ func dataSourceUserPoolSigningCertificate() *schema.Resource { func dataSourceUserPoolSigningCertificateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID := d.Get(names.AttrUserPoolID).(string) - input := &cognitoidentityprovider.GetSigningCertificateInput{ - UserPoolId: aws.String(userPoolID), - } - - output, err := conn.GetSigningCertificateWithContext(ctx, input) + output, err := findSigningCertificateByID(ctx, conn, userPoolID) 
if err != nil { return sdkdiag.AppendErrorf(diags, "reading Cognito User Pool (%s) Signing Certificate: %s", userPoolID, err) @@ -53,3 +53,28 @@ func dataSourceUserPoolSigningCertificateRead(ctx context.Context, d *schema.Res return diags } + +func findSigningCertificateByID(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID string) (*cognitoidentityprovider.GetSigningCertificateOutput, error) { + input := &cognitoidentityprovider.GetSigningCertificateInput{ + UserPoolId: aws.String(userPoolID), + } + + output, err := conn.GetSigningCertificate(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} diff --git a/internal/service/cognitoidp/user_pool_signing_certificate_data_source_test.go b/internal/service/cognitoidp/user_pool_signing_certificate_data_source_test.go index d5b8a8beb9a..f20b2e4fded 100644 --- a/internal/service/cognitoidp/user_pool_signing_certificate_data_source_test.go +++ b/internal/service/cognitoidp/user_pool_signing_certificate_data_source_test.go @@ -53,6 +53,12 @@ resource "aws_cognito_identity_provider" "test" { attribute_mapping = { email = "email" } + + lifecycle { + ignore_changes = [ + provider_details["ActiveEncryptionCertificate"], + ] + } } data "aws_cognito_user_pool_signing_certificate" "test" { diff --git a/internal/service/cognitoidp/user_pool_test.go b/internal/service/cognitoidp/user_pool_test.go index bf405aeb840..1550c9e4fcb 100644 --- a/internal/service/cognitoidp/user_pool_test.go +++ b/internal/service/cognitoidp/user_pool_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -34,7 +34,7 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { func TestAccCognitoIDPUserPool_basic(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -87,7 +87,7 @@ func TestAccCognitoIDPUserPool_basic(t *testing.T) { func TestAccCognitoIDPUserPool_deletionProtection(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -123,7 +123,7 @@ func TestAccCognitoIDPUserPool_deletionProtection(t *testing.T) { func TestAccCognitoIDPUserPool_recovery(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -187,7 +187,7 @@ func TestAccCognitoIDPUserPool_recovery(t *testing.T) { func TestAccCognitoIDPUserPool_withAdminCreateUser(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -228,7 +228,7 @@ func TestAccCognitoIDPUserPool_withAdminCreateUser(t *testing.T) { // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/11858 func TestAccCognitoIDPUserPool_withAdminCreateUserAndPasswordPolicy(t *testing.T) { ctx := acctest.Context(t) - var pool 
cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -257,7 +257,7 @@ func TestAccCognitoIDPUserPool_withAdminCreateUserAndPasswordPolicy(t *testing.T func TestAccCognitoIDPUserPool_withAdvancedSecurityMode(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -297,7 +297,7 @@ func TestAccCognitoIDPUserPool_withAdvancedSecurityMode(t *testing.T) { func TestAccCognitoIDPUserPool_withDevice(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -333,7 +333,7 @@ func TestAccCognitoIDPUserPool_withDevice(t *testing.T) { func TestAccCognitoIDPUserPool_withEmailVerificationMessage(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) subject := sdkacctest.RandString(10) updatedSubject := sdkacctest.RandString(10) @@ -373,7 +373,7 @@ func TestAccCognitoIDPUserPool_withEmailVerificationMessage(t *testing.T) { func TestAccCognitoIDPUserPool_MFA_sms(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_cognito_user_pool.test" @@ -401,7 +401,7 @@ func TestAccCognitoIDPUserPool_MFA_sms(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccUserPoolConfig_mfaConfiguration(rName, cognitoidentityprovider.UserPoolMfaTypeOff), + Config: testAccUserPoolConfig_mfaConfiguration(rName, 
string(awstypes.UserPoolMfaTypeOff)), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "OFF"), resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", acctest.Ct1), @@ -424,7 +424,7 @@ func TestAccCognitoIDPUserPool_MFA_sms(t *testing.T) { func TestAccCognitoIDPUserPool_MFA_smsAndSoftwareTokenMFA(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_cognito_user_pool.test" @@ -464,7 +464,7 @@ func TestAccCognitoIDPUserPool_MFA_smsAndSoftwareTokenMFA(t *testing.T) { ), }, { - Config: testAccUserPoolConfig_mfaConfiguration(rName, cognitoidentityprovider.UserPoolMfaTypeOff), + Config: testAccUserPoolConfig_mfaConfiguration(rName, string(awstypes.UserPoolMfaTypeOff)), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "OFF"), resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", acctest.Ct1), @@ -477,7 +477,7 @@ func TestAccCognitoIDPUserPool_MFA_smsAndSoftwareTokenMFA(t *testing.T) { func TestAccCognitoIDPUserPool_MFA_smsToSoftwareTokenMFA(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_cognito_user_pool.test" @@ -519,7 +519,7 @@ func TestAccCognitoIDPUserPool_MFA_smsToSoftwareTokenMFA(t *testing.T) { func TestAccCognitoIDPUserPool_MFA_softwareTokenMFA(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -545,7 +545,7 @@ func 
TestAccCognitoIDPUserPool_MFA_softwareTokenMFA(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccUserPoolConfig_mfaConfiguration(rName, cognitoidentityprovider.UserPoolMfaTypeOff), + Config: testAccUserPoolConfig_mfaConfiguration(rName, string(awstypes.UserPoolMfaTypeOff)), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(resourceName, "mfa_configuration", "OFF"), resource.TestCheckResourceAttr(resourceName, "sms_configuration.#", acctest.Ct0), @@ -567,7 +567,7 @@ func TestAccCognitoIDPUserPool_MFA_softwareTokenMFA(t *testing.T) { func TestAccCognitoIDPUserPool_MFA_softwareTokenMFAToSMS(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_cognito_user_pool.test" @@ -609,7 +609,7 @@ func TestAccCognitoIDPUserPool_MFA_softwareTokenMFAToSMS(t *testing.T) { func TestAccCognitoIDPUserPool_smsAuthenticationMessage(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) smsAuthenticationMessage1 := "test authentication message {####}" smsAuthenticationMessage2 := "test authentication message updated {####}" @@ -646,7 +646,7 @@ func TestAccCognitoIDPUserPool_smsAuthenticationMessage(t *testing.T) { func TestAccCognitoIDPUserPool_sms(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_cognito_user_pool.test" @@ -694,7 +694,7 @@ func TestAccCognitoIDPUserPool_sms(t *testing.T) { func TestAccCognitoIDPUserPool_SMS_snsRegion(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool 
awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_cognito_user_pool.test" @@ -726,7 +726,7 @@ func TestAccCognitoIDPUserPool_SMS_snsRegion(t *testing.T) { func TestAccCognitoIDPUserPool_SMS_externalID(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_cognito_user_pool.test" @@ -767,7 +767,7 @@ func TestAccCognitoIDPUserPool_SMS_externalID(t *testing.T) { func TestAccCognitoIDPUserPool_SMS_snsCallerARN(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) iamRoleResourceName := "aws_iam_role.test" resourceName := "aws_cognito_user_pool.test" @@ -808,7 +808,7 @@ func TestAccCognitoIDPUserPool_SMS_snsCallerARN(t *testing.T) { func TestAccCognitoIDPUserPool_smsVerificationMessage(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) smsVerificationMessage1 := "test verification message {####}" smsVerificationMessage2 := "test verification message updated {####}" @@ -845,7 +845,7 @@ func TestAccCognitoIDPUserPool_smsVerificationMessage(t *testing.T) { func TestAccCognitoIDPUserPool_withEmail(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -876,7 +876,7 @@ func TestAccCognitoIDPUserPool_withEmail(t *testing.T) { func TestAccCognitoIDPUserPool_withEmailSource(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType 
+ var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) replyTo := acctest.DefaultEmailAddress resourceName := "aws_cognito_user_pool.test" @@ -909,7 +909,7 @@ func TestAccCognitoIDPUserPool_withEmailSource(t *testing.T) { func TestAccCognitoIDPUserPool_tags(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -954,7 +954,7 @@ func TestAccCognitoIDPUserPool_tags(t *testing.T) { func TestAccCognitoIDPUserPool_withAliasAttributes(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -994,7 +994,7 @@ func TestAccCognitoIDPUserPool_withAliasAttributes(t *testing.T) { func TestAccCognitoIDPUserPool_withUsernameAttributes(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1034,7 +1034,7 @@ func TestAccCognitoIDPUserPool_withUsernameAttributes(t *testing.T) { func TestAccCognitoIDPUserPool_withPasswordPolicy(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1080,7 +1080,7 @@ func TestAccCognitoIDPUserPool_withPasswordPolicy(t *testing.T) { func TestAccCognitoIDPUserPool_withUsername(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1117,7 +1117,7 
@@ func TestAccCognitoIDPUserPool_withUsername(t *testing.T) { func TestAccCognitoIDPUserPool_withLambda(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" lambdaResourceName := "aws_lambda_function.test" @@ -1180,7 +1180,7 @@ func TestAccCognitoIDPUserPool_withLambda(t *testing.T) { func TestAccCognitoIDPUserPool_WithLambda_email(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" lambdaResourceName := "aws_lambda_function.test" @@ -1232,7 +1232,7 @@ func TestAccCognitoIDPUserPool_WithLambda_email(t *testing.T) { func TestAccCognitoIDPUserPool_WithLambda_sms(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" lambdaResourceName := "aws_lambda_function.test" @@ -1284,7 +1284,7 @@ func TestAccCognitoIDPUserPool_WithLambda_sms(t *testing.T) { func TestAccCognitoIDPUserPool_WithLambda_preGenerationTokenConfig(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" lambdaResourceName := "aws_lambda_function.test" @@ -1334,9 +1334,56 @@ func TestAccCognitoIDPUserPool_WithLambda_preGenerationTokenConfig(t *testing.T) }) } +// https://github.com/hashicorp/terraform-provider-aws/issues/38164. 
+func TestAccCognitoIDPUserPool_addLambda(t *testing.T) { + ctx := acctest.Context(t) + var pool awstypes.UserPoolType + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cognito_user_pool.test" + lambdaResourceName := "aws_lambda_function.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckIdentityProvider(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserPoolDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccUserPoolConfig_name(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserPoolExists(ctx, resourceName, &pool), + resource.TestCheckResourceAttr(resourceName, "lambda_config.#", acctest.Ct0), + ), + }, + { + Config: testAccUserPoolConfig_lambda(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckUserPoolExists(ctx, resourceName, &pool), + resource.TestCheckResourceAttr(resourceName, "lambda_config.#", acctest.Ct1), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.create_auth_challenge", lambdaResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.custom_message", lambdaResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.define_auth_challenge", lambdaResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.post_authentication", lambdaResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.post_confirmation", lambdaResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.pre_authentication", lambdaResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.pre_sign_up", lambdaResourceName, names.AttrARN), + 
resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.pre_token_generation", lambdaResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.user_migration", lambdaResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "lambda_config.0.verify_auth_challenge_response", lambdaResourceName, names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccCognitoIDPUserPool_schemaAttributes(t *testing.T) { ctx := acctest.Context(t) - var pool1, pool2 cognitoidentityprovider.UserPoolType + var pool1, pool2 awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1466,7 +1513,7 @@ func TestAccCognitoIDPUserPool_schemaAttributesModified(t *testing.T) { // Ref: https://github.com/hashicorp/terraform-provider-aws/issues/21654 func TestAccCognitoIDPUserPool_schemaAttributesStringAttributeConstraints(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1496,7 +1543,7 @@ func TestAccCognitoIDPUserPool_schemaAttributesStringAttributeConstraints(t *tes func TestAccCognitoIDPUserPool_withVerificationMessageTemplate(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1558,7 +1605,7 @@ func TestAccCognitoIDPUserPool_withVerificationMessageTemplate(t *testing.T) { func TestAccCognitoIDPUserPool_withVerificationMessageTemplateUTF8(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1620,7 +1667,7 @@ func TestAccCognitoIDPUserPool_withVerificationMessageTemplateUTF8(t *testing.T) func TestAccCognitoIDPUserPool_update(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) optionalMfa := "OPTIONAL" offMfa := "OFF" @@ -1711,7 +1758,7 @@ func TestAccCognitoIDPUserPool_update(t *testing.T) { func TestAccCognitoIDPUserPool_disappears(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1735,7 +1782,7 @@ func TestAccCognitoIDPUserPool_disappears(t *testing.T) { func TestAccCognitoIDPUserPool_withUserAttributeUpdateSettings(t *testing.T) { ctx := acctest.Context(t) - var pool cognitoidentityprovider.UserPoolType + var pool awstypes.UserPoolType rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_cognito_user_pool.test" @@ -1746,18 +1793,14 @@ func TestAccCognitoIDPUserPool_withUserAttributeUpdateSettings(t *testing.T) { CheckDestroy: testAccCheckUserPoolDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccUserPoolConfig_userAttributeUpdateSettings(rName, "invalid_value"), - ExpectError: regexache.MustCompile("expected user_attribute_update_settings.0.attributes_require_verification_before_update.0 to be one of"), - }, - { - Config: testAccUserPoolConfig_userAttributeUpdateSettings(rName, cognitoidentityprovider.VerifiedAttributeTypeEmail), + Config: testAccUserPoolConfig_userAttributeUpdateSettings(rName, string(awstypes.VerifiedAttributeTypeEmail)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckUserPoolExists(ctx, resourceName, &pool), resource.TestCheckResourceAttr(resourceName, 
"auto_verified_attributes.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "auto_verified_attributes.0", cognitoidentityprovider.VerifiedAttributeTypeEmail), + resource.TestCheckResourceAttr(resourceName, "auto_verified_attributes.0", string(awstypes.VerifiedAttributeTypeEmail)), resource.TestCheckResourceAttr(resourceName, "user_attribute_update_settings.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "user_attribute_update_settings.0.attributes_require_verification_before_update.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "user_attribute_update_settings.0.attributes_require_verification_before_update.0", cognitoidentityprovider.VerifiedAttributeTypeEmail), + resource.TestCheckResourceAttr(resourceName, "user_attribute_update_settings.0.attributes_require_verification_before_update.0", string(awstypes.VerifiedAttributeTypeEmail)), ), }, { @@ -1774,7 +1817,7 @@ func TestAccCognitoIDPUserPool_withUserAttributeUpdateSettings(t *testing.T) { func testAccCheckUserPoolDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_user_pool" { @@ -1798,14 +1841,14 @@ func testAccCheckUserPoolDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckUserPoolExists(ctx context.Context, n string, v *cognitoidentityprovider.UserPoolType) resource.TestCheckFunc { +func testAccCheckUserPoolExists(ctx context.Context, n string, v *awstypes.UserPoolType) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) output, err := 
tfcognitoidp.FindUserPoolByID(ctx, conn, rs.Primary.ID) @@ -1821,9 +1864,9 @@ func testAccCheckUserPoolExists(ctx context.Context, n string, v *cognitoidentit } } -func testAccCheckUserPoolNotRecreated(pool1, pool2 *cognitoidentityprovider.UserPoolType) resource.TestCheckFunc { +func testAccCheckUserPoolNotRecreated(pool1, pool2 *awstypes.UserPoolType) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.TimeValue(pool1.CreationDate).Equal(aws.TimeValue(pool2.CreationDate)) { + if !aws.ToTime(pool1.CreationDate).Equal(aws.ToTime(pool2.CreationDate)) { return fmt.Errorf("user pool was recreated. expected: %s, got: %s", pool1.CreationDate, pool2.CreationDate) } return nil @@ -1831,21 +1874,8 @@ func testAccCheckUserPoolNotRecreated(pool1, pool2 *cognitoidentityprovider.User } func testAccPreCheckIdentityProvider(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) - - input := &cognitoidentityprovider.ListUserPoolsInput{ - MaxResults: aws.Int64(1), - } - - _, err := conn.ListUserPoolsWithContext(ctx, input) - - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } + t.Helper() + acctest.PreCheckCognitoIdentityProvider(ctx, t) } func testAccUserPoolSMSConfigurationConfig_base(rName string, externalID string) string { diff --git a/internal/service/cognitoidp/user_pool_ui_customization.go b/internal/service/cognitoidp/user_pool_ui_customization.go index b8eb78ca6e7..d44a5fd0d86 100644 --- a/internal/service/cognitoidp/user_pool_ui_customization.go +++ b/internal/service/cognitoidp/user_pool_ui_customization.go @@ -8,9 +8,9 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -81,7 +81,7 @@ const ( func resourceUserPoolUICustomizationPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) userPoolID, clientID := d.Get(names.AttrUserPoolID).(string), d.Get(names.AttrClientID).(string) id := errs.Must(flex.FlattenResourceId([]string{userPoolID, clientID}, userPoolUICustomizationResourceIDPartCount, false)) @@ -102,7 +102,7 @@ func resourceUserPoolUICustomizationPut(ctx context.Context, d *schema.ResourceD input.ImageFile = v } - _, err := conn.SetUICustomizationWithContext(ctx, input) + _, err := conn.SetUICustomization(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting Cognito User Pool UI Customization (%s): %s", id, err) @@ -115,7 +115,7 @@ func resourceUserPoolUICustomizationPut(ctx context.Context, d *schema.ResourceD func resourceUserPoolUICustomizationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) parts, err := flex.ExpandResourceId(d.Id(), userPoolUICustomizationResourceIDPartCount, false) if err != nil { @@ -136,11 +136,11 @@ func resourceUserPoolUICustomizationRead(ctx context.Context, d *schema.Resource } d.Set(names.AttrClientID, uiCustomization.ClientId) - d.Set(names.AttrCreationDate, aws.TimeValue(uiCustomization.CreationDate).Format(time.RFC3339)) + d.Set(names.AttrCreationDate, aws.ToTime(uiCustomization.CreationDate).Format(time.RFC3339)) 
d.Set("css", uiCustomization.CSS) d.Set("css_version", uiCustomization.CSSVersion) d.Set("image_url", uiCustomization.ImageUrl) - d.Set("last_modified_date", aws.TimeValue(uiCustomization.LastModifiedDate).Format(time.RFC3339)) + d.Set("last_modified_date", aws.ToTime(uiCustomization.LastModifiedDate).Format(time.RFC3339)) d.Set(names.AttrUserPoolID, uiCustomization.UserPoolId) return diags @@ -148,7 +148,7 @@ func resourceUserPoolUICustomizationRead(ctx context.Context, d *schema.Resource func resourceUserPoolUICustomizationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) parts, err := flex.ExpandResourceId(d.Id(), userPoolUICustomizationResourceIDPartCount, false) if err != nil { @@ -157,12 +157,12 @@ func resourceUserPoolUICustomizationDelete(ctx context.Context, d *schema.Resour userPoolID, clientID := parts[0], parts[1] log.Printf("[DEBUG] Deleting Cognito User Pool UI Customization: %s", d.Id()) - _, err = conn.SetUICustomizationWithContext(ctx, &cognitoidentityprovider.SetUICustomizationInput{ + _, err = conn.SetUICustomization(ctx, &cognitoidentityprovider.SetUICustomizationInput{ ClientId: aws.String(clientID), UserPoolId: aws.String(userPoolID), }) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -173,15 +173,15 @@ func resourceUserPoolUICustomizationDelete(ctx context.Context, d *schema.Resour return diags } -func findUserPoolUICustomizationByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.CognitoIdentityProvider, userPoolID, clientID string) (*cognitoidentityprovider.UICustomizationType, error) { +func findUserPoolUICustomizationByTwoPartKey(ctx context.Context, conn *cognitoidentityprovider.Client, userPoolID, clientID string) 
(*awstypes.UICustomizationType, error) { input := &cognitoidentityprovider.GetUICustomizationInput{ ClientId: aws.String(clientID), UserPoolId: aws.String(userPoolID), } - output, err := conn.GetUICustomizationWithContext(ctx, input) + output, err := conn.GetUICustomization(ctx, input) - if tfawserr.ErrCodeEquals(err, cognitoidentityprovider.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/cognitoidp/user_pool_ui_customization_test.go b/internal/service/cognitoidp/user_pool_ui_customization_test.go index d0ccfe717da..4fe00f8e710 100644 --- a/internal/service/cognitoidp/user_pool_ui_customization_test.go +++ b/internal/service/cognitoidp/user_pool_ui_customization_test.go @@ -508,7 +508,7 @@ func TestAccCognitoIDPUserPoolUICustomization_UpdateAllToClient_cSS(t *testing.T func testAccCheckUserPoolUICustomizationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_user_pool_ui_customization" { @@ -539,7 +539,7 @@ func testAccCheckUserPoolUICustomizationExists(ctx context.Context, n string) re return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) _, err := tfcognitoidp.FindUserPoolUICustomizationByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes[names.AttrClientID]) diff --git a/internal/service/cognitoidp/user_pools_data_source.go b/internal/service/cognitoidp/user_pools_data_source.go index c643b56afc5..7a5476d9e0f 100644 --- a/internal/service/cognitoidp/user_pools_data_source.go +++ 
b/internal/service/cognitoidp/user_pools_data_source.go @@ -5,11 +5,11 @@ package cognitoidp import ( "context" - "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -43,29 +43,25 @@ func dataSourceUserPools() *schema.Resource { func dataSourceUserPoolsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).CognitoIDPConn(ctx) + conn := meta.(*conns.AWSClient).CognitoIDPClient(ctx) - output, err := findUserPoolDescriptionTypes(ctx, conn) + name := d.Get(names.AttrName).(string) + output, err := findUserPoolDescriptionTypesByName(ctx, conn, name) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Cognito User Pools: %s", err) } - name := d.Get(names.AttrName).(string) var arns, userPoolIDs []string for _, v := range output { - if name != aws.StringValue(v.Name) { - continue - } - - userPoolID := aws.StringValue(v.Id) + userPoolID := aws.ToString(v.Id) arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, - Service: cognitoidentityprovider.ServiceName, + Service: "cognito-idp", Region: meta.(*conns.AWSClient).Region, AccountID: meta.(*conns.AWSClient).AccountID, - Resource: fmt.Sprintf("userpool/%s", userPoolID), + Resource: "userpool/" + userPoolID, }.String() userPoolIDs = append(userPoolIDs, userPoolID) @@ -79,28 +75,25 @@ func dataSourceUserPoolsRead(ctx context.Context, d *schema.ResourceData, meta i return diags } -func findUserPoolDescriptionTypes(ctx context.Context, conn 
*cognitoidentityprovider.CognitoIdentityProvider) ([]*cognitoidentityprovider.UserPoolDescriptionType, error) { +func findUserPoolDescriptionTypesByName(ctx context.Context, conn *cognitoidentityprovider.Client, name string) ([]awstypes.UserPoolDescriptionType, error) { input := &cognitoidentityprovider.ListUserPoolsInput{ - MaxResults: aws.Int64(60), + MaxResults: aws.Int32(60), } - var output []*cognitoidentityprovider.UserPoolDescriptionType + var output []awstypes.UserPoolDescriptionType - err := conn.ListUserPoolsPagesWithContext(ctx, input, func(page *cognitoidentityprovider.ListUserPoolsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := cognitoidentityprovider.NewListUserPoolsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err } for _, v := range page.UserPools { - if v != nil { + if aws.ToString(v.Name) == name { output = append(output, v) } } - - return !lastPage - }) - - if err != nil { - return nil, err } return output, nil diff --git a/internal/service/cognitoidp/user_test.go b/internal/service/cognitoidp/user_test.go index f29d30670c2..a3c791d5c77 100644 --- a/internal/service/cognitoidp/user_test.go +++ b/internal/service/cognitoidp/user_test.go @@ -9,8 +9,9 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider" + awstypes "github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -43,7 +44,7 @@ func TestAccCognitoIDPUser_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "preferred_mfa_setting", ""), resource.TestCheckResourceAttr(resourceName, 
"mfa_setting_list.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, names.AttrEnabled, acctest.CtTrue), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, cognitoidentityprovider.UserStatusTypeForceChangePassword), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.UserStatusTypeForceChangePassword)), ), }, { @@ -108,7 +109,7 @@ func TestAccCognitoIDPUser_temporaryPassword(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckUserExists(ctx, userResourceName), testAccUserTemporaryPassword(ctx, userResourceName, clientResourceName), - resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, cognitoidentityprovider.UserStatusTypeForceChangePassword), + resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, string(awstypes.UserStatusTypeForceChangePassword)), ), }, { @@ -129,7 +130,7 @@ func TestAccCognitoIDPUser_temporaryPassword(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckUserExists(ctx, userResourceName), testAccUserTemporaryPassword(ctx, userResourceName, clientResourceName), - resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, cognitoidentityprovider.UserStatusTypeForceChangePassword), + resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, string(awstypes.UserStatusTypeForceChangePassword)), ), }, { @@ -137,7 +138,7 @@ func TestAccCognitoIDPUser_temporaryPassword(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckUserExists(ctx, userResourceName), resource.TestCheckResourceAttr(userResourceName, "temporary_password", ""), - resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, cognitoidentityprovider.UserStatusTypeForceChangePassword), + resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, string(awstypes.UserStatusTypeForceChangePassword)), ), }, }, @@ -165,7 +166,7 @@ func TestAccCognitoIDPUser_password(t *testing.T) { Check: resource.ComposeTestCheckFunc( 
testAccCheckUserExists(ctx, userResourceName), testAccUserPassword(ctx, userResourceName, clientResourceName), - resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, cognitoidentityprovider.UserStatusTypeConfirmed), + resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, string(awstypes.UserStatusTypeConfirmed)), ), }, { @@ -186,7 +187,7 @@ func TestAccCognitoIDPUser_password(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckUserExists(ctx, userResourceName), testAccUserPassword(ctx, userResourceName, clientResourceName), - resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, cognitoidentityprovider.UserStatusTypeConfirmed), + resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, string(awstypes.UserStatusTypeConfirmed)), ), }, { @@ -194,7 +195,7 @@ func TestAccCognitoIDPUser_password(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckUserExists(ctx, userResourceName), resource.TestCheckResourceAttr(userResourceName, names.AttrPassword, ""), - resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, cognitoidentityprovider.UserStatusTypeConfirmed), + resource.TestCheckResourceAttr(userResourceName, names.AttrStatus, string(awstypes.UserStatusTypeConfirmed)), ), }, }, @@ -293,6 +294,47 @@ func TestAccCognitoIDPUser_enabled(t *testing.T) { }) } +// https://github.com/hashicorp/terraform-provider-aws/issues/38175. 
+func TestAccCognitoIDPUser_v5560Regression(t *testing.T) { + ctx := acctest.Context(t) + rUserPoolName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domain := acctest.RandomDomainName() + rUserName := acctest.RandomEmailAddress(domain) + resourceName := "aws_cognito_user.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.CognitoIDPServiceID), + CheckDestroy: testAccCheckUserDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.53.0", + }, + }, + Config: testAccUserConfig_v5560Regression(rUserPoolName, rUserName), + Check: resource.ComposeTestCheckFunc( + testAccCheckUserExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), + resource.TestCheckResourceAttrSet(resourceName, "last_modified_date"), + resource.TestCheckResourceAttrSet(resourceName, "sub"), + resource.TestCheckResourceAttr(resourceName, "preferred_mfa_setting", ""), + resource.TestCheckResourceAttr(resourceName, "mfa_setting_list.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, names.AttrEnabled, acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.UserStatusTypeForceChangePassword)), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccUserConfig_v5560Regression(rUserPoolName, rUserName), + PlanOnly: true, + }, + }, + }) +} + func testAccCheckUserExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -300,11 +342,7 @@ func testAccCheckUserExists(ctx context.Context, n string) resource.TestCheckFun return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Cognito User ID is set") - } - - conn := 
acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) _, err := tfcognitoidp.FindUserByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrUserPoolID], rs.Primary.Attributes[names.AttrUsername]) @@ -314,7 +352,7 @@ func testAccCheckUserExists(ctx context.Context, n string) resource.TestCheckFun func testAccCheckUserDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_cognito_user" { @@ -338,83 +376,69 @@ func testAccCheckUserDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccUserTemporaryPassword(ctx context.Context, userResName string, clientResName string) resource.TestCheckFunc { +func testAccUserTemporaryPassword(ctx context.Context, userRsName string, clientRsName string) resource.TestCheckFunc { return func(s *terraform.State) error { - userRs, ok := s.RootModule().Resources[userResName] + userRs, ok := s.RootModule().Resources[userRsName] if !ok { - return fmt.Errorf("Not found: %s", userResName) + return fmt.Errorf("Not found: %s", userRsName) } - clientRs, ok := s.RootModule().Resources[clientResName] + clientRs, ok := s.RootModule().Resources[clientRsName] if !ok { - return fmt.Errorf("Not found: %s", clientResName) + return fmt.Errorf("Not found: %s", clientRsName) } - userName := userRs.Primary.Attributes[names.AttrUsername] - userPassword := userRs.Primary.Attributes["temporary_password"] - clientId := clientRs.Primary.Attributes[names.AttrID] + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) - - params := &cognitoidentityprovider.InitiateAuthInput{ - AuthFlow: 
aws.String(cognitoidentityprovider.AuthFlowTypeUserPasswordAuth), - AuthParameters: map[string]*string{ - "USERNAME": aws.String(userName), - "PASSWORD": aws.String(userPassword), + input := &cognitoidentityprovider.InitiateAuthInput{ + AuthFlow: awstypes.AuthFlowTypeUserPasswordAuth, + AuthParameters: map[string]string{ + "USERNAME": userRs.Primary.Attributes[names.AttrUsername], + "PASSWORD": userRs.Primary.Attributes["temporary_password"], }, - ClientId: aws.String(clientId), + ClientId: aws.String(clientRs.Primary.Attributes[names.AttrID]), } - resp, err := conn.InitiateAuthWithContext(ctx, params) + output, err := conn.InitiateAuth(ctx, input) + if err != nil { return err } - if aws.StringValue(resp.ChallengeName) != cognitoidentityprovider.ChallengeNameTypeNewPasswordRequired { - return errors.New("The password is not a temporary password.") + if output.ChallengeName != awstypes.ChallengeNameTypeNewPasswordRequired { + return errors.New("The password is not a temporary password") } return nil } } -func testAccUserPassword(ctx context.Context, userResName string, clientResName string) resource.TestCheckFunc { +func testAccUserPassword(ctx context.Context, userRsName string, clientRsName string) resource.TestCheckFunc { return func(s *terraform.State) error { - userRs, ok := s.RootModule().Resources[userResName] + userRs, ok := s.RootModule().Resources[userRsName] if !ok { - return fmt.Errorf("Not found: %s", userResName) + return fmt.Errorf("Not found: %s", userRsName) } - clientRs, ok := s.RootModule().Resources[clientResName] + clientRs, ok := s.RootModule().Resources[clientRsName] if !ok { - return fmt.Errorf("Not found: %s", clientResName) + return fmt.Errorf("Not found: %s", clientRsName) } - userName := userRs.Primary.Attributes[names.AttrUsername] - userPassword := userRs.Primary.Attributes[names.AttrPassword] - clientId := clientRs.Primary.Attributes[names.AttrID] + conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPClient(ctx) - conn := 
acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) - - params := &cognitoidentityprovider.InitiateAuthInput{ - AuthFlow: aws.String(cognitoidentityprovider.AuthFlowTypeUserPasswordAuth), - AuthParameters: map[string]*string{ - "USERNAME": aws.String(userName), - "PASSWORD": aws.String(userPassword), + input := &cognitoidentityprovider.InitiateAuthInput{ + AuthFlow: awstypes.AuthFlowTypeUserPasswordAuth, + AuthParameters: map[string]string{ + "USERNAME": userRs.Primary.Attributes[names.AttrUsername], + "PASSWORD": userRs.Primary.Attributes[names.AttrPassword], }, - ClientId: aws.String(clientId), + ClientId: aws.String(clientRs.Primary.Attributes[names.AttrID]), } - resp, err := conn.InitiateAuthWithContext(ctx, params) - if err != nil { - return err - } + _, err := conn.InitiateAuth(ctx, input) - if resp.AuthenticationResult == nil { - return errors.New("Authentication has failed.") - } - - return nil + return err } } @@ -628,3 +652,22 @@ resource "aws_cognito_user" "test" { } `, userPoolName, userName, enabled) } + +func testAccUserConfig_v5560Regression(userPoolName string, userName string) string { + return fmt.Sprintf(` +resource "aws_cognito_user_pool" "test" { + name = %[1]q +} + +resource "aws_cognito_user" "test" { + user_pool_id = aws_cognito_user_pool.test.id + username = %[2]q + + attributes = { + "name" = "test" + "email" = %[2]q + "email_verified" = "true" + } +} +`, userPoolName, userName) +} diff --git a/internal/service/comprehend/service_endpoint_resolver_gen.go b/internal/service/comprehend/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..1d65e617e95 --- /dev/null +++ b/internal/service/comprehend/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package comprehend + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + comprehend_sdkv2 "github.com/aws/aws-sdk-go-v2/service/comprehend" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ comprehend_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver comprehend_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: comprehend_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params comprehend_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up comprehend endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } 
+ + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*comprehend_sdkv2.Options) { + return func(o *comprehend_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/comprehend/service_endpoints_gen_test.go b/internal/service/comprehend/service_endpoints_gen_test.go index fd36f64bbb8..3bf5c4e3415 100644 --- a/internal/service/comprehend/service_endpoints_gen_test.go +++ b/internal/service/comprehend/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := comprehend_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), comprehend_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
comprehend_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), comprehend_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/comprehend/service_package_gen.go b/internal/service/comprehend/service_package_gen.go index f9d5943fbe5..d2d1a692b49 100644 --- a/internal/service/comprehend/service_package_gen.go +++ b/internal/service/comprehend/service_package_gen.go @@ -1,4 +1,4 
@@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package comprehend @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" comprehend_sdkv2 "github.com/aws/aws-sdk-go-v2/service/comprehend" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -56,19 +55,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*comprehend_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return comprehend_sdkv2.NewFromConfig(cfg, func(o *comprehend_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return comprehend_sdkv2.NewFromConfig(cfg, + comprehend_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/computeoptimizer/service_endpoint_resolver_gen.go b/internal/service/computeoptimizer/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ae76d51e24d --- /dev/null +++ b/internal/service/computeoptimizer/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package computeoptimizer + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + computeoptimizer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/computeoptimizer" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ computeoptimizer_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver computeoptimizer_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: computeoptimizer_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params computeoptimizer_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up computeoptimizer endpoint %q: %s", hostname, err) + 
return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*computeoptimizer_sdkv2.Options) { + return func(o *computeoptimizer_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/computeoptimizer/service_endpoints_gen_test.go b/internal/service/computeoptimizer/service_endpoints_gen_test.go index 47ef905d6f1..8d1ae0a4fbf 100644 --- a/internal/service/computeoptimizer/service_endpoints_gen_test.go +++ b/internal/service/computeoptimizer/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := computeoptimizer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), computeoptimizer_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func 
defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := computeoptimizer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), computeoptimizer_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/computeoptimizer/service_package_gen.go b/internal/service/computeoptimizer/service_package_gen.go index 5a2862cf7dc..1fc4ab60df6 
100644 --- a/internal/service/computeoptimizer/service_package_gen.go +++ b/internal/service/computeoptimizer/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package computeoptimizer @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" computeoptimizer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/computeoptimizer" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*computeoptimizer_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return computeoptimizer_sdkv2.NewFromConfig(cfg, func(o *computeoptimizer_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return computeoptimizer_sdkv2.NewFromConfig(cfg, + computeoptimizer_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/configservice/service_endpoint_resolver_gen.go b/internal/service/configservice/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..b4c40a3b13c --- /dev/null +++ 
b/internal/service/configservice/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package configservice + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + configservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/configservice" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ configservice_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver configservice_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: configservice_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params configservice_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) 
+ params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up configservice endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*configservice_sdkv2.Options) { + return func(o *configservice_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/configservice/service_endpoints_gen_test.go b/internal/service/configservice/service_endpoints_gen_test.go index 04e52863855..d778d581ca8 100644 --- a/internal/service/configservice/service_endpoints_gen_test.go +++ b/internal/service/configservice/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := configservice_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), configservice_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if 
ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := configservice_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), configservice_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/configservice/service_package_gen.go 
b/internal/service/configservice/service_package_gen.go index 861d8e46343..fab6d043788 100644 --- a/internal/service/configservice/service_package_gen.go +++ b/internal/service/configservice/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package configservice @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" configservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/configservice" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -114,19 +113,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*configservice_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return configservice_sdkv2.NewFromConfig(cfg, func(o *configservice_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return configservice_sdkv2.NewFromConfig(cfg, + configservice_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/connect/service_endpoint_resolver_gen.go b/internal/service/connect/service_endpoint_resolver_gen.go new file mode 100644 index 
00000000000..a01203a7218 --- /dev/null +++ b/internal/service/connect/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package connect + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/connect/service_endpoints_gen_test.go b/internal/service/connect/service_endpoints_gen_test.go index 88dfef7f6f5..5d45b023d71 100644 --- a/internal/service/connect/service_endpoints_gen_test.go +++ b/internal/service/connect/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t 
*testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(connect_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(connect_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/connect/service_package_gen.go b/internal/service/connect/service_package_gen.go index 5b42ea5edd4..ca76a90687b 100644 --- a/internal/service/connect/service_package_gen.go +++ b/internal/service/connect/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package connect @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" connect_sdkv1 "github.com/aws/aws-sdk-go/service/connect" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -222,11 +221,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*c "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return connect_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/connectcases/service_endpoint_resolver_gen.go b/internal/service/connectcases/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ecbe3728fc0 --- /dev/null +++ b/internal/service/connectcases/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package connectcases + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + connectcases_sdkv2 "github.com/aws/aws-sdk-go-v2/service/connectcases" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ connectcases_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver connectcases_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: connectcases_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params connectcases_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up connectcases endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*connectcases_sdkv2.Options) { + return func(o *connectcases_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/connectcases/service_endpoints_gen_test.go b/internal/service/connectcases/service_endpoints_gen_test.go index b8c4b716412..7828b34b8f7 100644 --- a/internal/service/connectcases/service_endpoints_gen_test.go +++ b/internal/service/connectcases/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := connectcases_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), connectcases_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := connectcases_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), connectcases_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/connectcases/service_package_gen.go b/internal/service/connectcases/service_package_gen.go index b1b1b2433fc..71e95edfac4 100644 --- a/internal/service/connectcases/service_package_gen.go +++ 
b/internal/service/connectcases/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package connectcases @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" connectcases_sdkv2 "github.com/aws/aws-sdk-go-v2/service/connectcases" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*connectcases_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return connectcases_sdkv2.NewFromConfig(cfg, func(o *connectcases_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return connectcases_sdkv2.NewFromConfig(cfg, + connectcases_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/controltower/control.go b/internal/service/controltower/control.go index 53004e4e24c..fda5478d26c 100644 --- a/internal/service/controltower/control.go +++ b/internal/service/controltower/control.go @@ -5,12 +5,14 @@ package controltower import ( "context" + "encoding/json" "errors" "log" "time" 
"github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/controltower" + "github.com/aws/aws-sdk-go-v2/service/controltower/document" "github.com/aws/aws-sdk-go-v2/service/controltower/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -23,6 +25,7 @@ import ( tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_controltower_control", name="Control") @@ -30,24 +33,64 @@ func resourceControl() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceControlCreate, ReadWithoutTimeout: resourceControlRead, + UpdateWithoutTimeout: resourceControlUpdate, DeleteWithoutTimeout: resourceControlDelete, Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, + StateContext: func(ctx context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + conn := meta.(*conns.AWSClient).ControlTowerClient(ctx) + + parts, err := flex.ExpandResourceId(d.Id(), controlResourceIDPartCount, false) + if err != nil { + return nil, err + } + + targetIdentifier, controlIdentifier := parts[0], parts[1] + output, err := findEnabledControlByTwoPartKey(ctx, conn, targetIdentifier, controlIdentifier) + if err != nil { + return nil, err + } + + d.Set(names.AttrARN, output.Arn) + + return []*schema.ResourceData{d}, nil + }, }, Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(60 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), Delete: schema.DefaultTimeout(60 * time.Minute), }, Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, "control_identifier": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: 
verify.ValidARN, }, + names.AttrParameters: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidStringIsJSONOrYAML, + }, + }, + }, + }, "target_identifier": { Type: schema.TypeString, Required: true, @@ -71,6 +114,15 @@ func resourceControlCreate(ctx context.Context, d *schema.ResourceData, meta int TargetIdentifier: aws.String(targetIdentifier), } + if v, ok := d.GetOk(names.AttrParameters); ok && v.(*schema.Set).Len() > 0 { + p, err := expandControlParameters(v.(*schema.Set).List()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating ControlTower Control (%s): %s", id, err) + } + + input.Parameters = p + } + output, err := conn.EnableControl(ctx, input) if err != nil { @@ -78,6 +130,7 @@ func resourceControlCreate(ctx context.Context, d *schema.ResourceData, meta int } d.SetId(id) + d.Set(names.AttrARN, output.Arn) if _, err := waitOperationSucceeded(ctx, conn, aws.ToString(output.OperationIdentifier), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ControlTower Control (%s) create: %s", d.Id(), err) @@ -91,13 +144,25 @@ func resourceControlRead(ctx context.Context, d *schema.ResourceData, meta inter conn := meta.(*conns.AWSClient).ControlTowerClient(ctx) - parts, err := flex.ExpandResourceId(d.Id(), controlResourceIDPartCount, false) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) - } + var output *types.EnabledControlDetails + var err error + if v, ok := d.GetOk(names.AttrARN); ok { + output, err = findEnabledControlByARN(ctx, conn, v.(string)) + } else { + // backwards compatibility if ARN is not set from existing state + parts, internalErr := flex.ExpandResourceId(d.Id(), controlResourceIDPartCount, false) + if internalErr != nil { + return sdkdiag.AppendFromErr(diags, err) 
+ } - targetIdentifier, controlIdentifier := parts[0], parts[1] - output, err := findEnabledControlByTwoPartKey(ctx, conn, targetIdentifier, controlIdentifier) + targetIdentifier, controlIdentifier := parts[0], parts[1] + out, internalErr := findEnabledControlByTwoPartKey(ctx, conn, targetIdentifier, controlIdentifier) + if internalErr != nil { + return sdkdiag.AppendErrorf(diags, "reading ControlTower Control (%s): %s", d.Id(), err) + } + + output, err = findEnabledControlByARN(ctx, conn, aws.ToString(out.Arn)) + } if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ControlTower Control %s not found, removing from state", d.Id()) @@ -109,12 +174,51 @@ func resourceControlRead(ctx context.Context, d *schema.ResourceData, meta inter return sdkdiag.AppendErrorf(diags, "reading ControlTower Control (%s): %s", d.Id(), err) } + d.Set(names.AttrARN, output.Arn) d.Set("control_identifier", output.ControlIdentifier) - d.Set("target_identifier", targetIdentifier) + + parameters, err := flattenControlParameters(output.Parameters) + if err != nil { + return sdkdiag.AppendErrorf(diags, "flattening ControlTower Control (%s) parameters: %s", d.Id(), err) + } + + d.Set(names.AttrParameters, parameters) + d.Set("target_identifier", output.TargetIdentifier) return diags } +func resourceControlUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + conn := meta.(*conns.AWSClient).ControlTowerClient(ctx) + + if d.HasChange(names.AttrParameters) { + input := &controltower.UpdateEnabledControlInput{ + EnabledControlIdentifier: aws.String(d.Get(names.AttrARN).(string)), + } + + p, err := expandControlParameters(d.Get(names.AttrParameters).(*schema.Set).List()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating ControlTower Control (%s): %s", d.Id(), err) + } + + input.Parameters = p + + output, err := conn.UpdateEnabledControl(ctx, input) + + if err != nil { + return 
sdkdiag.AppendErrorf(diags, "updating ControlTower Control (%s): %s", d.Id(), err) + } + + if _, err := waitOperationSucceeded(ctx, conn, aws.ToString(output.OperationIdentifier), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ControlTower Control (%s) delete: %s", d.Id(), err) + } + } + + return append(diags, resourceControlRead(ctx, d, meta)...) +} + func resourceControlDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics @@ -148,6 +252,77 @@ const ( controlResourceIDPartCount = 2 ) +func expandControlParameters(input []any) ([]types.EnabledControlParameter, error) { + if len(input) == 0 { + return nil, nil + } + + var output []types.EnabledControlParameter + + for _, v := range input { + val := v.(map[string]any) + e := types.EnabledControlParameter{ + Key: aws.String(val[names.AttrKey].(string)), + } + + var out any + err := json.Unmarshal([]byte(val[names.AttrValue].(string)), &out) + if err != nil { + return nil, err + } + + e.Value = document.NewLazyDocument(out) + output = append(output, e) + } + + return output, nil +} + +func flattenControlParameters(input []types.EnabledControlParameterSummary) (*schema.Set, error) { + if len(input) == 0 { + return nil, nil + } + + res := &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, + }, + } + + var output []any + + for _, v := range input { + val := map[string]any{ + names.AttrKey: aws.ToString(v.Key), + } + + var va any + err := v.Value.UnmarshalSmithyDocument(&va) + + if err != nil { + log.Printf("[WARN] Error unmarshalling control parameter value: %s", err) + return nil, err + } + + out, err := json.Marshal(va) + if err != nil { + return nil, err + } + + val[names.AttrValue] = string(out) + output = append(output, val) + } + + return 
schema.NewSet(schema.HashResource(res), output), nil +} + func findEnabledControlByTwoPartKey(ctx context.Context, conn *controltower.Client, targetIdentifier, controlIdentifier string) (*types.EnabledControlSummary, error) { input := &controltower.ListEnabledControlsInput{ TargetIdentifier: aws.String(targetIdentifier), @@ -197,6 +372,30 @@ func findEnabledControls(ctx context.Context, conn *controltower.Client, input * return output, nil } +func findEnabledControlByARN(ctx context.Context, conn *controltower.Client, arn string) (*types.EnabledControlDetails, error) { + input := &controltower.GetEnabledControlInput{ + EnabledControlIdentifier: aws.String(arn), + } + + output, err := conn.GetEnabledControl(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.EnabledControlDetails == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.EnabledControlDetails, nil +} func findControlOperationByID(ctx context.Context, conn *controltower.Client, id string) (*types.ControlOperation, error) { input := &controltower.GetControlOperationInput{ OperationIdentifier: aws.String(id), diff --git a/internal/service/controltower/control_test.go b/internal/service/controltower/control_test.go index a0ed871d402..4531c115f5f 100644 --- a/internal/service/controltower/control_test.go +++ b/internal/service/controltower/control_test.go @@ -37,6 +37,7 @@ func testAccControl_basic(t *testing.T) { resourceName := "aws_controltower_control.test" controlName := "AWS-GR_EC2_VOLUME_INUSE_CHECK" ouName := "Security" + region := "us-west-2" //lintignore:AWSAT003 resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -49,7 +50,7 @@ func testAccControl_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { - Config: 
testAccControlConfig_basic(controlName, ouName), + Config: testAccControlConfig_basic(controlName, ouName, region), Check: resource.ComposeTestCheckFunc( testAccCheckControlExists(ctx, resourceName, &control), resource.TestCheckResourceAttrSet(resourceName, "control_identifier"), @@ -65,6 +66,7 @@ func testAccControl_disappears(t *testing.T) { resourceName := "aws_controltower_control.test" controlName := "AWS-GR_EC2_VOLUME_INUSE_CHECK" ouName := "Security" + region := "us-west-2" //lintignore:AWSAT003 resource.Test(t, resource.TestCase{ PreCheck: func() { @@ -77,7 +79,7 @@ func testAccControl_disappears(t *testing.T) { CheckDestroy: testAccCheckControlDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccControlConfig_basic(controlName, ouName), + Config: testAccControlConfig_basic(controlName, ouName, region), Check: resource.ComposeTestCheckFunc( testAccCheckControlExists(ctx, resourceName, &control), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfcontroltower.ResourceControl(), resourceName), @@ -135,7 +137,7 @@ func testAccCheckControlDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccControlConfig_basic(controlName string, ouName string) string { +func testAccControlConfig_basic(controlName, ouName, region string) string { return fmt.Sprintf(` data "aws_region" "current" {} @@ -153,6 +155,11 @@ resource "aws_controltower_control" "test" { for x in data.aws_organizations_organizational_units.test.children : x.arn if x.name == "%[2]s" ][0] + + parameters { + key = "AllowedRegions" + value = jsonencode([%[3]q]) + } } -`, controlName, ouName) +`, controlName, ouName, region) } diff --git a/internal/service/controltower/service_endpoint_resolver_gen.go b/internal/service/controltower/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..845783a2ef3 --- /dev/null +++ b/internal/service/controltower/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; 
DO NOT EDIT. + +package controltower + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + controltower_sdkv2 "github.com/aws/aws-sdk-go-v2/service/controltower" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ controltower_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver controltower_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: controltower_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params controltower_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up controltower endpoint %q: %s", hostname, err) + return + } + } else { + 
return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*controltower_sdkv2.Options) { + return func(o *controltower_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/controltower/service_endpoints_gen_test.go b/internal/service/controltower/service_endpoints_gen_test.go index ade0d064297..4177f22267e 100644 --- a/internal/service/controltower/service_endpoints_gen_test.go +++ b/internal/service/controltower/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := controltower_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), controltower_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := controltower_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), controltower_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving controltower default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving controltower FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up controltower endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/controltower/service_package_gen.go b/internal/service/controltower/service_package_gen.go index 25b7f44a602..f28e6f5de8c 100644 --- a/internal/service/controltower/service_package_gen.go 
+++ b/internal/service/controltower/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package controltower @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" controltower_sdkv2 "github.com/aws/aws-sdk-go-v2/service/controltower" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -59,19 +58,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*controltower_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return controltower_sdkv2.NewFromConfig(cfg, func(o *controltower_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return controltower_sdkv2.NewFromConfig(cfg, + controltower_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/costoptimizationhub/service_endpoint_resolver_gen.go b/internal/service/costoptimizationhub/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..4b9d334a683 --- /dev/null +++ b/internal/service/costoptimizationhub/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by 
internal/generate/servicepackage/main.go; DO NOT EDIT. + +package costoptimizationhub + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + costoptimizationhub_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costoptimizationhub" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ costoptimizationhub_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver costoptimizationhub_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: costoptimizationhub_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params costoptimizationhub_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = 
fmt.Errorf("looking up costoptimizationhub endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*costoptimizationhub_sdkv2.Options) { + return func(o *costoptimizationhub_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/costoptimizationhub/service_endpoints_gen_test.go b/internal/service/costoptimizationhub/service_endpoints_gen_test.go index f0dadbbb35e..9a5e89bf8ed 100644 --- a/internal/service/costoptimizationhub/service_endpoints_gen_test.go +++ b/internal/service/costoptimizationhub/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -243,24 +245,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := costoptimizationhub_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), costoptimizationhub_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if 
ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := costoptimizationhub_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), costoptimizationhub_sdkv2.EndpointParameters{ @@ -268,14 +270,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git 
a/internal/service/costoptimizationhub/service_package.go b/internal/service/costoptimizationhub/service_package.go index db43c01421e..54a63e556f6 100644 --- a/internal/service/costoptimizationhub/service_package.go +++ b/internal/service/costoptimizationhub/service_package.go @@ -16,22 +16,20 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*costoptimizationhub.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return costoptimizationhub.NewFromConfig(cfg, func(o *costoptimizationhub.Options) { - if config["partition"].(string) == names.StandardPartitionID { - // Cost Optimization Hub endpoint is available only in us-east-1 Region. - o.Region = names.USEast1RegionID - } - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled + return costoptimizationhub.NewFromConfig(cfg, + costoptimizationhub.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *costoptimizationhub.Options) { + if config["partition"].(string) == names.StandardPartitionID { + // Cost Optimization Hub endpoint is available only in us-east-1 Region. 
+ if cfg.Region != names.USEast1RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USEast1RegionID, + }) + o.Region = names.USEast1RegionID + } } - } - }), nil + }, + ), nil } diff --git a/internal/service/costoptimizationhub/service_package_gen.go b/internal/service/costoptimizationhub/service_package_gen.go index 50f15a9e930..e67ee41129b 100644 --- a/internal/service/costoptimizationhub/service_package_gen.go +++ b/internal/service/costoptimizationhub/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package costoptimizationhub diff --git a/internal/service/cur/service_endpoint_resolver_gen.go b/internal/service/cur/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..25fbdce7659 --- /dev/null +++ b/internal/service/cur/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package cur + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + costandusagereportservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/costandusagereportservice" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ costandusagereportservice_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver costandusagereportservice_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: costandusagereportservice_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params costandusagereportservice_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up 
costandusagereportservice endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*costandusagereportservice_sdkv2.Options) { + return func(o *costandusagereportservice_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/cur/service_endpoints_gen_test.go b/internal/service/cur/service_endpoints_gen_test.go index fbce07ffabb..9a06ce0c82e 100644 --- a/internal/service/cur/service_endpoints_gen_test.go +++ b/internal/service/cur/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -93,7 +95,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -276,7 +278,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -297,24 +299,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := costandusagereportservice_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), costandusagereportservice_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() 
+ return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := costandusagereportservice_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), costandusagereportservice_sdkv2.EndpointParameters{ @@ -322,14 +324,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -410,16 +412,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/cur/service_package.go b/internal/service/cur/service_package.go index 
95d5d45ae6f..8ad3ede742d 100644 --- a/internal/service/cur/service_package.go +++ b/internal/service/cur/service_package.go @@ -1,4 +1,5 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 package cur @@ -15,23 +16,21 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*costandusagereportservice.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return costandusagereportservice.NewFromConfig(cfg, func(o *costandusagereportservice.Options) { - if config["partition"].(string) == names.StandardPartitionID { - // AWS Cost and Usage Reports is only available in AWS Commercial us-east-1 Region. - // https://docs.aws.amazon.com/general/latest/gr/billing.html. - o.Region = names.USEast1RegionID - } - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled + return costandusagereportservice.NewFromConfig(cfg, + costandusagereportservice.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *costandusagereportservice.Options) { + if config["partition"].(string) == names.StandardPartitionID { + // AWS Cost and Usage Reports is only available in AWS Commercial us-east-1 Region. + // https://docs.aws.amazon.com/general/latest/gr/billing.html. 
+ if cfg.Region != names.USEast1RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USEast1RegionID, + }) + o.Region = names.USEast1RegionID + } } - } - }), nil + }, + ), nil } diff --git a/internal/service/cur/service_package_gen.go b/internal/service/cur/service_package_gen.go index 94b052a58d7..7f131def9b2 100644 --- a/internal/service/cur/service_package_gen.go +++ b/internal/service/cur/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package cur diff --git a/internal/service/customerprofiles/service_endpoint_resolver_gen.go b/internal/service/customerprofiles/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..fede695c66d --- /dev/null +++ b/internal/service/customerprofiles/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package customerprofiles + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + customerprofiles_sdkv2 "github.com/aws/aws-sdk-go-v2/service/customerprofiles" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ customerprofiles_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver customerprofiles_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: customerprofiles_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params customerprofiles_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up customerprofiles endpoint %q: %s", hostname, err) + 
return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*customerprofiles_sdkv2.Options) { + return func(o *customerprofiles_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/customerprofiles/service_endpoints_gen_test.go b/internal/service/customerprofiles/service_endpoints_gen_test.go index 3a22a4566a8..8f2441f6063 100644 --- a/internal/service/customerprofiles/service_endpoints_gen_test.go +++ b/internal/service/customerprofiles/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := customerprofiles_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), customerprofiles_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func 
defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := customerprofiles_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), customerprofiles_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/customerprofiles/service_package_gen.go b/internal/service/customerprofiles/service_package_gen.go index f018f3595ee..13a4c11bdac 
100644 --- a/internal/service/customerprofiles/service_package_gen.go +++ b/internal/service/customerprofiles/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package customerprofiles @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" customerprofiles_sdkv2 "github.com/aws/aws-sdk-go-v2/service/customerprofiles" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -51,19 +50,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*customerprofiles_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return customerprofiles_sdkv2.NewFromConfig(cfg, func(o *customerprofiles_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return customerprofiles_sdkv2.NewFromConfig(cfg, + customerprofiles_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/databrew/README.md b/internal/service/databrew/README.md new file mode 100644 index 00000000000..2561a67d291 --- /dev/null +++ b/internal/service/databrew/README.md @@ -0,0 +1,5 @@ +# Terraform AWS Provider 
Glue DataBrew Package + + * AWS Provider: [Contribution Guide](https://hashicorp.github.io/terraform-provider-aws/#contribute) + * Service User Guide: [AWS Glue DataBrew](https://docs.aws.amazon.com/databrew/latest/dg/what-is.html) + * Service API Guide: [Welcome](https://docs.aws.amazon.com/databrew/latest/dg/api-reference.html) diff --git a/internal/service/databrew/generate.go b/internal/service/databrew/generate.go new file mode 100644 index 00000000000..020cfef9425 --- /dev/null +++ b/internal/service/databrew/generate.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -SkipTypesImp -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/servicepackage/main.go +// ONLY generate directives and package declaration! Do not add anything else to this file. + +package databrew diff --git a/internal/service/databrew/service_endpoint_resolver_gen.go b/internal/service/databrew/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..04cdf699416 --- /dev/null +++ b/internal/service/databrew/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package databrew + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + databrew_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databrew" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ databrew_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver databrew_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: databrew_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params databrew_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up databrew endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*databrew_sdkv2.Options) { + return func(o *databrew_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/databrew/service_endpoints_gen_test.go b/internal/service/databrew/service_endpoints_gen_test.go new file mode 100644 index 00000000000..40956837ba8 --- /dev/null +++ b/internal/service/databrew/service_endpoints_gen_test.go @@ -0,0 +1,690 @@ +// Code generated by internal/generate/serviceendpointtests/main.go; DO NOT EDIT. + +package databrew_test + +import ( + "context" + "errors" + "fmt" + "maps" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + databrew_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databrew" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + terraformsdk "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type endpointTestCase struct { + with []setupFunc + expected caseExpectations +} + +type caseSetup struct { + config map[string]any + configFile configFile + environmentVariables map[string]string +} + +type configFile struct { + baseUrl string + serviceUrl string +} + +type caseExpectations struct { + diags diag.Diagnostics + endpoint string + region string +} + +type apiCallParams struct { 
+ endpoint string + region string +} + +type setupFunc func(setup *caseSetup) + +type callFunc func(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams + +const ( + packageNameConfigEndpoint = "https://packagename-config.endpoint.test/" + awsServiceEnvvarEndpoint = "https://service-envvar.endpoint.test/" + baseEnvvarEndpoint = "https://base-envvar.endpoint.test/" + serviceConfigFileEndpoint = "https://service-configfile.endpoint.test/" + baseConfigFileEndpoint = "https://base-configfile.endpoint.test/" + + aliasName0ConfigEndpoint = "https://aliasname0-config.endpoint.test/" +) + +const ( + packageName = "databrew" + awsEnvVar = "AWS_ENDPOINT_URL_DATABREW" + baseEnvVar = "AWS_ENDPOINT_URL" + configParam = "databrew" + + aliasName0 = "gluedatabrew" +) + +const ( + expectedCallRegion = "us-west-2" //lintignore:AWSAT003 +) + +func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.Setenv + const providerRegion = "us-west-2" //lintignore:AWSAT003 + const expectedEndpointRegion = providerRegion + + testcases := map[string]endpointTestCase{ + "no config": { + with: []setupFunc{withNoConfig}, + expected: expectDefaultEndpoint(t, expectedEndpointRegion), + }, + + // Package name endpoint on Config + + "package name endpoint config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides alias name 0 config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAliasName0EndpointInConfig, + }, + expected: conflictsWith(expectPackageNameConfigEndpoint()), + }, + + "package name endpoint config overrides aws service envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAwsEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEnvVar, + }, + expected: 
expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides service config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + // Alias name 0 endpoint on Config + + "alias name 0 endpoint config": { + with: []setupFunc{ + withAliasName0EndpointInConfig, + }, + expected: expectAliasName0ConfigEndpoint(), + }, + + "alias name 0 endpoint config overrides aws service envvar": { + with: []setupFunc{ + withAliasName0EndpointInConfig, + withAwsEnvVar, + }, + expected: expectAliasName0ConfigEndpoint(), + }, + + "alias name 0 endpoint config overrides base envvar": { + with: []setupFunc{ + withAliasName0EndpointInConfig, + withBaseEnvVar, + }, + expected: expectAliasName0ConfigEndpoint(), + }, + + "alias name 0 endpoint config overrides service config file": { + with: []setupFunc{ + withAliasName0EndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectAliasName0ConfigEndpoint(), + }, + + "alias name 0 endpoint config overrides base config file": { + with: []setupFunc{ + withAliasName0EndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: expectAliasName0ConfigEndpoint(), + }, + + // Service endpoint in AWS envvar + + "service aws envvar": { + with: []setupFunc{ + withAwsEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base envvar": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides service config file": { + with: []setupFunc{ + withAwsEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base config 
file": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + // Base endpoint in envvar + + "base endpoint envvar": { + with: []setupFunc{ + withBaseEnvVar, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides service config file": { + with: []setupFunc{ + withBaseEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides base config file": { + with: []setupFunc{ + withBaseEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + // Service endpoint in config file + + "service config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + "service config file overrides base config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + withBaseEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + // Base endpoint in config file + + "base endpoint config file": { + with: []setupFunc{ + withBaseEndpointInConfigFile, + }, + expected: expectBaseConfigFileEndpoint(), + }, + + // Use FIPS endpoint on Config + + "use fips config": { + with: []setupFunc{ + withUseFIPSInConfig, + }, + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), + }, + + "use fips config with package name endpoint config": { + with: []setupFunc{ + withUseFIPSInConfig, + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + } + + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase + + t.Run(name, func(t *testing.T) { + testEndpointCase(t, providerRegion, testcase, callService) + }) + } +} + +func defaultEndpoint(region string) (url.URL, error) { + r := databrew_sdkv2.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(context.Background(), databrew_sdkv2.EndpointParameters{ + 
Region: aws_sdkv2.String(region), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := databrew_sdkv2.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(context.Background(), databrew_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { + t.Helper() + + client := meta.DataBrewClient(ctx) + + var result apiCallParams + + _, err := client.ListProjects(ctx, &databrew_sdkv2.ListProjectsInput{}, + func(opts *databrew_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } + + return result +} + +func withNoConfig(_ *caseSetup) { + // no-op +} + +func withPackageNameEndpointInConfig(setup *caseSetup) { + if _, ok := setup.config[names.AttrEndpoints]; !ok { + setup.config[names.AttrEndpoints] = []any{ + map[string]any{}, + } + } + endpoints := setup.config[names.AttrEndpoints].([]any)[0].(map[string]any) + endpoints[packageName] = packageNameConfigEndpoint +} + +func withAliasName0EndpointInConfig(setup *caseSetup) { + if _, ok := setup.config[names.AttrEndpoints]; !ok { + setup.config[names.AttrEndpoints] = []any{ + map[string]any{}, + } + } + endpoints := setup.config[names.AttrEndpoints].([]any)[0].(map[string]any) + endpoints[aliasName0] = aliasName0ConfigEndpoint +} + +func conflictsWith(e caseExpectations) caseExpectations { + e.diags = append(e.diags, 
provider.ConflictingEndpointsWarningDiag( + cty.GetAttrPath(names.AttrEndpoints).IndexInt(0), + packageName, + aliasName0, + )) + return e +} + +func withAwsEnvVar(setup *caseSetup) { + setup.environmentVariables[awsEnvVar] = awsServiceEnvvarEndpoint +} + +func withBaseEnvVar(setup *caseSetup) { + setup.environmentVariables[baseEnvVar] = baseEnvvarEndpoint +} + +func withServiceEndpointInConfigFile(setup *caseSetup) { + setup.configFile.serviceUrl = serviceConfigFileEndpoint +} + +func withBaseEndpointInConfigFile(setup *caseSetup) { + setup.configFile.baseUrl = baseConfigFileEndpoint +} + +func withUseFIPSInConfig(setup *caseSetup) { + setup.config["use_fips_endpoint"] = true +} + +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectPackageNameConfigEndpoint() caseExpectations { + return caseExpectations{ + endpoint: packageNameConfigEndpoint, + region: expectedCallRegion, + } +} + +func expectAliasName0ConfigEndpoint() caseExpectations { + return caseExpectations{ + endpoint: aliasName0ConfigEndpoint, + region: expectedCallRegion, + } +} + +func expectAwsEnvVarEndpoint() caseExpectations 
{ + return caseExpectations{ + endpoint: awsServiceEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectServiceConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: serviceConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, callF callFunc) { + t.Helper() + + ctx := context.Background() + + setup := caseSetup{ + config: map[string]any{}, + environmentVariables: map[string]string{}, + } + + for _, f := range testcase.with { + f(&setup) + } + + config := map[string]any{ + names.AttrAccessKey: servicemocks.MockStaticAccessKey, + names.AttrSecretKey: servicemocks.MockStaticSecretKey, + names.AttrRegion: region, + names.AttrSkipCredentialsValidation: true, + names.AttrSkipRequestingAccountID: true, + } + + maps.Copy(config, setup.config) + + if setup.configFile.baseUrl != "" || setup.configFile.serviceUrl != "" { + config[names.AttrProfile] = "default" + tempDir := t.TempDir() + writeSharedConfigFile(t, &config, tempDir, generateSharedConfigFile(setup.configFile)) + } + + for k, v := range setup.environmentVariables { + t.Setenv(k, v) + } + + p, err := provider.New(ctx) + if err != nil { + t.Fatal(err) + } + + expectedDiags := testcase.expected.diags + expectedDiags = append( + expectedDiags, + errs.NewWarningDiagnostic( + "AWS account ID not found for provider", + "See https://registry.terraform.io/providers/hashicorp/aws/latest/docs#skip_requesting_account_id for implications.", + ), + ) + + diags := p.Configure(ctx, terraformsdk.NewResourceConfigRaw(config)) + + if diff := cmp.Diff(diags, expectedDiags, cmp.Comparer(sdkdiag.Comparer)); diff != 
"" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diags.HasError() { + return + } + + meta := p.Meta().(*conns.AWSClient) + + callParams := callF(ctx, t, meta) + + if e, a := testcase.expected.endpoint, callParams.endpoint; e != a { + t.Errorf("expected endpoint %q, got %q", e, a) + } + + if e, a := testcase.expected.region, callParams.region; e != a { + t.Errorf("expected region %q, got %q", e, a) + } +} + +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = 
fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + +func generateSharedConfigFile(config configFile) string { + var buf strings.Builder + + buf.WriteString(` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`) + if config.baseUrl != "" { + buf.WriteString(fmt.Sprintf("endpoint_url = %s\n", config.baseUrl)) + } + + if config.serviceUrl != "" { + buf.WriteString(fmt.Sprintf(` +services = endpoint-test + +[services endpoint-test] +%[1]s = + endpoint_url = %[2]s +`, configParam, serviceConfigFileEndpoint)) + } + + return buf.String() +} + +func writeSharedConfigFile(t *testing.T, config *map[string]any, tempDir, content string) string { + t.Helper() + + file, err := os.Create(filepath.Join(tempDir, "aws-sdk-go-base-shared-configuration-file")) + if err != nil { + t.Fatalf("creating shared configuration file: %s", err) + } + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf(" 
writing shared configuration file: %s", err) + } + + if v, ok := (*config)[names.AttrSharedConfigFiles]; !ok { + (*config)[names.AttrSharedConfigFiles] = []any{file.Name()} + } else { + (*config)[names.AttrSharedConfigFiles] = append(v.([]any), file.Name()) + } + + return file.Name() +} diff --git a/internal/service/databrew/service_package_gen.go b/internal/service/databrew/service_package_gen.go new file mode 100644 index 00000000000..84f14e2ae69 --- /dev/null +++ b/internal/service/databrew/service_package_gen.go @@ -0,0 +1,49 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package databrew + +import ( + "context" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + databrew_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databrew" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type servicePackage struct{} + +func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { + return []*types.ServicePackageFrameworkDataSource{} +} + +func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { + return []*types.ServicePackageFrameworkResource{} +} + +func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { + return []*types.ServicePackageSDKDataSource{} +} + +func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { + return []*types.ServicePackageSDKResource{} +} + +func (p *servicePackage) ServicePackageName() string { + return names.DataBrew +} + +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*databrew_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) + + return databrew_sdkv2.NewFromConfig(cfg, + databrew_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil +} + +func ServicePackage(ctx context.Context) conns.ServicePackage { + return &servicePackage{} +} diff --git a/internal/service/databrew/tags_gen.go b/internal/service/databrew/tags_gen.go new file mode 100644 index 00000000000..883c6da820b --- /dev/null +++ b/internal/service/databrew/tags_gen.go @@ -0,0 +1,128 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package databrew + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/databrew" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists databrew service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func listTags(ctx context.Context, conn *databrew.Client, identifier string, optFns ...func(*databrew.Options)) (tftags.KeyValueTags, error) { + input := &databrew.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, input, optFns...) + + if err != nil { + return tftags.New(ctx, nil), err + } + + return KeyValueTags(ctx, output.Tags), nil +} + +// ListTags lists databrew service tags and set them in Context. +// It is called from outside this package. 
+func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).DataBrewClient(ctx), identifier) + + if err != nil { + return err + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// map[string]string handling + +// Tags returns databrew service tags. +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() +} + +// KeyValueTags creates tftags.KeyValueTags from databrew service tags. +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { + return tftags.New(ctx, tags) +} + +// getTagsIn returns databrew service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) map[string]string { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets databrew service tags in Context. +func setTagsOut(ctx context.Context, tags map[string]string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) + } +} + +// updateTags updates databrew service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func updateTags(ctx context.Context, conn *databrew.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*databrew.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.DataBrew) + if len(removedTags) > 0 { + input := &databrew.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("untagging resource (%s): %w", identifier, err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.DataBrew) + if len(updatedTags) > 0 { + input := &databrew.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: Tags(updatedTags), + } + + _, err := conn.TagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// UpdateTags updates databrew service tags. +// It is called from outside this package. 
+func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).DataBrewClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/dataexchange/data_set.go b/internal/service/dataexchange/data_set.go index 55bb99d32c5..e8d5397fbc9 100644 --- a/internal/service/dataexchange/data_set.go +++ b/internal/service/dataexchange/data_set.go @@ -7,13 +7,15 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dataexchange" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" + awstypes "github.com/aws/aws-sdk-go-v2/service/dataexchange/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -39,10 +41,10 @@ func ResourceDataSet() *schema.Resource { Computed: true, }, "asset_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(dataexchange.AssetType_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.AssetType](), }, names.AttrDescription: { Type: schema.TypeString, @@ -62,28 +64,28 @@ func ResourceDataSet() *schema.Resource { func resourceDataSetCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).DataExchangeConn(ctx) + conn := meta.(*conns.AWSClient).DataExchangeClient(ctx) input := &dataexchange.CreateDataSetInput{ Name: aws.String(d.Get(names.AttrName).(string)), - AssetType: aws.String(d.Get("asset_type").(string)), + AssetType: awstypes.AssetType(d.Get("asset_type").(string)), Description: aws.String(d.Get(names.AttrDescription).(string)), Tags: getTagsIn(ctx), } - out, err := conn.CreateDataSetWithContext(ctx, input) + out, err := conn.CreateDataSet(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataExchange DataSet: %s", err) } - d.SetId(aws.StringValue(out.Id)) + d.SetId(aws.ToString(out.Id)) return append(diags, resourceDataSetRead(ctx, d, meta)...) } func resourceDataSetRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataExchangeConn(ctx) + conn := meta.(*conns.AWSClient).DataExchangeClient(ctx) dataSet, err := FindDataSetById(ctx, conn, d.Id()) @@ -109,7 +111,7 @@ func resourceDataSetRead(ctx context.Context, d *schema.ResourceData, meta inter func resourceDataSetUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataExchangeConn(ctx) + conn := meta.(*conns.AWSClient).DataExchangeClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &dataexchange.UpdateDataSetInput{ @@ -125,7 +127,7 @@ func resourceDataSetUpdate(ctx context.Context, d *schema.ResourceData, meta int } log.Printf("[DEBUG] Updating DataExchange DataSet: %s", d.Id()) - _, err := conn.UpdateDataSetWithContext(ctx, input) + _, err := conn.UpdateDataSet(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DataExchange DataSet (%s): %s", d.Id(), err) } @@ -136,16 +138,16 @@ func resourceDataSetUpdate(ctx context.Context, d *schema.ResourceData, meta int func resourceDataSetDelete(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataExchangeConn(ctx) + conn := meta.(*conns.AWSClient).DataExchangeClient(ctx) input := &dataexchange.DeleteDataSetInput{ DataSetId: aws.String(d.Id()), } log.Printf("[DEBUG] Deleting DataExchange DataSet: %s", d.Id()) - _, err := conn.DeleteDataSetWithContext(ctx, input) + _, err := conn.DeleteDataSet(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, dataexchange.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } return sdkdiag.AppendErrorf(diags, "deleting DataExchange DataSet: %s", err) diff --git a/internal/service/dataexchange/data_set_test.go b/internal/service/dataexchange/data_set_test.go index aff8b64c19f..8e7b0068aff 100644 --- a/internal/service/dataexchange/data_set_test.go +++ b/internal/service/dataexchange/data_set_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/dataexchange" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -28,7 +28,7 @@ func TestAccDataExchangeDataSet_basic(t *testing.T) { resourceName := "aws_dataexchange_data_set.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, dataexchange.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.DataExchangeEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.DataExchangeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -68,7 +68,7 @@ func TestAccDataExchangeDataSet_tags(t *testing.T) { resourceName := 
"aws_dataexchange_data_set.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, dataexchange.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.DataExchangeEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.DataExchangeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -114,7 +114,7 @@ func TestAccDataExchangeDataSet_disappears(t *testing.T) { resourceName := "aws_dataexchange_data_set.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, dataexchange.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.DataExchangeEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.DataExchangeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckDataSetDestroy(ctx), @@ -143,7 +143,7 @@ func testAccCheckDataSetExists(ctx context.Context, n string, v *dataexchange.Ge return fmt.Errorf("No ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataExchangeConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataExchangeClient(ctx) resp, err := tfdataexchange.FindDataSetById(ctx, conn, rs.Primary.ID) if err != nil { return err @@ -160,7 +160,7 @@ func testAccCheckDataSetExists(ctx context.Context, n string, v *dataexchange.Ge func testAccCheckDataSetDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataExchangeConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataExchangeClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_dataexchange_data_set" { diff --git a/internal/service/dataexchange/find.go b/internal/service/dataexchange/find.go 
index f79453bc560..e9720f06670 100644 --- a/internal/service/dataexchange/find.go +++ b/internal/service/dataexchange/find.go @@ -6,19 +6,20 @@ package dataexchange import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dataexchange" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" + awstypes "github.com/aws/aws-sdk-go-v2/service/dataexchange/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -func FindDataSetById(ctx context.Context, conn *dataexchange.DataExchange, id string) (*dataexchange.GetDataSetOutput, error) { +func FindDataSetById(ctx context.Context, conn *dataexchange.Client, id string) (*dataexchange.GetDataSetOutput, error) { input := &dataexchange.GetDataSetInput{ DataSetId: aws.String(id), } - output, err := conn.GetDataSetWithContext(ctx, input) + output, err := conn.GetDataSet(ctx, input) - if tfawserr.ErrCodeEquals(err, dataexchange.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -32,14 +33,14 @@ func FindDataSetById(ctx context.Context, conn *dataexchange.DataExchange, id st return output, nil } -func FindRevisionById(ctx context.Context, conn *dataexchange.DataExchange, dataSetId, revisionId string) (*dataexchange.GetRevisionOutput, error) { +func FindRevisionById(ctx context.Context, conn *dataexchange.Client, dataSetId, revisionId string) (*dataexchange.GetRevisionOutput, error) { input := &dataexchange.GetRevisionInput{ DataSetId: aws.String(dataSetId), RevisionId: aws.String(revisionId), } - output, err := conn.GetRevisionWithContext(ctx, input) + output, err := conn.GetRevision(ctx, input) - if tfawserr.ErrCodeEquals(err, dataexchange.ErrCodeResourceNotFoundException) { + if 
errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/dataexchange/generate.go b/internal/service/dataexchange/generate.go index 3bc2017f691..6ed21003fba 100644 --- a/internal/service/dataexchange/generate.go +++ b/internal/service/dataexchange/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -ListTags -ServiceTagsMap -SkipTypesImp -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/dataexchange/revision.go b/internal/service/dataexchange/revision.go index b91be75b754..1edea240ea0 100644 --- a/internal/service/dataexchange/revision.go +++ b/internal/service/dataexchange/revision.go @@ -9,13 +9,14 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dataexchange" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" + awstypes "github.com/aws/aws-sdk-go-v2/service/dataexchange/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -63,7 +64,7 @@ func ResourceRevision() *schema.Resource { func resourceRevisionCreate(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataExchangeConn(ctx) + conn := meta.(*conns.AWSClient).DataExchangeClient(ctx) input := &dataexchange.CreateRevisionInput{ DataSetId: aws.String(d.Get("data_set_id").(string)), @@ -71,19 +72,19 @@ func resourceRevisionCreate(ctx context.Context, d *schema.ResourceData, meta in Tags: getTagsIn(ctx), } - out, err := conn.CreateRevisionWithContext(ctx, input) + out, err := conn.CreateRevision(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataExchange Revision: %s", err) } - d.SetId(fmt.Sprintf("%s:%s", aws.StringValue(out.DataSetId), aws.StringValue(out.Id))) + d.SetId(fmt.Sprintf("%s:%s", aws.ToString(out.DataSetId), aws.ToString(out.Id))) return append(diags, resourceRevisionRead(ctx, d, meta)...) } func resourceRevisionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataExchangeConn(ctx) + conn := meta.(*conns.AWSClient).DataExchangeClient(ctx) dataSetId, revisionId, err := RevisionParseResourceID(d.Id()) if err != nil { @@ -114,7 +115,7 @@ func resourceRevisionRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceRevisionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataExchangeConn(ctx) + conn := meta.(*conns.AWSClient).DataExchangeClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &dataexchange.UpdateRevisionInput{ @@ -127,7 +128,7 @@ func resourceRevisionUpdate(ctx context.Context, d *schema.ResourceData, meta in } log.Printf("[DEBUG] Updating DataExchange Revision: %s", d.Id()) - _, err := conn.UpdateRevisionWithContext(ctx, input) + _, err := conn.UpdateRevision(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DataExchange 
Revision (%s): %s", d.Id(), err) } @@ -138,7 +139,7 @@ func resourceRevisionUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceRevisionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataExchangeConn(ctx) + conn := meta.(*conns.AWSClient).DataExchangeClient(ctx) input := &dataexchange.DeleteRevisionInput{ RevisionId: aws.String(d.Get("revision_id").(string)), @@ -146,9 +147,9 @@ func resourceRevisionDelete(ctx context.Context, d *schema.ResourceData, meta in } log.Printf("[DEBUG] Deleting DataExchange Revision: %s", d.Id()) - _, err := conn.DeleteRevisionWithContext(ctx, input) + _, err := conn.DeleteRevision(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, dataexchange.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } return sdkdiag.AppendErrorf(diags, "deleting DataExchange Revision: %s", err) diff --git a/internal/service/dataexchange/revision_test.go b/internal/service/dataexchange/revision_test.go index 66b93642c2f..a4fa16f5009 100644 --- a/internal/service/dataexchange/revision_test.go +++ b/internal/service/dataexchange/revision_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/dataexchange" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -27,7 +27,7 @@ func TestAccDataExchangeRevision_basic(t *testing.T) { resourceName := "aws_dataexchange_revision.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, dataexchange.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, 
names.DataExchangeEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.DataExchangeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckRevisionDestroy(ctx), @@ -57,7 +57,7 @@ func TestAccDataExchangeRevision_tags(t *testing.T) { resourceName := "aws_dataexchange_revision.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, dataexchange.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.DataExchangeEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.DataExchangeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckRevisionDestroy(ctx), @@ -103,7 +103,7 @@ func TestAccDataExchangeRevision_disappears(t *testing.T) { resourceName := "aws_dataexchange_revision.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, dataexchange.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.DataExchangeEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.DataExchangeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckRevisionDestroy(ctx), @@ -128,7 +128,7 @@ func TestAccDataExchangeRevision_disappears_dataSet(t *testing.T) { resourceName := "aws_dataexchange_revision.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, dataexchange.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.DataExchangeEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.DataExchangeServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckRevisionDestroy(ctx), @@ -157,7 +157,7 @@ func testAccCheckRevisionExists(ctx 
context.Context, n string, v *dataexchange.G return fmt.Errorf("No ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataExchangeConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataExchangeClient(ctx) dataSetId, revisionId, err := tfdataexchange.RevisionParseResourceID(rs.Primary.ID) if err != nil { @@ -180,7 +180,7 @@ func testAccCheckRevisionExists(ctx context.Context, n string, v *dataexchange.G func testAccCheckRevisionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataExchangeConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataExchangeClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_dataexchange_revision" { diff --git a/internal/service/dataexchange/service_endpoint_resolver_gen.go b/internal/service/dataexchange/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..4f0136469fa --- /dev/null +++ b/internal/service/dataexchange/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package dataexchange + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + dataexchange_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dataexchange" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ dataexchange_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver dataexchange_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: dataexchange_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params dataexchange_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up dataexchange endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*dataexchange_sdkv2.Options) { + return func(o *dataexchange_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/dataexchange/service_endpoints_gen_test.go b/internal/service/dataexchange/service_endpoints_gen_test.go index aceaa5b70f0..e4f07a0660c 100644 --- a/internal/service/dataexchange/service_endpoints_gen_test.go +++ b/internal/service/dataexchange/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package dataexchange_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - dataexchange_sdkv1 "github.com/aws/aws-sdk-go/service/dataexchange" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + dataexchange_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dataexchange" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": 
{ @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := dataexchange_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(dataexchange_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), dataexchange_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := dataexchange_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(dataexchange_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), dataexchange_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.DataExchangeConn(ctx) - - req, _ := client.ListDataSetsRequest(&dataexchange_sdkv1.ListDataSetsInput{}) + client := meta.DataExchangeClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListDataSets(ctx, 
&dataexchange_sdkv2.ListDataSetsInput{}, + func(opts *dataexchange_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + 
retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, 
middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/dataexchange/service_package_gen.go b/internal/service/dataexchange/service_package_gen.go index 8ffacc16ec0..08f1aa858f2 100644 --- a/internal/service/dataexchange/service_package_gen.go +++ b/internal/service/dataexchange/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package dataexchange import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - dataexchange_sdkv1 "github.com/aws/aws-sdk-go/service/dataexchange" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + dataexchange_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dataexchange" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -54,25 +51,14 @@ func (p *servicePackage) ServicePackageName() string { return names.DataExchange } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*dataexchange_sdkv1.DataExchange, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*dataexchange_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return dataexchange_sdkv1.New(sess.Copy(&cfg)), nil + return dataexchange_sdkv2.NewFromConfig(cfg, + dataexchange_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/dataexchange/sweep.go b/internal/service/dataexchange/sweep.go index 9b06af24402..45e4259c89c 100644 --- a/internal/service/dataexchange/sweep.go +++ b/internal/service/dataexchange/sweep.go @@ -7,12 +7,12 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dataexchange" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -30,38 +30,36 
@@ func sweepDataSets(region string) error { return fmt.Errorf("error getting client: %w", err) } - conn := client.DataExchangeConn(ctx) + conn := client.DataExchangeClient(ctx) sweepResources := make([]sweep.Sweepable, 0) var errs *multierror.Error input := &dataexchange.ListDataSetsInput{} - err = conn.ListDataSetsPagesWithContext(ctx, input, func(page *dataexchange.ListDataSetsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := dataexchange.NewListDataSetsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing DataExchange DataSet for %s: %w", region, err)) } for _, dataSet := range page.DataSets { r := ResourceDataSet() d := r.Data(nil) - d.SetId(aws.StringValue(dataSet.Id)) + d.SetId(aws.ToString(dataSet.Id)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error listing DataExchange DataSet for %s: %w", region, err)) } if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { errs = multierror.Append(errs, fmt.Errorf("error sweeping DataExchange DataSet for %s: %w", region, err)) } - if awsv1.SkipSweepError(errs.ErrorOrNil()) { + if awsv2.SkipSweepError(errs.ErrorOrNil()) { log.Printf("[WARN] Skipping DataExchange DataSet sweep for %s: %s", region, errs) return nil } diff --git a/internal/service/dataexchange/tags_gen.go b/internal/service/dataexchange/tags_gen.go index 2b91bfbcf2e..da726edc5c6 100644 --- a/internal/service/dataexchange/tags_gen.go +++ b/internal/service/dataexchange/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/dataexchange" - "github.com/aws/aws-sdk-go/service/dataexchange/dataexchangeiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/dataexchange" 
"github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists dataexchange service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn dataexchangeiface.DataExchangeAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *dataexchange.Client, identifier string, optFns ...func(*dataexchange.Options)) (tftags.KeyValueTags, error) { input := &dataexchange.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn dataexchangeiface.DataExchangeAPI, ident // ListTags lists dataexchange service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).DataExchangeConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).DataExchangeClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns dataexchange service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from dataexchange service tags. 
-func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns dataexchange service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets dataexchange service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates dataexchange service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn dataexchangeiface.DataExchangeAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *dataexchange.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*dataexchange.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn dataexchangeiface.DataExchangeAPI, ide if len(removedTags) > 0 { input := &dataexchange.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) 
if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn dataexchangeiface.DataExchangeAPI, ide Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn dataexchangeiface.DataExchangeAPI, ide // UpdateTags updates dataexchange service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).DataExchangeConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).DataExchangeClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/datapipeline/generate.go b/internal/service/datapipeline/generate.go index fcee5d2990e..c2f43790c6e 100644 --- a/internal/service/datapipeline/generate.go +++ b/internal/service/datapipeline/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTagsInIDElem=PipelineId -ServiceTagsSlice -TagOp=AddTags -TagInIDElem=PipelineId -UntagOp=RemoveTags -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTagsInIDElem=PipelineId -ServiceTagsSlice -TagOp=AddTags -TagInIDElem=PipelineId -UntagOp=RemoveTags -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/datapipeline/pipeline.go b/internal/service/datapipeline/pipeline.go index 6a6d1948f2d..3dc0b90a913 100644 --- a/internal/service/datapipeline/pipeline.go +++ b/internal/service/datapipeline/pipeline.go @@ -9,14 +9,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datapipeline" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datapipeline" + awstypes "github.com/aws/aws-sdk-go-v2/service/datapipeline/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -58,7 +59,7 @@ func ResourcePipeline() *schema.Resource { func resourcePipelineCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataPipelineConn(ctx) + conn := meta.(*conns.AWSClient).DataPipelineClient(ctx) uniqueID := id.UniqueId() input := datapipeline.CreatePipelineInput{ @@ -71,23 +72,23 @@ func resourcePipelineCreate(ctx context.Context, d *schema.ResourceData, meta in input.Description = aws.String(v.(string)) } - resp, err := conn.CreatePipelineWithContext(ctx, &input) + resp, err := conn.CreatePipeline(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating datapipeline: %s", err) } - d.SetId(aws.StringValue(resp.PipelineId)) + d.SetId(aws.ToString(resp.PipelineId)) return append(diags, resourcePipelineRead(ctx, d, meta)...) 
} func resourcePipelineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataPipelineConn(ctx) + conn := meta.(*conns.AWSClient).DataPipelineClient(ctx) v, err := PipelineRetrieve(ctx, d.Id(), conn) - if tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineNotFoundException) || tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineDeletedException) || v == nil { + if errs.IsA[*awstypes.PipelineNotFoundException](err) || errs.IsA[*awstypes.PipelineDeletedException](err) || v == nil { log.Printf("[WARN] DataPipeline (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -114,14 +115,14 @@ func resourcePipelineUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourcePipelineDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataPipelineConn(ctx) + conn := meta.(*conns.AWSClient).DataPipelineClient(ctx) opts := datapipeline.DeletePipelineInput{ PipelineId: aws.String(d.Id()), } - _, err := conn.DeletePipelineWithContext(ctx, &opts) - if tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineNotFoundException) || tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineDeletedException) { + _, err := conn.DeletePipeline(ctx, &opts) + if errs.IsA[*awstypes.PipelineNotFoundException](err) || errs.IsA[*awstypes.PipelineDeletedException](err) { return diags } if err != nil { @@ -134,39 +135,35 @@ func resourcePipelineDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } -func PipelineRetrieve(ctx context.Context, id string, conn *datapipeline.DataPipeline) (*datapipeline.PipelineDescription, error) { +func PipelineRetrieve(ctx context.Context, id string, conn *datapipeline.Client) (*awstypes.PipelineDescription, error) { opts := datapipeline.DescribePipelinesInput{ - PipelineIds: []*string{aws.String(id)}, + PipelineIds: 
[]string{id}, } - resp, err := conn.DescribePipelinesWithContext(ctx, &opts) + resp, err := conn.DescribePipelines(ctx, &opts) if err != nil { return nil, err } - var pipeline *datapipeline.PipelineDescription + var pipeline awstypes.PipelineDescription for _, p := range resp.PipelineDescriptionList { - if p == nil { - continue - } - - if aws.StringValue(p.PipelineId) == id { + if aws.ToString(p.PipelineId) == id { pipeline = p break } } - return pipeline, nil + return &pipeline, nil } -func WaitForDeletion(ctx context.Context, conn *datapipeline.DataPipeline, pipelineID string) error { +func WaitForDeletion(ctx context.Context, conn *datapipeline.Client, pipelineID string) error { params := &datapipeline.DescribePipelinesInput{ - PipelineIds: []*string{aws.String(pipelineID)}, + PipelineIds: []string{pipelineID}, } return retry.RetryContext(ctx, 10*time.Minute, func() *retry.RetryError { - _, err := conn.DescribePipelinesWithContext(ctx, params) - if tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineNotFoundException) || tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineDeletedException) { + _, err := conn.DescribePipelines(ctx, params) + if errs.IsA[*awstypes.PipelineNotFoundException](err) || errs.IsA[*awstypes.PipelineDeletedException](err) { return nil } if err != nil { diff --git a/internal/service/datapipeline/pipeline_data_source.go b/internal/service/datapipeline/pipeline_data_source.go index 419721d1868..6a57525ff6a 100644 --- a/internal/service/datapipeline/pipeline_data_source.go +++ b/internal/service/datapipeline/pipeline_data_source.go @@ -40,7 +40,7 @@ func DataSourcePipeline() *schema.Resource { func dataSourcePipelineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataPipelineConn(ctx) + conn := meta.(*conns.AWSClient).DataPipelineClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := 
meta.(*conns.AWSClient).IgnoreTagsConfig diff --git a/internal/service/datapipeline/pipeline_definition.go b/internal/service/datapipeline/pipeline_definition.go index e690dd0474c..0716ee12885 100644 --- a/internal/service/datapipeline/pipeline_definition.go +++ b/internal/service/datapipeline/pipeline_definition.go @@ -10,14 +10,15 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datapipeline" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datapipeline" + awstypes "github.com/aws/aws-sdk-go-v2/service/datapipeline/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -137,7 +138,7 @@ func ResourcePipelineDefinition() *schema.Resource { func resourcePipelineDefinitionPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataPipelineConn(ctx) + conn := meta.(*conns.AWSClient).DataPipelineClient(ctx) pipelineID := d.Get("pipeline_id").(string) input := &datapipeline.PutPipelineDefinitionInput{ @@ -156,15 +157,15 @@ func resourcePipelineDefinitionPut(ctx context.Context, d *schema.ResourceData, var err error var output *datapipeline.PutPipelineDefinitionOutput err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { - output, err = conn.PutPipelineDefinitionWithContext(ctx, input) + output, err = 
conn.PutPipelineDefinition(ctx, input) if err != nil { - if tfawserr.ErrCodeEquals(err, datapipeline.ErrCodeInternalServiceError) { + if errs.IsA[*awstypes.InternalServiceError](err) { return retry.RetryableError(err) } return retry.NonRetryableError(err) } - if aws.BoolValue(output.Errored) { + if output.Errored { errors := getValidationError(output.ValidationErrors) if strings.Contains(errors.Error(), names.AttrRole) { return retry.RetryableError(fmt.Errorf("validating after creation DataPipeline Pipeline Definition (%s): %w", pipelineID, errors)) @@ -175,14 +176,14 @@ func resourcePipelineDefinitionPut(ctx context.Context, d *schema.ResourceData, }) if tfresource.TimedOut(err) { - output, err = conn.PutPipelineDefinitionWithContext(ctx, input) + output, err = conn.PutPipelineDefinition(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating DataPipeline Pipeline Definition (%s): %s", pipelineID, err) } - if aws.BoolValue(output.Errored) { + if output.Errored { return sdkdiag.AppendErrorf(diags, "validating after creation DataPipeline Pipeline Definition (%s): %s", pipelineID, getValidationError(output.ValidationErrors)) } @@ -191,7 +192,7 @@ func resourcePipelineDefinitionPut(ctx context.Context, d *schema.ResourceData, PipelineId: aws.String(pipelineID), } - _, err = conn.ActivatePipelineWithContext(ctx, input2) + _, err = conn.ActivatePipeline(ctx, input2) if err != nil { return sdkdiag.AppendErrorf(diags, "activating DataPipeline Pipeline Definition (%s): %s", pipelineID, err) } @@ -204,15 +205,15 @@ func resourcePipelineDefinitionPut(ctx context.Context, d *schema.ResourceData, func resourcePipelineDefinitionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataPipelineConn(ctx) + conn := meta.(*conns.AWSClient).DataPipelineClient(ctx) input := &datapipeline.GetPipelineDefinitionInput{ PipelineId: aws.String(d.Id()), } - resp, err := 
conn.GetPipelineDefinitionWithContext(ctx, input) + resp, err := conn.GetPipelineDefinition(ctx, input) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineNotFoundException) || - tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineDeletedException) { + if !d.IsNewResource() && errs.IsA[*awstypes.PipelineNotFoundException](err) || + errs.IsA[*awstypes.PipelineDeletedException](err) { log.Printf("[WARN] DataPipeline Pipeline Definition (%s) not found, removing from state", d.Id()) d.SetId("") return diags @@ -226,22 +227,22 @@ func resourcePipelineDefinitionRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "setting `%s` for DataPipeline Pipeline Definition (%s): %s", "parameter_object", d.Id(), err) } if err = d.Set("parameter_value", flattenPipelineDefinitionParameterValues(resp.ParameterValues)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting `%s` for DataPipeline Pipeline Definition (%s): %s", "parameter_object", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "setting `%s` for DataPipeline Pipeline Definition (%s): %s", "parameter_value", d.Id(), err) } if err = d.Set("pipeline_object", flattenPipelineDefinitionObjects(resp.PipelineObjects)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting `%s` for DataPipeline Pipeline Definition (%s): %s", "parameter_object", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "setting `%s` for DataPipeline Pipeline Definition (%s): %s", "pipeline_object", d.Id(), err) } d.Set("pipeline_id", d.Id()) return diags } -func expandPipelineDefinitionParameterObject(tfMap map[string]interface{}) *datapipeline.ParameterObject { +func expandPipelineDefinitionParameterObject(tfMap map[string]interface{}) awstypes.ParameterObject { if tfMap == nil { - return nil + return awstypes.ParameterObject{} } - apiObject := &datapipeline.ParameterObject{ + apiObject := awstypes.ParameterObject{ Attributes: 
expandPipelineDefinitionParameterAttributes(tfMap["attribute"].(*schema.Set).List()), Id: aws.String(tfMap[names.AttrID].(string)), } @@ -249,12 +250,12 @@ func expandPipelineDefinitionParameterObject(tfMap map[string]interface{}) *data return apiObject } -func expandPipelineDefinitionParameterAttribute(tfMap map[string]interface{}) *datapipeline.ParameterAttribute { +func expandPipelineDefinitionParameterAttribute(tfMap map[string]interface{}) awstypes.ParameterAttribute { if tfMap == nil { - return nil + return awstypes.ParameterAttribute{} } - apiObject := &datapipeline.ParameterAttribute{ + apiObject := awstypes.ParameterAttribute{ Key: aws.String(tfMap[names.AttrKey].(string)), StringValue: aws.String(tfMap["string_value"].(string)), } @@ -262,12 +263,12 @@ func expandPipelineDefinitionParameterAttribute(tfMap map[string]interface{}) *d return apiObject } -func expandPipelineDefinitionParameterAttributes(tfList []interface{}) []*datapipeline.ParameterAttribute { +func expandPipelineDefinitionParameterAttributes(tfList []interface{}) []awstypes.ParameterAttribute { if len(tfList) == 0 { return nil } - var apiObjects []*datapipeline.ParameterAttribute + var apiObjects []awstypes.ParameterAttribute for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -284,12 +285,12 @@ func expandPipelineDefinitionParameterAttributes(tfList []interface{}) []*datapi return apiObjects } -func expandPipelineDefinitionParameterObjects(tfList []interface{}) []*datapipeline.ParameterObject { +func expandPipelineDefinitionParameterObjects(tfList []interface{}) []awstypes.ParameterObject { if len(tfList) == 0 { return nil } - var apiObjects []*datapipeline.ParameterObject + var apiObjects []awstypes.ParameterObject for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -306,31 +307,23 @@ func expandPipelineDefinitionParameterObjects(tfList []interface{}) []*datapipel return apiObjects } -func 
flattenPipelineDefinitionParameterObject(apiObject *datapipeline.ParameterObject) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenPipelineDefinitionParameterObject(apiObject awstypes.ParameterObject) map[string]interface{} { tfMap := map[string]interface{}{} tfMap["attribute"] = flattenPipelineDefinitionParameterAttributes(apiObject.Attributes) - tfMap[names.AttrID] = aws.StringValue(apiObject.Id) + tfMap[names.AttrID] = aws.ToString(apiObject.Id) return tfMap } -func flattenPipelineDefinitionParameterAttribute(apiObject *datapipeline.ParameterAttribute) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenPipelineDefinitionParameterAttribute(apiObject awstypes.ParameterAttribute) map[string]interface{} { tfMap := map[string]interface{}{} - tfMap[names.AttrKey] = aws.StringValue(apiObject.Key) - tfMap["string_value"] = aws.StringValue(apiObject.StringValue) + tfMap[names.AttrKey] = aws.ToString(apiObject.Key) + tfMap["string_value"] = aws.ToString(apiObject.StringValue) return tfMap } -func flattenPipelineDefinitionParameterAttributes(apiObjects []*datapipeline.ParameterAttribute) []map[string]interface{} { +func flattenPipelineDefinitionParameterAttributes(apiObjects []awstypes.ParameterAttribute) []map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -338,17 +331,13 @@ func flattenPipelineDefinitionParameterAttributes(apiObjects []*datapipeline.Par var tfList []map[string]interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenPipelineDefinitionParameterAttribute(apiObject)) } return tfList } -func flattenPipelineDefinitionParameterObjects(apiObjects []*datapipeline.ParameterObject) []map[string]interface{} { +func flattenPipelineDefinitionParameterObjects(apiObjects []awstypes.ParameterObject) []map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -356,22 +345,18 @@ func 
flattenPipelineDefinitionParameterObjects(apiObjects []*datapipeline.Parame var tfList []map[string]interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenPipelineDefinitionParameterObject(apiObject)) } return tfList } -func expandPipelineDefinitionParameterValue(tfMap map[string]interface{}) *datapipeline.ParameterValue { +func expandPipelineDefinitionParameterValue(tfMap map[string]interface{}) awstypes.ParameterValue { if tfMap == nil { - return nil + return awstypes.ParameterValue{} } - apiObject := &datapipeline.ParameterValue{ + apiObject := awstypes.ParameterValue{ Id: aws.String(tfMap[names.AttrID].(string)), StringValue: aws.String(tfMap["string_value"].(string)), } @@ -379,12 +364,12 @@ func expandPipelineDefinitionParameterValue(tfMap map[string]interface{}) *datap return apiObject } -func expandPipelineDefinitionParameterValues(tfList []interface{}) []*datapipeline.ParameterValue { +func expandPipelineDefinitionParameterValues(tfList []interface{}) []awstypes.ParameterValue { if len(tfList) == 0 { return nil } - var apiObjects []*datapipeline.ParameterValue + var apiObjects []awstypes.ParameterValue for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -401,19 +386,15 @@ func expandPipelineDefinitionParameterValues(tfList []interface{}) []*datapipeli return apiObjects } -func flattenPipelineDefinitionParameterValue(apiObject *datapipeline.ParameterValue) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenPipelineDefinitionParameterValue(apiObject awstypes.ParameterValue) map[string]interface{} { tfMap := map[string]interface{}{} - tfMap[names.AttrID] = aws.StringValue(apiObject.Id) - tfMap["string_value"] = aws.StringValue(apiObject.StringValue) + tfMap[names.AttrID] = aws.ToString(apiObject.Id) + tfMap["string_value"] = aws.ToString(apiObject.StringValue) return tfMap } -func flattenPipelineDefinitionParameterValues(apiObjects 
[]*datapipeline.ParameterValue) []map[string]interface{} { +func flattenPipelineDefinitionParameterValues(apiObjects []awstypes.ParameterValue) []map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -421,22 +402,18 @@ func flattenPipelineDefinitionParameterValues(apiObjects []*datapipeline.Paramet var tfList []map[string]interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenPipelineDefinitionParameterValue(apiObject)) } return tfList } -func expandPipelineDefinitionObject(tfMap map[string]interface{}) *datapipeline.PipelineObject { +func expandPipelineDefinitionObject(tfMap map[string]interface{}) awstypes.PipelineObject { if tfMap == nil { - return nil + return awstypes.PipelineObject{} } - apiObject := &datapipeline.PipelineObject{ + apiObject := awstypes.PipelineObject{ Fields: expandPipelineDefinitionPipelineFields(tfMap[names.AttrField].(*schema.Set).List()), Id: aws.String(tfMap[names.AttrID].(string)), Name: aws.String(tfMap[names.AttrName].(string)), @@ -445,12 +422,12 @@ func expandPipelineDefinitionObject(tfMap map[string]interface{}) *datapipeline. 
return apiObject } -func expandPipelineDefinitionPipelineField(tfMap map[string]interface{}) *datapipeline.Field { +func expandPipelineDefinitionPipelineField(tfMap map[string]interface{}) awstypes.Field { if tfMap == nil { - return nil + return awstypes.Field{} } - apiObject := &datapipeline.Field{ + apiObject := awstypes.Field{ Key: aws.String(tfMap[names.AttrKey].(string)), } @@ -464,12 +441,12 @@ func expandPipelineDefinitionPipelineField(tfMap map[string]interface{}) *datapi return apiObject } -func expandPipelineDefinitionPipelineFields(tfList []interface{}) []*datapipeline.Field { +func expandPipelineDefinitionPipelineFields(tfList []interface{}) []awstypes.Field { if len(tfList) == 0 { return nil } - var apiObjects []*datapipeline.Field + var apiObjects []awstypes.Field for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -486,12 +463,12 @@ func expandPipelineDefinitionPipelineFields(tfList []interface{}) []*datapipelin return apiObjects } -func expandPipelineDefinitionObjects(tfList []interface{}) []*datapipeline.PipelineObject { +func expandPipelineDefinitionObjects(tfList []interface{}) []awstypes.PipelineObject { if len(tfList) == 0 { return nil } - var apiObjects []*datapipeline.PipelineObject + var apiObjects []awstypes.PipelineObject for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -508,33 +485,25 @@ func expandPipelineDefinitionObjects(tfList []interface{}) []*datapipeline.Pipel return apiObjects } -func flattenPipelineDefinitionObject(apiObject *datapipeline.PipelineObject) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenPipelineDefinitionObject(apiObject awstypes.PipelineObject) map[string]interface{} { tfMap := map[string]interface{}{} tfMap[names.AttrField] = flattenPipelineDefinitionParameterFields(apiObject.Fields) - tfMap[names.AttrID] = aws.StringValue(apiObject.Id) - tfMap[names.AttrName] = aws.StringValue(apiObject.Name) + tfMap[names.AttrID] 
= aws.ToString(apiObject.Id) + tfMap[names.AttrName] = aws.ToString(apiObject.Name) return tfMap } -func flattenPipelineDefinitionParameterField(apiObject *datapipeline.Field) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenPipelineDefinitionParameterField(apiObject awstypes.Field) map[string]interface{} { tfMap := map[string]interface{}{} - tfMap[names.AttrKey] = aws.StringValue(apiObject.Key) - tfMap["ref_value"] = aws.StringValue(apiObject.RefValue) - tfMap["string_value"] = aws.StringValue(apiObject.StringValue) + tfMap[names.AttrKey] = aws.ToString(apiObject.Key) + tfMap["ref_value"] = aws.ToString(apiObject.RefValue) + tfMap["string_value"] = aws.ToString(apiObject.StringValue) return tfMap } -func flattenPipelineDefinitionParameterFields(apiObjects []*datapipeline.Field) []map[string]interface{} { +func flattenPipelineDefinitionParameterFields(apiObjects []awstypes.Field) []map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -542,17 +511,13 @@ func flattenPipelineDefinitionParameterFields(apiObjects []*datapipeline.Field) var tfList []map[string]interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenPipelineDefinitionParameterField(apiObject)) } return tfList } -func flattenPipelineDefinitionObjects(apiObjects []*datapipeline.PipelineObject) []map[string]interface{} { +func flattenPipelineDefinitionObjects(apiObjects []awstypes.PipelineObject) []map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -560,21 +525,17 @@ func flattenPipelineDefinitionObjects(apiObjects []*datapipeline.PipelineObject) var tfList []map[string]interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenPipelineDefinitionObject(apiObject)) } return tfList } -func getValidationError(validationErrors []*datapipeline.ValidationError) error { +func getValidationError(validationErrors 
[]awstypes.ValidationError) error { var errs []error for _, err := range validationErrors { - errs = append(errs, fmt.Errorf("id: %s, error: %v", aws.StringValue(err.Id), aws.StringValueSlice(err.Errors))) + errs = append(errs, fmt.Errorf("id: %s, error: %v", aws.ToString(err.Id), err.Errors)) } return errors.Join(errs...) diff --git a/internal/service/datapipeline/pipeline_definition_data_source.go b/internal/service/datapipeline/pipeline_definition_data_source.go index 8adc740a9a7..348af01fe07 100644 --- a/internal/service/datapipeline/pipeline_definition_data_source.go +++ b/internal/service/datapipeline/pipeline_definition_data_source.go @@ -6,8 +6,8 @@ package datapipeline import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datapipeline" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datapipeline" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -116,14 +116,14 @@ func DataSourcePipelineDefinition() *schema.Resource { func dataSourcePipelineDefinitionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DataPipelineConn(ctx) + conn := meta.(*conns.AWSClient).DataPipelineClient(ctx) pipelineID := d.Get("pipeline_id").(string) input := &datapipeline.GetPipelineDefinitionInput{ PipelineId: aws.String(pipelineID), } - resp, err := conn.GetPipelineDefinitionWithContext(ctx, input) + resp, err := conn.GetPipelineDefinition(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "getting DataPipeline Definition (%s): %s", pipelineID, err) diff --git a/internal/service/datapipeline/pipeline_definition_test.go b/internal/service/datapipeline/pipeline_definition_test.go index 2f56006abb1..887b9faa3e7 100644 --- a/internal/service/datapipeline/pipeline_definition_test.go +++ 
b/internal/service/datapipeline/pipeline_definition_test.go @@ -8,14 +8,15 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datapipeline" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datapipeline" + awstypes "github.com/aws/aws-sdk-go-v2/service/datapipeline/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfdatapipeline "github.com/hashicorp/terraform-provider-aws/internal/service/datapipeline" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -101,9 +102,10 @@ func TestAccDataPipelinePipelineDefinition_complete(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"pipeline_object"}, }, }, }) @@ -116,8 +118,8 @@ func testAccCheckPipelineDefinitionExists(ctx context.Context, resourceName stri return fmt.Errorf("not found: %s", resourceName) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineConn(ctx) - resp, err := conn.GetPipelineDefinitionWithContext(ctx, &datapipeline.GetPipelineDefinitionInput{PipelineId: aws.String(rs.Primary.ID)}) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineClient(ctx) + resp, err := conn.GetPipelineDefinition(ctx, &datapipeline.GetPipelineDefinitionInput{PipelineId: aws.String(rs.Primary.ID)}) if err != nil { return fmt.Errorf("problem checking for DataPipeline Pipeline Definition existence: %w", err) } @@ -134,17 +136,17 @@ func 
testAccCheckPipelineDefinitionExists(ctx context.Context, resourceName stri func testAccCheckPipelineDefinitionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datapipeline_pipeline_definition" { continue } - resp, err := conn.GetPipelineDefinitionWithContext(ctx, &datapipeline.GetPipelineDefinitionInput{PipelineId: aws.String(rs.Primary.ID)}) + resp, err := conn.GetPipelineDefinition(ctx, &datapipeline.GetPipelineDefinitionInput{PipelineId: aws.String(rs.Primary.ID)}) - if tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineNotFoundException) || - tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineDeletedException) { + if errs.IsA[*awstypes.PipelineNotFoundException](err) || + errs.IsA[*awstypes.PipelineDeletedException](err) { continue } diff --git a/internal/service/datapipeline/pipeline_test.go b/internal/service/datapipeline/pipeline_test.go index 41eeca2ed7c..3bdf58f1227 100644 --- a/internal/service/datapipeline/pipeline_test.go +++ b/internal/service/datapipeline/pipeline_test.go @@ -8,21 +8,22 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datapipeline" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datapipeline" + awstypes "github.com/aws/aws-sdk-go-v2/service/datapipeline/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" tfdatapipeline "github.com/hashicorp/terraform-provider-aws/internal/service/datapipeline" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccDataPipelinePipeline_basic(t *testing.T) { ctx := acctest.Context(t) - var conf1, conf2 datapipeline.PipelineDescription + var conf1, conf2 awstypes.PipelineDescription rName1 := fmt.Sprintf("tf-datapipeline-%s", sdkacctest.RandString(5)) rName2 := fmt.Sprintf("tf-datapipeline-%s", sdkacctest.RandString(5)) resourceName := "aws_datapipeline_pipeline.default" @@ -59,7 +60,7 @@ func TestAccDataPipelinePipeline_basic(t *testing.T) { func TestAccDataPipelinePipeline_description(t *testing.T) { ctx := acctest.Context(t) - var conf1, conf2 datapipeline.PipelineDescription + var conf1, conf2 awstypes.PipelineDescription rName := fmt.Sprintf("tf-datapipeline-%s", sdkacctest.RandString(5)) resourceName := "aws_datapipeline_pipeline.default" @@ -95,7 +96,7 @@ func TestAccDataPipelinePipeline_description(t *testing.T) { func TestAccDataPipelinePipeline_disappears(t *testing.T) { ctx := acctest.Context(t) - var conf datapipeline.PipelineDescription + var conf awstypes.PipelineDescription rName := fmt.Sprintf("tf-datapipeline-%s", sdkacctest.RandString(5)) resourceName := "aws_datapipeline_pipeline.default" @@ -119,7 +120,7 @@ func TestAccDataPipelinePipeline_disappears(t *testing.T) { func TestAccDataPipelinePipeline_tags(t *testing.T) { ctx := acctest.Context(t) - var conf datapipeline.PipelineDescription + var conf awstypes.PipelineDescription rName := fmt.Sprintf("tf-datapipeline-%s", sdkacctest.RandString(5)) resourceName := "aws_datapipeline_pipeline.default" @@ -167,14 +168,14 @@ func TestAccDataPipelinePipeline_tags(t *testing.T) { }) } -func testAccCheckPipelineDisappears(ctx context.Context, conf *datapipeline.PipelineDescription) resource.TestCheckFunc { +func testAccCheckPipelineDisappears(ctx context.Context, conf *awstypes.PipelineDescription) 
resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineClient(ctx) params := &datapipeline.DeletePipelineInput{ PipelineId: conf.PipelineId, } - _, err := conn.DeletePipelineWithContext(ctx, params) + _, err := conn.DeletePipeline(ctx, params) if err != nil { return err } @@ -184,7 +185,7 @@ func testAccCheckPipelineDisappears(ctx context.Context, conf *datapipeline.Pipe func testAccCheckPipelineDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_datapipeline_pipeline" { @@ -192,9 +193,9 @@ func testAccCheckPipelineDestroy(ctx context.Context) resource.TestCheckFunc { } // Try to find the Pipeline pipelineDescription, err := tfdatapipeline.PipelineRetrieve(ctx, rs.Primary.ID, conn) - if tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineNotFoundException) { + if errs.IsA[*awstypes.PipelineNotFoundException](err) { continue - } else if tfawserr.ErrCodeEquals(err, datapipeline.ErrCodePipelineDeletedException) { + } else if errs.IsA[*awstypes.PipelineDeletedException](err) { continue } @@ -210,7 +211,7 @@ func testAccCheckPipelineDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckPipelineExists(ctx context.Context, n string, v *datapipeline.PipelineDescription) resource.TestCheckFunc { +func testAccCheckPipelineExists(ctx context.Context, n string, v *awstypes.PipelineDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -221,7 +222,7 @@ func testAccCheckPipelineExists(ctx context.Context, n string, v *datapipeline.P return fmt.Errorf("No DataPipeline ID is set") } 
- conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineClient(ctx) pipelineDescription, err := tfdatapipeline.PipelineRetrieve(ctx, rs.Primary.ID, conn) @@ -238,11 +239,11 @@ func testAccCheckPipelineExists(ctx context.Context, n string, v *datapipeline.P } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DataPipelineClient(ctx) input := &datapipeline.ListPipelinesInput{} - _, err := conn.ListPipelinesWithContext(ctx, input) + _, err := conn.ListPipelines(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) @@ -253,9 +254,9 @@ func testAccPreCheck(ctx context.Context, t *testing.T) { } } -func testAccCheckPipelineNotEqual(pipeline1, pipeline2 *datapipeline.PipelineDescription) resource.TestCheckFunc { +func testAccCheckPipelineNotEqual(pipeline1, pipeline2 *awstypes.PipelineDescription) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(pipeline1.PipelineId) == aws.StringValue(pipeline2.PipelineId) { + if aws.ToString(pipeline1.PipelineId) == aws.ToString(pipeline2.PipelineId) { return fmt.Errorf("Pipeline IDs are equal") } diff --git a/internal/service/datapipeline/service_endpoint_resolver_gen.go b/internal/service/datapipeline/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..a5582b9422b --- /dev/null +++ b/internal/service/datapipeline/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package datapipeline + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + datapipeline_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datapipeline" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ datapipeline_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver datapipeline_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: datapipeline_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params datapipeline_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up datapipeline endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*datapipeline_sdkv2.Options) { + return func(o *datapipeline_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/datapipeline/service_endpoints_gen_test.go b/internal/service/datapipeline/service_endpoints_gen_test.go index f0237581313..5c3e4e50560 100644 --- a/internal/service/datapipeline/service_endpoints_gen_test.go +++ b/internal/service/datapipeline/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package datapipeline_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - datapipeline_sdkv1 "github.com/aws/aws-sdk-go/service/datapipeline" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + datapipeline_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datapipeline" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": 
{ @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := datapipeline_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(datapipeline_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), datapipeline_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := datapipeline_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(datapipeline_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), datapipeline_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.DataPipelineConn(ctx) - - req, _ := client.ListPipelinesRequest(&datapipeline_sdkv1.ListPipelinesInput{}) + client := meta.DataPipelineClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListPipelines(ctx, 
&datapipeline_sdkv2.ListPipelinesInput{}, + func(opts *datapipeline_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( 
+ retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, 
middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/datapipeline/service_package_gen.go b/internal/service/datapipeline/service_package_gen.go index c94266e423d..344d312bcb8 100644 --- a/internal/service/datapipeline/service_package_gen.go +++ b/internal/service/datapipeline/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package datapipeline import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - datapipeline_sdkv1 "github.com/aws/aws-sdk-go/service/datapipeline" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + datapipeline_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datapipeline" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -59,25 +56,14 @@ func (p *servicePackage) ServicePackageName() string { return names.DataPipeline } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*datapipeline_sdkv1.DataPipeline, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*datapipeline_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return datapipeline_sdkv1.New(sess.Copy(&cfg)), nil + return datapipeline_sdkv2.NewFromConfig(cfg, + datapipeline_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/datapipeline/tags_gen.go b/internal/service/datapipeline/tags_gen.go index efd17707caa..fc19be310a2 100644 --- a/internal/service/datapipeline/tags_gen.go +++ b/internal/service/datapipeline/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/datapipeline" - "github.com/aws/aws-sdk-go/service/datapipeline/datapipelineiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datapipeline" + awstypes "github.com/aws/aws-sdk-go-v2/service/datapipeline/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,11 +19,11 @@ 
import ( // []*SERVICE.Tag handling // Tags returns datapipeline service tags. -func Tags(tags tftags.KeyValueTags) []*datapipeline.Tag { - result := make([]*datapipeline.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &datapipeline.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -35,11 +35,11 @@ func Tags(tags tftags.KeyValueTags) []*datapipeline.Tag { } // KeyValueTags creates tftags.KeyValueTags from datapipeline service tags. -func KeyValueTags(ctx context.Context, tags []*datapipeline.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -47,7 +47,7 @@ func KeyValueTags(ctx context.Context, tags []*datapipeline.Tag) tftags.KeyValue // getTagsIn returns datapipeline service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*datapipeline.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -58,7 +58,7 @@ func getTagsIn(ctx context.Context) []*datapipeline.Tag { } // setTagsOut sets datapipeline service tags in Context. -func setTagsOut(ctx context.Context, tags []*datapipeline.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -67,7 +67,7 @@ func setTagsOut(ctx context.Context, tags []*datapipeline.Tag) { // updateTags updates datapipeline service tags. 
// The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn datapipelineiface.DataPipelineAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *datapipeline.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*datapipeline.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -78,10 +78,10 @@ func updateTags(ctx context.Context, conn datapipelineiface.DataPipelineAPI, ide if len(removedTags) > 0 { input := &datapipeline.RemoveTagsInput{ PipelineId: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.RemoveTagsWithContext(ctx, input) + _, err := conn.RemoveTags(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -96,7 +96,7 @@ func updateTags(ctx context.Context, conn datapipelineiface.DataPipelineAPI, ide Tags: Tags(updatedTags), } - _, err := conn.AddTagsWithContext(ctx, input) + _, err := conn.AddTags(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -109,5 +109,5 @@ func updateTags(ctx context.Context, conn datapipelineiface.DataPipelineAPI, ide // UpdateTags updates datapipeline service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).DataPipelineConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).DataPipelineClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/datasync/service_endpoint_resolver_gen.go b/internal/service/datasync/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..b2f112f16df --- /dev/null +++ b/internal/service/datasync/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package datasync + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + datasync_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datasync" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ datasync_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver datasync_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: datasync_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params datasync_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = 
r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up datasync endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*datasync_sdkv2.Options) { + return func(o *datasync_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/datasync/service_endpoints_gen_test.go b/internal/service/datasync/service_endpoints_gen_test.go index 945dfff355f..2c33885b57c 100644 --- a/internal/service/datasync/service_endpoints_gen_test.go +++ b/internal/service/datasync/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ 
-241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S
 	}
 }
 
-func defaultEndpoint(region string) string {
+func defaultEndpoint(region string) (url.URL, error) {
 	r := datasync_sdkv2.NewDefaultEndpointResolverV2()
 
 	ep, err := r.ResolveEndpoint(context.Background(), datasync_sdkv2.EndpointParameters{
 		Region: aws_sdkv2.String(region),
 	})
 	if err != nil {
-		return err.Error()
+		return url.URL{}, err
 	}
 
 	if ep.URI.Path == "" {
 		ep.URI.Path = "/"
 	}
 
-	return ep.URI.String()
+	return ep.URI, nil
 }
 
-func defaultFIPSEndpoint(region string) string {
+func defaultFIPSEndpoint(region string) (url.URL, error) {
 	r := datasync_sdkv2.NewDefaultEndpointResolverV2()
 
 	ep, err := r.ResolveEndpoint(context.Background(), datasync_sdkv2.EndpointParameters{
@@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string {
 		UseFIPS: aws_sdkv2.Bool(true),
 	})
 	if err != nil {
-		return err.Error()
+		return url.URL{}, err
 	}
 
 	if ep.URI.Path == "" {
 		ep.URI.Path = "/"
 	}
 
-	return ep.URI.String()
+	return ep.URI, nil
 }
 
 func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams {
@@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) {
 	setup.config["use_fips_endpoint"] = true
 }
 
-func expectDefaultEndpoint(region string) caseExpectations {
+func expectDefaultEndpoint(t *testing.T, region string) caseExpectations {
+	t.Helper()
+
+	endpoint, err := defaultEndpoint(region)
+	if err != nil {
+		t.Fatalf("resolving datasync default endpoint: %s", err)
+	}
+
 	return caseExpectations{
-		endpoint: defaultEndpoint(region),
+		endpoint: endpoint.String(),
 		region:   expectedCallRegion,
 	}
 }
 
-func expectDefaultFIPSEndpoint(region string) caseExpectations {
+func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations {
+	t.Helper()
+
+	endpoint, err := defaultFIPSEndpoint(region)
+	if err != nil {
+		t.Fatalf("resolving datasync FIPS endpoint: %s", err)
+	}
+
+	hostname := endpoint.Hostname()
+	_, err = 
net.LookupHost(hostname)
+	if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound {
+		return expectDefaultEndpoint(t, region)
+	} else if err != nil {
+		t.Fatalf("looking up datasync endpoint %q: %s", hostname, err)
+	}
+
 	return caseExpectations{
-		endpoint: defaultFIPSEndpoint(region),
+		endpoint: endpoint.String(),
 		region:   expectedCallRegion,
 	}
 }
diff --git a/internal/service/datasync/service_package_gen.go b/internal/service/datasync/service_package_gen.go
index 58608bf491d..52a0f8e077f 100644
--- a/internal/service/datasync/service_package_gen.go
+++ b/internal/service/datasync/service_package_gen.go
@@ -1,4 +1,4 @@
-// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT.
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT.
 
 package datasync
 
@@ -7,7 +7,6 @@ import (
 	aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws"
 	datasync_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datasync"
-	"github.com/hashicorp/terraform-plugin-log/tflog"
 	"github.com/hashicorp/terraform-provider-aws/internal/conns"
 	"github.com/hashicorp/terraform-provider-aws/internal/types"
 	"github.com/hashicorp/terraform-provider-aws/names"
@@ -144,19 +143,10 @@ func (p *servicePackage) ServicePackageName() string {
 func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*datasync_sdkv2.Client, error) {
 	cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config))
 
-	return datasync_sdkv2.NewFromConfig(cfg, func(o *datasync_sdkv2.Options) {
-		if endpoint := config[names.AttrEndpoint].(string); endpoint != "" {
-			tflog.Debug(ctx, "setting endpoint", map[string]any{
-				"tf_aws.endpoint": endpoint,
-			})
-			o.BaseEndpoint = aws_sdkv2.String(endpoint)
-
-			if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled {
-				tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting")
-				o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled
-			}
-		}
-	}), nil
+	return datasync_sdkv2.NewFromConfig(cfg,
+
datasync_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/datazone/exports_test.go b/internal/service/datazone/exports_test.go index bb4638c851a..58f3e4834fd 100644 --- a/internal/service/datazone/exports_test.go +++ b/internal/service/datazone/exports_test.go @@ -8,4 +8,5 @@ var ( ResourceDomain = newResourceDomain ResourceEnvironmentBlueprintConfiguration = newResourceEnvironmentBlueprintConfiguration IsResourceMissing = isResourceMissing + ResourceProject = newResourceProject ) diff --git a/internal/service/datazone/project.go b/internal/service/datazone/project.go new file mode 100644 index 00000000000..5f627d453d7 --- /dev/null +++ b/internal/service/datazone/project.go @@ -0,0 +1,416 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package datazone + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datazone" + awstypes "github.com/aws/aws-sdk-go-v2/service/datazone/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + 
"github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_datazone_project", name="Project") +func newResourceProject(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &resourceProject{} + r.SetDefaultCreateTimeout(10 * time.Minute) + r.SetDefaultDeleteTimeout(10 * time.Minute) + return r, nil +} + +const ( + ResNameProject = "Project" +) + +type resourceProject struct { + framework.ResourceWithConfigure + framework.WithTimeouts +} + +func (r *resourceProject) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_datazone_project" +} + +func (r *resourceProject) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrDescription: schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.LengthAtMost(2048), + }, + }, + "domain_identifier": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexache.MustCompile(`^dzd[-_][a-zA-Z0-9_-]{1,36}$`), "must conform to: ^dzd[-_][a-zA-Z0-9_-]{1,36}$ "), + }, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + + 
"glossary_terms": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + + Validators: []validator.List{ + listvalidator.SizeBetween(1, 20), + listvalidator.ValueStringsAre(stringvalidator.RegexMatches(regexache.MustCompile(`^[a-zA-Z0-9_-]{1,36}$`), "must conform to: ^[a-zA-Z0-9_-]{1,36}$ ")), + }, + Optional: true, + }, + + names.AttrName: schema.StringAttribute{ + Validators: []validator.String{ + stringvalidator.RegexMatches(regexache.MustCompile(`^[\w -]+$`), "must conform to: ^[\\w -]+$ "), + stringvalidator.LengthBetween(1, 64), + }, + Required: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + names.AttrID: framework.IDAttribute(), + + names.AttrCreatedAt: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + + "failure_reasons": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[dsProjectDeletionError](ctx), + Computed: true, + }, + + "last_updated_at": schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + }, + "project_status": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.ProjectStatus](), + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "skip_deletion_check": schema.BoolAttribute{ + Optional: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Delete: true, + }), + }, + } +} + +func (r *resourceProject) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + var plan resourceProjectData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + return + } + conn := r.Meta().DataZoneClient(ctx) + + in := &datazone.CreateProjectInput{} + resp.Diagnostics.Append(flex.Expand(ctx, plan, in)...) + + if resp.Diagnostics.HasError() { + return + } + + out, err := conn.CreateProject(ctx, in) + if resp.Diagnostics.HasError() { + return + } + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.DataZone, create.ErrActionCreating, ResNameProject, plan.Name.String(), err), + err.Error(), + ) + return + } + if out == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.DataZone, create.ErrActionCreating, ResNameProject, plan.Name.String(), nil), + errors.New("failure when creating").Error(), + ) + return + } + if !(out.FailureReasons == nil) && len(out.FailureReasons) > 0 { + for _, x := range out.FailureReasons { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.DataZone, create.ErrActionCreating, ResNameProject, plan.Name.String(), nil), + errors.New("error message: "+*x.Message+" error code: "+*x.Code).Error(), + ) + } + return + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &plan)...) + if resp.Diagnostics.HasError() { + return + } + createTimeout := r.CreateTimeout(ctx, plan.Timeouts) + _, err = waitProjectCreated(ctx, conn, plan.DomainIdentifier.ValueString(), plan.ID.ValueString(), createTimeout) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.DataZone, create.ErrActionWaitingForCreation, ResNameProject, plan.Name.String(), err), + err.Error(), + ) + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) +} + +func (r *resourceProject) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().DataZoneClient(ctx) + var state resourceProjectData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } + + out, err := findProjectByID(ctx, conn, state.DomainIdentifier.ValueString(), state.ID.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } + + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.DataZone, create.ErrActionSetting, ResNameProject, state.ID.String(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) +} + +func (r *resourceProject) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().DataZoneClient(ctx) + + var plan, state resourceProjectData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return + } + if !plan.Description.Equal(state.Description) || !plan.GlossaryTerms.Equal(state.GlossaryTerms) || !plan.Name.Equal(state.Name) { + in := &datazone.UpdateProjectInput{} + resp.Diagnostics.Append(flex.Expand(ctx, plan, in)...) + + if resp.Diagnostics.HasError() { + return + } + in.Identifier = plan.ID.ValueStringPointer() + out, err := conn.UpdateProject(ctx, in) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.DataZone, create.ErrActionUpdating, ResNameProject, plan.ID.String(), err), + err.Error(), + ) + return + } + if out == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.DataZone, create.ErrActionUpdating, ResNameProject, plan.ID.String(), nil), + errors.New("empty output from project update").Error(), + ) + return + } + out.ProjectStatus = "ACTIVE" + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) + if resp.Diagnostics.HasError() { + return + } + } + + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+}
+
+func (r *resourceProject) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+	conn := r.Meta().DataZoneClient(ctx)
+
+	var state resourceProjectData
+	resp.Diagnostics.Append(req.State.Get(ctx, &state)...)
+	if resp.Diagnostics.HasError() {
+		return
+	}
+
+	in := &datazone.DeleteProjectInput{
+		DomainIdentifier: state.DomainIdentifier.ValueStringPointer(),
+		Identifier:       aws.String((*state.ID.ValueStringPointer())),
+	}
+	if !state.SkipDeletionCheck.IsNull() {
+		in.SkipDeletionCheck = state.SkipDeletionCheck.ValueBoolPointer()
+	}
+
+	_, err := conn.DeleteProject(ctx, in)
+	if err != nil {
+		if errs.IsA[*awstypes.ResourceNotFoundException](err) || errs.IsA[*awstypes.AccessDeniedException](err) {
+			return
+		}
+		resp.Diagnostics.AddError(
+			create.ProblemStandardMessage(names.DataZone, create.ErrActionDeleting, ResNameProject, state.ID.String(), err),
+			err.Error(),
+		)
+		return
+	}
+
+	deleteTimeout := r.DeleteTimeout(ctx, state.Timeouts)
+	_, err = waitProjectDeleted(ctx, conn, state.DomainIdentifier.ValueString(), state.ID.ValueString(), deleteTimeout)
+
+	if err != nil && !errs.IsA[*awstypes.AccessDeniedException](err) {
+		resp.Diagnostics.AddError(
+			create.ProblemStandardMessage(names.DataZone, create.ErrActionWaitingForDeletion, ResNameProject, state.ID.String(), err),
+			err.Error(),
+		)
+		return
+	}
+}
+
+func (r *resourceProject) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+	parts := strings.Split(req.ID, ":")
+	if len(parts) != 2 {
+		resp.Diagnostics.AddError("Resource Import Invalid ID", fmt.Sprintf(`Unexpected format for import ID (%s), use: "DomainIdentifier:Id"`, req.ID))
+		return
+	}
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("domain_identifier"), parts[0])...)
+	resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), parts[1])...)
+} + +func waitProjectCreated(ctx context.Context, conn *datazone.Client, domain string, identifier string, timeout time.Duration) (*datazone.GetProjectOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{}, + Target: enum.Slice[awstypes.ProjectStatus](awstypes.ProjectStatusActive), + Refresh: statusProject(ctx, conn, domain, identifier), + Timeout: timeout, + NotFoundChecks: 40, + ContinuousTargetOccurence: 10, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*datazone.GetProjectOutput); ok { + return out, err + } + + return nil, err +} + +func waitProjectDeleted(ctx context.Context, conn *datazone.Client, domain string, identifier string, timeout time.Duration) (*datazone.GetProjectOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice[awstypes.ProjectStatus](awstypes.ProjectStatusDeleting, awstypes.ProjectStatusActive), + Target: []string{}, + Refresh: statusProject(ctx, conn, domain, identifier), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*datazone.GetProjectOutput); ok { + return out, err + } + + return nil, err +} + +func statusProject(ctx context.Context, conn *datazone.Client, domain string, identifier string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findProjectByID(ctx, conn, domain, identifier) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, aws.ToString((*string)(&out.ProjectStatus)), nil + } +} + +func findProjectByID(ctx context.Context, conn *datazone.Client, domain string, identifier string) (*datazone.GetProjectOutput, error) { + in := &datazone.GetProjectInput{ + DomainIdentifier: aws.String(domain), + Identifier: aws.String(identifier), + } + out, err := conn.GetProject(ctx, in) + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) || 
errs.IsA[*awstypes.AccessDeniedException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + return nil, err + } + + if out == nil || !(out.FailureReasons == nil) { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +type resourceProjectData struct { + Description types.String `tfsdk:"description"` + DomainIdentifier types.String `tfsdk:"domain_identifier"` + Name types.String `tfsdk:"name"` + CreatedBy types.String `tfsdk:"created_by"` + ID types.String `tfsdk:"id"` + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + FailureReasons fwtypes.ListNestedObjectValueOf[dsProjectDeletionError] `tfsdk:"failure_reasons"` + LastUpdatedAt timetypes.RFC3339 `tfsdk:"last_updated_at"` + ProjectStatus fwtypes.StringEnum[awstypes.ProjectStatus] `tfsdk:"project_status"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + SkipDeletionCheck types.Bool `tfsdk:"skip_deletion_check"` + GlossaryTerms fwtypes.ListValueOf[types.String] `tfsdk:"glossary_terms"` +} + +type dsProjectDeletionError struct { + Code types.String `tfsdk:"code"` + Message types.String `tfsdk:"message"` +} diff --git a/internal/service/datazone/project_test.go b/internal/service/datazone/project_test.go new file mode 100644 index 00000000000..88374aaa12f --- /dev/null +++ b/internal/service/datazone/project_test.go @@ -0,0 +1,281 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package datazone_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/datazone" + "github.com/aws/aws-sdk-go-v2/service/datazone/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfdatazone "github.com/hashicorp/terraform-provider-aws/internal/service/datazone" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccDataZoneProject_basic(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var project datazone.GetProjectOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_datazone_project.test" + domainName := "aws_datazone_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataZoneServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProjectDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProjectConfig_basic(rName, dName), + Check: resource.ComposeTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &project), + resource.TestCheckResourceAttrPair(resourceName, "domain_identifier", domainName, names.AttrID), + resource.TestCheckResourceAttrSet(resourceName, "glossary_terms.#"), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "desc"), + 
resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, "created_by"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrID), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_at"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccAuthorizerImportStateIdFunc(resourceName), + ImportStateVerifyIgnore: []string{"skip_deletion_check", "project_status"}, + }, + }, + }) +} +func TestAccDataZoneProject_disappears(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var project datazone.GetProjectOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_datazone_project.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.DataZoneEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.DataZoneServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProjectDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProjectConfig_basic(rName, dName), + Check: resource.ComposeTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &project), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfdatazone.ResourceProject, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} +func testAccCheckProjectDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).DataZoneClient(ctx) + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_datazone_project" { + continue + } + t := 
rs.Primary.Attributes["domain_identifier"] + + input := &datazone.GetProjectInput{ + DomainIdentifier: &t, + Identifier: aws.String(rs.Primary.ID), + } + _, err := conn.GetProject(ctx, input) + if errs.IsA[*types.AccessDeniedException](err) || errs.IsA[*types.ResourceNotFoundException](err) { + return nil + } + if err != nil { + return create.Error(names.DataZone, create.ErrActionCheckingDestroyed, tfdatazone.ResNameProject, rs.Primary.ID, err) + } + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_datazone_domain" { + continue + } + _, err := conn.DeleteDomain(ctx, &datazone.DeleteDomainInput{ + Identifier: aws.String(rs.Primary.Attributes["domain_identifier"]), + }) + + if err != nil { + return create.Error(names.DataZone, create.ErrActionCheckingDestroyed, tfdatazone.ResNameDomain, rs.Primary.ID, err) + } + } + return nil + } +} + +func testAccCheckProjectExists(ctx context.Context, name string, project *datazone.GetProjectOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.DataZone, create.ErrActionCheckingExistence, tfdatazone.ResNameProject, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.DataZone, create.ErrActionCheckingExistence, tfdatazone.ResNameProject, name, errors.New("not set")) + } + if rs.Primary.Attributes["domain_identifier"] == "" { + return create.Error(names.DataZone, create.ErrActionCheckingExistence, tfdatazone.ResNameProject, name, errors.New("domain identifier not set")) + } + t := rs.Primary.Attributes["domain_identifier"] + conn := acctest.Provider.Meta().(*conns.AWSClient).DataZoneClient(ctx) + resp, err := conn.GetProject(ctx, &datazone.GetProjectInput{ + DomainIdentifier: &t, + Identifier: &rs.Primary.ID, + }) + + if err != nil && !errs.IsA[*types.ResourceNotFoundException](err) { + return create.Error(names.DataZone, create.ErrActionCheckingExistence, 
tfdatazone.ResNameProject, rs.Primary.ID, err) + } + + *project = *resp + + return nil + } +} + +func testAccCheckProjectNotRecreated(before, after *datazone.GetProjectOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + if before, after := aws.ToString(before.Id), aws.ToString(after.Id); before != after { + return create.Error(names.DataZone, create.ErrActionCheckingNotRecreated, tfdatazone.ResNameProject, before, errors.New("recreated")) + } + + return nil + } +} +func TestAccDataZoneProject_update(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v1, v2 datazone.GetProjectOutput + pName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_datazone_project.test" + domainName := "aws_datazone_domain.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DataZoneServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProjectDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProjectConfig_basic(pName, dName), + Check: resource.ComposeTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v1), + resource.TestCheckResourceAttrPair(resourceName, "domain_identifier", domainName, names.AttrID), + resource.TestCheckResourceAttrSet(resourceName, "glossary_terms.#"), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "desc"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, pName), + resource.TestCheckResourceAttrSet(resourceName, "created_by"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrID), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_at"), + ), + }, + { + ResourceName: resourceName, 
+ ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccAuthorizerImportStateIdFunc(resourceName), + ImportStateVerifyIgnore: []string{"skip_deletion_check", "project_status"}, + }, + { + Config: testAccProjectConfigBasicUpdate(pName, dName), + Check: resource.ComposeTestCheckFunc( + testAccCheckProjectExists(ctx, resourceName, &v2), + testAccCheckProjectNotRecreated(&v1, &v2), + resource.TestCheckResourceAttrPair(resourceName, "domain_identifier", domainName, names.AttrID), + resource.TestCheckResourceAttrSet(resourceName, "glossary_terms.#"), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, names.AttrDescription), + resource.TestCheckResourceAttr(resourceName, names.AttrName, pName), + resource.TestCheckResourceAttrSet(resourceName, "created_by"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrID), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + resource.TestCheckResourceAttrSet(resourceName, "last_updated_at"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateIdFunc: testAccAuthorizerImportStateIdFunc(resourceName), + ImportStateVerifyIgnore: []string{"project_status", "skip_deletion_check"}, + }, + }, + }) +} + +func testAccAuthorizerImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + return fmt.Sprintf("%s:%s", rs.Primary.Attributes["domain_identifier"], rs.Primary.ID), nil + } +} + +func testAccProjectConfig_basic(pName, dName string) string { + return acctest.ConfigCompose(testAccDomainConfig_basic(dName), fmt.Sprintf(` +resource "aws_security_group" "test" { + name = %[1]q +} + +resource "aws_datazone_project" "test" { + domain_identifier = aws_datazone_domain.test.id + glossary_terms = ["2N8w6XJCwZf"] + name = %[1]q + description = 
"desc" + skip_deletion_check = true +} +`, pName)) +} +func testAccProjectConfigBasicUpdate(pName, dName string) string { + return acctest.ConfigCompose(testAccDomainConfig_basic(dName), fmt.Sprintf(` +resource "aws_security_group" "test" { + name = %[1]q +} + +resource "aws_datazone_project" "test" { + domain_identifier = aws_datazone_domain.test.id + glossary_terms = ["2N8w6XJCwZf"] + name = %[1]q + description = "description" + skip_deletion_check = true +} +`, pName)) +} diff --git a/internal/service/datazone/service_endpoint_resolver_gen.go b/internal/service/datazone/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..189bcbd6331 --- /dev/null +++ b/internal/service/datazone/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package datazone + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + datazone_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datazone" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ datazone_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver datazone_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: datazone_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params datazone_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + 
+ return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up datazone endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*datazone_sdkv2.Options) { + return func(o *datazone_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/datazone/service_endpoints_gen_test.go b/internal/service/datazone/service_endpoints_gen_test.go index e794e776047..d9757cb8085 100644 --- a/internal/service/datazone/service_endpoints_gen_test.go +++ b/internal/service/datazone/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: 
expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := datazone_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), datazone_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := datazone_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), datazone_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + 
endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/datazone/service_package_gen.go b/internal/service/datazone/service_package_gen.go index b5861812072..a2745588aad 100644 --- a/internal/service/datazone/service_package_gen.go +++ b/internal/service/datazone/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package datazone @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" datazone_sdkv2 "github.com/aws/aws-sdk-go-v2/service/datazone" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -37,6 +36,10 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic Factory: newResourceEnvironmentBlueprintConfiguration, Name: "Environment Blueprint Configuration", }, + { + Factory: newResourceProject, + Name: "Project", + }, } } @@ -56,19 +59,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*datazone_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return datazone_sdkv2.NewFromConfig(cfg, func(o *datazone_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); 
endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return datazone_sdkv2.NewFromConfig(cfg, + datazone_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/dax/service_endpoint_resolver_gen.go b/internal/service/dax/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..669b613d31d --- /dev/null +++ b/internal/service/dax/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package dax + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + dax_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dax" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ dax_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver dax_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: dax_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params dax_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + 
tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up dax endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*dax_sdkv2.Options) { + return func(o *dax_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/dax/service_endpoints_gen_test.go b/internal/service/dax/service_endpoints_gen_test.go index 2d117dcd24f..69508d5850a 100644 --- a/internal/service/dax/service_endpoints_gen_test.go +++ b/internal/service/dax/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S 
with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := dax_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), dax_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := dax_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), dax_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) 
caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/dax/service_package_gen.go b/internal/service/dax/service_package_gen.go index d6b86d61205..e944d854b4a 100644 --- a/internal/service/dax/service_package_gen.go +++ b/internal/service/dax/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package dax @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" dax_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dax" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -56,19 +55,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*dax_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return dax_sdkv2.NewFromConfig(cfg, func(o *dax_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring 
UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return dax_sdkv2.NewFromConfig(cfg, + dax_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/deploy/service_endpoint_resolver_gen.go b/internal/service/deploy/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..adff12f265a --- /dev/null +++ b/internal/service/deploy/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package deploy + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + codedeploy_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codedeploy" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ codedeploy_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver codedeploy_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: codedeploy_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params codedeploy_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", 
useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up codedeploy endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*codedeploy_sdkv2.Options) { + return func(o *codedeploy_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/deploy/service_endpoints_gen_test.go b/internal/service/deploy/service_endpoints_gen_test.go index a8957a49874..8484f47323b 100644 --- a/internal/service/deploy/service_endpoints_gen_test.go +++ b/internal/service/deploy/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name 
endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := codedeploy_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codedeploy_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := codedeploy_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), codedeploy_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := 
endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/deploy/service_package_gen.go b/internal/service/deploy/service_package_gen.go index dc50224c8b7..bee170a538f 100644 --- a/internal/service/deploy/service_package_gen.go +++ b/internal/service/deploy/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package deploy @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" codedeploy_sdkv2 "github.com/aws/aws-sdk-go-v2/service/codedeploy" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -61,19 +60,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*codedeploy_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return codedeploy_sdkv2.NewFromConfig(cfg, func(o *codedeploy_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return 
codedeploy_sdkv2.NewFromConfig(cfg, + codedeploy_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/detective/generate.go b/internal/service/detective/generate.go index 06ef30ccf6e..81b05dad227 100644 --- a/internal/service/detective/generate.go +++ b/internal/service/detective/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ServiceTagsMap -ListTags -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -ListTags -ServiceTagsMap -SkipTypesImp -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/detective/graph.go b/internal/service/detective/graph.go index 716c21679bd..5382a6c9eac 100644 --- a/internal/service/detective/graph.go +++ b/internal/service/detective/graph.go @@ -8,12 +8,13 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/detective" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/detective" + awstypes "github.com/aws/aws-sdk-go-v2/service/detective/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -58,21 +59,21 @@ func resourceGraphCreate(ctx context.Context, d *schema.ResourceData, 
meta inter const ( timeout = 4 * time.Minute ) - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) input := &detective.CreateGraphInput{ Tags: getTagsIn(ctx), } - outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { - return conn.CreateGraphWithContext(ctx, input) - }, detective.ErrCodeInternalServerException) + outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InternalServerException](ctx, timeout, func() (interface{}, error) { + return conn.CreateGraph(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Detective Graph: %s", err) } - d.SetId(aws.StringValue(outputRaw.(*detective.CreateGraphOutput).GraphArn)) + d.SetId(aws.ToString(outputRaw.(*detective.CreateGraphOutput).GraphArn)) return append(diags, resourceGraphRead(ctx, d, meta)...) } @@ -80,7 +81,7 @@ func resourceGraphCreate(ctx context.Context, d *schema.ResourceData, meta inter func resourceGraphRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) graph, err := FindGraphByARN(ctx, conn, d.Id()) @@ -94,7 +95,7 @@ func resourceGraphRead(ctx context.Context, d *schema.ResourceData, meta interfa return sdkdiag.AppendErrorf(diags, "reading Detective Graph (%s): %s", d.Id(), err) } - d.Set(names.AttrCreatedTime, aws.TimeValue(graph.CreatedTime).Format(time.RFC3339)) + d.Set(names.AttrCreatedTime, aws.ToTime(graph.CreatedTime).Format(time.RFC3339)) d.Set("graph_arn", graph.Arn) return diags @@ -108,14 +109,14 @@ func resourceGraphUpdate(ctx context.Context, d *schema.ResourceData, meta inter func resourceGraphDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := 
meta.(*conns.AWSClient).DetectiveClient(ctx) log.Printf("[DEBUG] Deleting Detective Graph: %s", d.Id()) - _, err := conn.DeleteGraphWithContext(ctx, &detective.DeleteGraphInput{ + _, err := conn.DeleteGraph(ctx, &detective.DeleteGraphInput{ GraphArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, detective.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -126,43 +127,41 @@ func resourceGraphDelete(ctx context.Context, d *schema.ResourceData, meta inter return diags } -func FindGraphByARN(ctx context.Context, conn *detective.Detective, arn string) (*detective.Graph, error) { +func FindGraphByARN(ctx context.Context, conn *detective.Client, arn string) (*awstypes.Graph, error) { input := &detective.ListGraphsInput{} - return findGraph(ctx, conn, input, func(v *detective.Graph) bool { - return aws.StringValue(v.Arn) == arn + return findGraph(ctx, conn, input, func(v awstypes.Graph) bool { + return aws.ToString(v.Arn) == arn }) } -func findGraph(ctx context.Context, conn *detective.Detective, input *detective.ListGraphsInput, filter tfslices.Predicate[*detective.Graph]) (*detective.Graph, error) { +func findGraph(ctx context.Context, conn *detective.Client, input *detective.ListGraphsInput, filter tfslices.Predicate[awstypes.Graph]) (*awstypes.Graph, error) { output, err := findGraphs(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findGraphs(ctx context.Context, conn *detective.Detective, input *detective.ListGraphsInput, filter tfslices.Predicate[*detective.Graph]) ([]*detective.Graph, error) { - var output []*detective.Graph +func findGraphs(ctx context.Context, conn *detective.Client, input *detective.ListGraphsInput, filter tfslices.Predicate[awstypes.Graph]) ([]awstypes.Graph, error) { + var output []awstypes.Graph + + pages := detective.NewListGraphsPaginator(conn, input) 
- err := conn.ListGraphsPagesWithContext(ctx, input, func(page *detective.ListGraphsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err } for _, v := range page.GraphList { - if v != nil && filter(v) { + if filter(v) { output = append(output, v) } } - - return !lastPage - }) - - if err != nil { - return nil, err } return output, nil diff --git a/internal/service/detective/graph_test.go b/internal/service/detective/graph_test.go index c82fb6646b4..5a82fc4bc88 100644 --- a/internal/service/detective/graph_test.go +++ b/internal/service/detective/graph_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/detective" + awstypes "github.com/aws/aws-sdk-go-v2/service/detective/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -20,7 +20,7 @@ import ( func testAccGraph_basic(t *testing.T) { ctx := acctest.Context(t) - var graph detective.Graph + var graph awstypes.Graph resourceName := "aws_detective_graph.test" resource.Test(t, resource.TestCase{ @@ -47,7 +47,7 @@ func testAccGraph_basic(t *testing.T) { func testAccGraph_disappears(t *testing.T) { ctx := acctest.Context(t) - var graph detective.Graph + var graph awstypes.Graph resourceName := "aws_detective_graph.test" resource.Test(t, resource.TestCase{ @@ -70,7 +70,7 @@ func testAccGraph_disappears(t *testing.T) { func testAccGraph_tags(t *testing.T) { ctx := acctest.Context(t) - var graph detective.Graph + var graph awstypes.Graph resourceName := "aws_detective_graph.test" resource.Test(t, resource.TestCase{ @@ -116,7 +116,7 @@ func testAccGraph_tags(t *testing.T) { func testAccCheckGraphDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := 
acctest.Provider.Meta().(*conns.AWSClient).DetectiveConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_detective_graph" { @@ -140,14 +140,14 @@ func testAccCheckGraphDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckGraphExists(ctx context.Context, n string, v *detective.Graph) resource.TestCheckFunc { +func testAccCheckGraphExists(ctx context.Context, n string, v *awstypes.Graph) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveClient(ctx) output, err := tfdetective.FindGraphByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/detective/invitation_accepter.go b/internal/service/detective/invitation_accepter.go index a449919c556..80bce0bf8d4 100644 --- a/internal/service/detective/invitation_accepter.go +++ b/internal/service/detective/invitation_accepter.go @@ -7,12 +7,13 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/detective" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/detective" + awstypes "github.com/aws/aws-sdk-go-v2/service/detective/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -44,14 +45,14 @@ func ResourceInvitationAccepter() *schema.Resource { 
func resourceInvitationAccepterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) graphARN := d.Get("graph_arn").(string) input := &detective.AcceptInvitationInput{ GraphArn: aws.String(graphARN), } - _, err := conn.AcceptInvitationWithContext(ctx, input) + _, err := conn.AcceptInvitation(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "accepting Detective Invitation (%s): %s", graphARN, err) @@ -65,7 +66,7 @@ func resourceInvitationAccepterCreate(ctx context.Context, d *schema.ResourceDat func resourceInvitationAccepterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) member, err := FindInvitationByGraphARN(ctx, conn, d.Id()) @@ -87,14 +88,14 @@ func resourceInvitationAccepterRead(ctx context.Context, d *schema.ResourceData, func resourceInvitationAccepterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) log.Printf("[DEBUG] Deleting Detective Invitation Accepter: %s", d.Id()) - _, err := conn.DisassociateMembershipWithContext(ctx, &detective.DisassociateMembershipInput{ + _, err := conn.DisassociateMembership(ctx, &detective.DisassociateMembershipInput{ GraphArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, detective.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -105,43 +106,41 @@ func resourceInvitationAccepterDelete(ctx context.Context, d *schema.ResourceDat return diags } -func FindInvitationByGraphARN(ctx context.Context, conn *detective.Detective, graphARN 
string) (*detective.MemberDetail, error) { +func FindInvitationByGraphARN(ctx context.Context, conn *detective.Client, graphARN string) (*awstypes.MemberDetail, error) { input := &detective.ListInvitationsInput{} - return findInvitation(ctx, conn, input, func(v *detective.MemberDetail) bool { - return aws.StringValue(v.GraphArn) == graphARN + return findInvitation(ctx, conn, input, func(v awstypes.MemberDetail) bool { + return aws.ToString(v.GraphArn) == graphARN }) } -func findInvitation(ctx context.Context, conn *detective.Detective, input *detective.ListInvitationsInput, filter tfslices.Predicate[*detective.MemberDetail]) (*detective.MemberDetail, error) { +func findInvitation(ctx context.Context, conn *detective.Client, input *detective.ListInvitationsInput, filter tfslices.Predicate[awstypes.MemberDetail]) (*awstypes.MemberDetail, error) { output, err := findInvitations(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findInvitations(ctx context.Context, conn *detective.Detective, input *detective.ListInvitationsInput, filter tfslices.Predicate[*detective.MemberDetail]) ([]*detective.MemberDetail, error) { - var output []*detective.MemberDetail +func findInvitations(ctx context.Context, conn *detective.Client, input *detective.ListInvitationsInput, filter tfslices.Predicate[awstypes.MemberDetail]) ([]awstypes.MemberDetail, error) { + var output []awstypes.MemberDetail - err := conn.ListInvitationsPagesWithContext(ctx, input, func(page *detective.ListInvitationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := detective.NewListInvitationsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err } for _, v := range page.Invitations { - if v != nil && filter(v) { + if filter(v) { output = append(output, v) } } - - return !lastPage - }) - - if err 
!= nil { - return nil, err } return output, nil diff --git a/internal/service/detective/invitation_accepter_test.go b/internal/service/detective/invitation_accepter_test.go index ef588c13587..8056e534d37 100644 --- a/internal/service/detective/invitation_accepter_test.go +++ b/internal/service/detective/invitation_accepter_test.go @@ -53,7 +53,7 @@ func testAccCheckInvitationAccepterExists(ctx context.Context, n string) resourc return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveClient(ctx) _, err := tfdetective.FindInvitationByGraphARN(ctx, conn, rs.Primary.ID) @@ -63,7 +63,7 @@ func testAccCheckInvitationAccepterExists(ctx context.Context, n string) resourc func testAccCheckInvitationAccepterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_detective_invitation_accepter" { diff --git a/internal/service/detective/member.go b/internal/service/detective/member.go index 036a4829724..3544c14a33a 100644 --- a/internal/service/detective/member.go +++ b/internal/service/detective/member.go @@ -10,13 +10,15 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/detective" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/detective" + awstypes "github.com/aws/aws-sdk-go-v2/service/detective/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
"github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -94,13 +96,13 @@ func ResourceMember() *schema.Resource { func resourceMemberCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) accountID := d.Get(names.AttrAccountID).(string) graphARN := d.Get("graph_arn").(string) id := memberCreateResourceID(graphARN, accountID) input := &detective.CreateMembersInput{ - Accounts: []*detective.Account{{ + Accounts: []awstypes.Account{{ AccountId: aws.String(accountID), EmailAddress: aws.String(d.Get("email_address").(string)), }}, @@ -108,16 +110,16 @@ func resourceMemberCreate(ctx context.Context, d *schema.ResourceData, meta inte } if v := d.Get("disable_email_notification").(bool); v { - input.DisableEmailNotification = aws.Bool(v) + input.DisableEmailNotification = v } if v, ok := d.GetOk(names.AttrMessage); ok { input.Message = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.CreateMembersWithContext(ctx, input) - }, detective.ErrCodeInternalServerException) + _, err := tfresource.RetryWhenIsA[*awstypes.InternalServerException](ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { + return conn.CreateMembers(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Detective Member (%s): %s", id, err) @@ -135,7 +137,7 @@ func resourceMemberCreate(ctx context.Context, d *schema.ResourceData, meta inte func resourceMemberRead(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) graphARN, accountID, err := MemberParseResourceID(d.Id()) if err != nil { @@ -159,9 +161,9 @@ func resourceMemberRead(ctx context.Context, d *schema.ResourceData, meta interf d.Set("disabled_reason", member.DisabledReason) d.Set("email_address", member.EmailAddress) d.Set("graph_arn", member.GraphArn) - d.Set("invited_time", aws.TimeValue(member.InvitedTime).Format(time.RFC3339)) + d.Set("invited_time", aws.ToTime(member.InvitedTime).Format(time.RFC3339)) d.Set(names.AttrStatus, member.Status) - d.Set("updated_time", aws.TimeValue(member.UpdatedTime).Format(time.RFC3339)) + d.Set("updated_time", aws.ToTime(member.UpdatedTime).Format(time.RFC3339)) d.Set("volume_usage_in_bytes", member.VolumeUsageInBytes) return diags @@ -170,7 +172,7 @@ func resourceMemberRead(ctx context.Context, d *schema.ResourceData, meta interf func resourceMemberDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) graphARN, accountID, err := MemberParseResourceID(d.Id()) if err != nil { @@ -178,12 +180,12 @@ func resourceMemberDelete(ctx context.Context, d *schema.ResourceData, meta inte } log.Printf("[DEBUG] Deleting Detective Member: %s", d.Id()) - _, err = conn.DeleteMembersWithContext(ctx, &detective.DeleteMembersInput{ - AccountIds: aws.StringSlice([]string{accountID}), + _, err = conn.DeleteMembers(ctx, &detective.DeleteMembersInput{ + AccountIds: []string{accountID}, GraphArn: aws.String(graphARN), }) - if tfawserr.ErrCodeEquals(err, detective.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -213,58 +215,56 @@ func MemberParseResourceID(id string) (string, string, error) { return "", "", 
fmt.Errorf("unexpected format for ID (%[1]s), expected graph_arn%[2]saccount_id", id, memberResourceIDSeparator) } -func FindMemberByGraphByTwoPartKey(ctx context.Context, conn *detective.Detective, graphARN, accountID string) (*detective.MemberDetail, error) { +func FindMemberByGraphByTwoPartKey(ctx context.Context, conn *detective.Client, graphARN, accountID string) (*awstypes.MemberDetail, error) { input := &detective.ListMembersInput{ GraphArn: aws.String(graphARN), } - return findMember(ctx, conn, input, func(v *detective.MemberDetail) bool { - return aws.StringValue(v.AccountId) == accountID + return findMember(ctx, conn, input, func(v awstypes.MemberDetail) bool { + return aws.ToString(v.AccountId) == accountID }) } -func findMember(ctx context.Context, conn *detective.Detective, input *detective.ListMembersInput, filter tfslices.Predicate[*detective.MemberDetail]) (*detective.MemberDetail, error) { +func findMember(ctx context.Context, conn *detective.Client, input *detective.ListMembersInput, filter tfslices.Predicate[awstypes.MemberDetail]) (*awstypes.MemberDetail, error) { output, err := findMembers(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findMembers(ctx context.Context, conn *detective.Detective, input *detective.ListMembersInput, filter tfslices.Predicate[*detective.MemberDetail]) ([]*detective.MemberDetail, error) { - var output []*detective.MemberDetail +func findMembers(ctx context.Context, conn *detective.Client, input *detective.ListMembersInput, filter tfslices.Predicate[awstypes.MemberDetail]) ([]awstypes.MemberDetail, error) { + var output []awstypes.MemberDetail - err := conn.ListMembersPagesWithContext(ctx, input, func(page *detective.ListMembersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := detective.NewListMembersPaginator(conn, input) - for _, v := range page.MemberDetails { - 
if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, detective.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.MemberDetails { + if filter(v) { + output = append(output, v) + } + } } return output, nil } -func statusMember(ctx context.Context, conn *detective.Detective, graphARN, adminAccountID string) retry.StateRefreshFunc { +func statusMember(ctx context.Context, conn *detective.Client, graphARN, adminAccountID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindMemberByGraphByTwoPartKey(ctx, conn, graphARN, adminAccountID) @@ -276,24 +276,24 @@ func statusMember(ctx context.Context, conn *detective.Detective, graphARN, admi return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func waitMemberInvited(ctx context.Context, conn *detective.Detective, graphARN, adminAccountID string) (*detective.MemberDetail, error) { +func waitMemberInvited(ctx context.Context, conn *detective.Client, graphARN, adminAccountID string) (*awstypes.MemberDetail, error) { const ( timeout = 4 * time.Minute ) stateConf := &retry.StateChangeConf{ - Pending: []string{detective.MemberStatusVerificationInProgress}, - Target: []string{detective.MemberStatusInvited}, + Pending: enum.Slice(awstypes.MemberStatusVerificationInProgress), + Target: enum.Slice(awstypes.MemberStatusInvited), Refresh: statusMember(ctx, conn, graphARN, adminAccountID), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := 
outputRaw.(*detective.MemberDetail); ok { + if output, ok := outputRaw.(*awstypes.MemberDetail); ok { return output, err } diff --git a/internal/service/detective/member_test.go b/internal/service/detective/member_test.go index 31cb96760b0..05e14d65dd6 100644 --- a/internal/service/detective/member_test.go +++ b/internal/service/detective/member_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/detective" + awstypes "github.com/aws/aws-sdk-go-v2/service/detective/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -20,7 +20,7 @@ import ( func testAccMember_basic(t *testing.T) { ctx := acctest.Context(t) - var detectiveOutput detective.MemberDetail + var detectiveOutput awstypes.MemberDetail resourceName := "aws_detective_member.test" dataSourceAlternate := "data.aws_caller_identity.member" email := testAccMemberFromEnv(t) @@ -42,7 +42,7 @@ func testAccMember_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, names.AttrAccountID, dataSourceAlternate, names.AttrAccountID), acctest.CheckResourceAttrRFC3339(resourceName, "invited_time"), acctest.CheckResourceAttrRFC3339(resourceName, "updated_time"), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, detective.MemberStatusInvited), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.MemberStatusInvited)), ), }, { @@ -58,7 +58,7 @@ func testAccMember_basic(t *testing.T) { func testAccMember_disappears(t *testing.T) { ctx := acctest.Context(t) - var detectiveOutput detective.MemberDetail + var detectiveOutput awstypes.MemberDetail resourceName := "aws_detective_member.test" email := testAccMemberFromEnv(t) @@ -85,7 +85,7 @@ func testAccMember_disappears(t *testing.T) { func testAccMember_message(t *testing.T) { ctx := acctest.Context(t) - var detectiveOutput detective.MemberDetail + 
var detectiveOutput awstypes.MemberDetail resourceName := "aws_detective_member.test" dataSourceAlternate := "data.aws_caller_identity.member" email := testAccMemberFromEnv(t) @@ -107,7 +107,7 @@ func testAccMember_message(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, names.AttrAccountID, dataSourceAlternate, names.AttrAccountID), acctest.CheckResourceAttrRFC3339(resourceName, "invited_time"), acctest.CheckResourceAttrRFC3339(resourceName, "updated_time"), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, detective.MemberStatusInvited), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.MemberStatusInvited)), ), }, { @@ -120,14 +120,14 @@ func testAccMember_message(t *testing.T) { }) } -func testAccCheckMemberExists(ctx context.Context, n string, v *detective.MemberDetail) resource.TestCheckFunc { +func testAccCheckMemberExists(ctx context.Context, n string, v *awstypes.MemberDetail) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveClient(ctx) graphARN, accountID, err := tfdetective.MemberParseResourceID(rs.Primary.ID) if err != nil { @@ -148,7 +148,7 @@ func testAccCheckMemberExists(ctx context.Context, n string, v *detective.Member func testAccCheckMemberDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_detective_member" { diff --git a/internal/service/detective/organization_admin_account.go b/internal/service/detective/organization_admin_account.go index a4e8da2e7ee..1e0cbbbd385 100644 --- 
a/internal/service/detective/organization_admin_account.go +++ b/internal/service/detective/organization_admin_account.go @@ -8,13 +8,14 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/detective" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/detective" + awstypes "github.com/aws/aws-sdk-go-v2/service/detective/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -47,14 +48,14 @@ func ResourceOrganizationAdminAccount() *schema.Resource { func resourceOrganizationAdminAccountCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) accountID := d.Get(names.AttrAccountID).(string) input := &detective.EnableOrganizationAdminAccountInput{ AccountId: aws.String(accountID), } - _, err := conn.EnableOrganizationAdminAccountWithContext(ctx, input) + _, err := conn.EnableOrganizationAdminAccount(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "enabling Detective Organization Admin Account (%s): %s", accountID, err) @@ -76,7 +77,7 @@ func resourceOrganizationAdminAccountCreate(ctx context.Context, d *schema.Resou func resourceOrganizationAdminAccountRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) administrator, err := FindOrganizationAdminAccountByAccountID(ctx, conn, d.Id()) @@ -98,11 +99,11 @@ func resourceOrganizationAdminAccountRead(ctx context.Context, d *schema.Resourc func resourceOrganizationAdminAccountDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) - _, err := conn.DisableOrganizationAdminAccountWithContext(ctx, &detective.DisableOrganizationAdminAccountInput{}) + _, err := conn.DisableOrganizationAdminAccount(ctx, &detective.DisableOrganizationAdminAccountInput{}) - if tfawserr.ErrCodeEquals(err, detective.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -121,50 +122,48 @@ func resourceOrganizationAdminAccountDelete(ctx context.Context, d *schema.Resou return diags } -func FindOrganizationAdminAccountByAccountID(ctx context.Context, conn *detective.Detective, accountID string) (*detective.Administrator, error) { +func FindOrganizationAdminAccountByAccountID(ctx context.Context, conn *detective.Client, accountID string) (*awstypes.Administrator, error) { input := &detective.ListOrganizationAdminAccountsInput{} - return findOrganizationAdminAccount(ctx, conn, input, func(v *detective.Administrator) bool { - return aws.StringValue(v.AccountId) == accountID + return findOrganizationAdminAccount(ctx, conn, input, func(v awstypes.Administrator) bool { + return aws.ToString(v.AccountId) == accountID }) } -func findOrganizationAdminAccount(ctx context.Context, conn *detective.Detective, input *detective.ListOrganizationAdminAccountsInput, filter tfslices.Predicate[*detective.Administrator]) (*detective.Administrator, error) { +func findOrganizationAdminAccount(ctx context.Context, conn *detective.Client, input 
*detective.ListOrganizationAdminAccountsInput, filter tfslices.Predicate[awstypes.Administrator]) (*awstypes.Administrator, error) { output, err := findOrganizationAdminAccounts(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findOrganizationAdminAccounts(ctx context.Context, conn *detective.Detective, input *detective.ListOrganizationAdminAccountsInput, filter tfslices.Predicate[*detective.Administrator]) ([]*detective.Administrator, error) { - var output []*detective.Administrator +func findOrganizationAdminAccounts(ctx context.Context, conn *detective.Client, input *detective.ListOrganizationAdminAccountsInput, filter tfslices.Predicate[awstypes.Administrator]) ([]awstypes.Administrator, error) { + var output []awstypes.Administrator - err := conn.ListOrganizationAdminAccountsPagesWithContext(ctx, input, func(page *detective.ListOrganizationAdminAccountsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := detective.NewListOrganizationAdminAccountsPaginator(conn, input) - for _, v := range page.Administrators { - if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsAErrorMessageContains[*awstypes.ValidationException](err, "account is not a member of an organization") { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrMessageContains(err, detective.ErrCodeValidationException, "account is not a member of an organization") { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.Administrators { + if filter(v) { + output = append(output, v) + } + } } return output, nil diff --git a/internal/service/detective/organization_admin_account_test.go 
b/internal/service/detective/organization_admin_account_test.go index 8408eb18940..a3dacc46db9 100644 --- a/internal/service/detective/organization_admin_account_test.go +++ b/internal/service/detective/organization_admin_account_test.go @@ -101,7 +101,7 @@ func testAccOrganizationAdminAccount_MultiRegion(t *testing.T) { func testAccCheckOrganizationAdminAccountDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_detective_organization_admin_account" { @@ -132,7 +132,7 @@ func testAccCheckOrganizationAdminAccountExists(ctx context.Context, n string) r return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DetectiveClient(ctx) _, err := tfdetective.FindOrganizationAdminAccountByAccountID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/detective/organization_configuration.go b/internal/service/detective/organization_configuration.go index de341b33fc8..59d4a3baa1f 100644 --- a/internal/service/detective/organization_configuration.go +++ b/internal/service/detective/organization_configuration.go @@ -6,8 +6,8 @@ package detective import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/detective" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/detective" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -45,15 +45,15 @@ func ResourceOrganizationConfiguration() *schema.Resource { func resourceOrganizationConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags 
diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) graphARN := d.Get("graph_arn").(string) input := &detective.UpdateOrganizationConfigurationInput{ - AutoEnable: aws.Bool(d.Get("auto_enable").(bool)), + AutoEnable: d.Get("auto_enable").(bool), GraphArn: aws.String(graphARN), } - _, err := conn.UpdateOrganizationConfigurationWithContext(ctx, input) + _, err := conn.UpdateOrganizationConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Detective Organization Configuration (%s): %s", graphARN, err) @@ -69,13 +69,13 @@ func resourceOrganizationConfigurationUpdate(ctx context.Context, d *schema.Reso func resourceOrganizationConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DetectiveConn(ctx) + conn := meta.(*conns.AWSClient).DetectiveClient(ctx) input := &detective.DescribeOrganizationConfigurationInput{ GraphArn: aws.String(d.Id()), } - output, err := conn.DescribeOrganizationConfigurationWithContext(ctx, input) + output, err := conn.DescribeOrganizationConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Detective Organization Configuration (%s): %s", d.Id(), err) diff --git a/internal/service/detective/service_endpoint_resolver_gen.go b/internal/service/detective/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..10e73e73b3f --- /dev/null +++ b/internal/service/detective/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package detective + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + detective_sdkv2 "github.com/aws/aws-sdk-go-v2/service/detective" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ detective_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver detective_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: detective_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params detective_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up detective endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + 
return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*detective_sdkv2.Options) { + return func(o *detective_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/detective/service_endpoints_gen_test.go b/internal/service/detective/service_endpoints_gen_test.go index 9461bf9fdf9..60838ef9a22 100644 --- a/internal/service/detective/service_endpoints_gen_test.go +++ b/internal/service/detective/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package detective_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - detective_sdkv1 "github.com/aws/aws-sdk-go/service/detective" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + detective_sdkv2 "github.com/aws/aws-sdk-go-v2/service/detective" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t 
*testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := detective_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(detective_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), detective_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := detective_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(detective_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), detective_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.DetectiveConn(ctx) - - req, _ := client.ListGraphsRequest(&detective_sdkv1.ListGraphsInput{}) + client := meta.DetectiveClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListGraphs(ctx, &detective_sdkv2.ListGraphsInput{}, + func(opts *detective_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + 
addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving detective default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving detective FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up detective endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, 
endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i 
interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/detective/service_package_gen.go b/internal/service/detective/service_package_gen.go index f7f45de9f88..62982d92d4f 100644 --- a/internal/service/detective/service_package_gen.go +++ b/internal/service/detective/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package detective import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - detective_sdkv1 "github.com/aws/aws-sdk-go/service/detective" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + detective_sdkv2 "github.com/aws/aws-sdk-go-v2/service/detective" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -62,25 +59,14 @@ func (p *servicePackage) ServicePackageName() string { return names.Detective } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*detective_sdkv1.Detective, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*detective_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return detective_sdkv1.New(sess.Copy(&cfg)), nil + return detective_sdkv2.NewFromConfig(cfg, + detective_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/detective/tags_gen.go b/internal/service/detective/tags_gen.go index 07123082e85..909dfccc944 100644 --- a/internal/service/detective/tags_gen.go +++ b/internal/service/detective/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/detective" - "github.com/aws/aws-sdk-go/service/detective/detectiveiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/detective" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists detective service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn detectiveiface.DetectiveAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *detective.Client, identifier string, optFns ...func(*detective.Options)) (tftags.KeyValueTags, error) { input := &detective.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn detectiveiface.DetectiveAPI, identifier // ListTags lists detective service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).DetectiveConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).DetectiveClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns detective service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from detective service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns detective service tags from Context. // nil is returned if there are no input tags. 
-func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets detective service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates detective service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn detectiveiface.DetectiveAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *detective.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*detective.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn detectiveiface.DetectiveAPI, identifie if len(removedTags) > 0 { input := &detective.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn detectiveiface.DetectiveAPI, identifie Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) 
if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn detectiveiface.DetectiveAPI, identifie // UpdateTags updates detective service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).DetectiveConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).DetectiveClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/devicefarm/service_endpoint_resolver_gen.go b/internal/service/devicefarm/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..8407250ab33 --- /dev/null +++ b/internal/service/devicefarm/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package devicefarm + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + devicefarm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/devicefarm" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ devicefarm_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver devicefarm_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: devicefarm_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params devicefarm_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + 
if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up devicefarm endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*devicefarm_sdkv2.Options) { + return func(o *devicefarm_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/devicefarm/service_endpoints_gen_test.go b/internal/service/devicefarm/service_endpoints_gen_test.go index 99b15da2f63..b7e18d3b75f 100644 --- a/internal/service/devicefarm/service_endpoints_gen_test.go +++ b/internal/service/devicefarm/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func 
TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := devicefarm_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), devicefarm_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := devicefarm_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), devicefarm_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving devicefarm default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func 
expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving devicefarm FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up devicefarm endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/devicefarm/service_package_gen.go b/internal/service/devicefarm/service_package_gen.go index 31316f5da53..c0aacd111c4 100644 --- a/internal/service/devicefarm/service_package_gen.go +++ b/internal/service/devicefarm/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package devicefarm @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" devicefarm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/devicefarm" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -85,19 +84,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*devicefarm_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return devicefarm_sdkv2.NewFromConfig(cfg, func(o *devicefarm_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return devicefarm_sdkv2.NewFromConfig(cfg, + devicefarm_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/devicefarm/tags_gen.go b/internal/service/devicefarm/tags_gen.go index f4cc29ecabd..af23c0025a0 100644 --- a/internal/service/devicefarm/tags_gen.go +++ b/internal/service/devicefarm/tags_gen.go @@ -98,12 +98,12 @@ func setTagsOut(ctx context.Context, tags []awstypes.Tag) { } // createTags creates devicefarm service tags for new resources. 
-func createTags(ctx context.Context, conn *devicefarm.Client, identifier string, tags []awstypes.Tag) error { +func createTags(ctx context.Context, conn *devicefarm.Client, identifier string, tags []awstypes.Tag, optFns ...func(*devicefarm.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates devicefarm service tags. diff --git a/internal/service/devopsguru/service_endpoint_resolver_gen.go b/internal/service/devopsguru/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..7d75a9544c1 --- /dev/null +++ b/internal/service/devopsguru/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package devopsguru + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + devopsguru_sdkv2 "github.com/aws/aws-sdk-go-v2/service/devopsguru" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ devopsguru_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver devopsguru_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: devopsguru_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params devopsguru_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + 
params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up devopsguru endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*devopsguru_sdkv2.Options) { + return func(o *devopsguru_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/devopsguru/service_endpoints_gen_test.go b/internal/service/devopsguru/service_endpoints_gen_test.go index d1d0eaeb12c..c954744b265 100644 --- a/internal/service/devopsguru/service_endpoints_gen_test.go +++ b/internal/service/devopsguru/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ 
withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := devopsguru_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), devopsguru_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := devopsguru_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), devopsguru_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving devopsguru default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) +func expectDefaultFIPSEndpoint(t *testing.T, region string) 
caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving devopsguru FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up devopsguru endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/devopsguru/service_package_gen.go b/internal/service/devopsguru/service_package_gen.go index a1ea8470104..013da856af1 100644 --- a/internal/service/devopsguru/service_package_gen.go +++ b/internal/service/devopsguru/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package devopsguru @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" devopsguru_sdkv2 "github.com/aws/aws-sdk-go-v2/service/devopsguru" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -65,19 +64,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*devopsguru_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return devopsguru_sdkv2.NewFromConfig(cfg, func(o *devopsguru_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return devopsguru_sdkv2.NewFromConfig(cfg, + devopsguru_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/directconnect/lag.go b/internal/service/directconnect/lag.go index 7bd1ddd1264..aceea99bf8f 100644 --- a/internal/service/directconnect/lag.go +++ b/internal/service/directconnect/lag.go @@ -201,6 +201,10 @@ func resourceLagDelete(ctx context.Context, d *schema.ResourceData, meta interfa return diags } + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting Direct Connect LAG (%s): listing connections: %s", d.Id(), err) + } + for _, connection := range lag.Connections { if err := deleteConnection(ctx, conn, 
aws.StringValue(connection.ConnectionId), waitConnectionDeleted); err != nil { return sdkdiag.AppendFromErr(diags, err) diff --git a/internal/service/directconnect/service_endpoint_resolver_gen.go b/internal/service/directconnect/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..8445f971931 --- /dev/null +++ b/internal/service/directconnect/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package directconnect + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up directconnect endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/directconnect/service_endpoints_gen_test.go b/internal/service/directconnect/service_endpoints_gen_test.go index 054e9411b83..910a8e7822f 100644 --- a/internal/service/directconnect/service_endpoints_gen_test.go +++ b/internal/service/directconnect/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func
TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(directconnect_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(directconnect_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving directconnect default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving directconnect FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if
dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up directconnect endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/directconnect/service_package_gen.go b/internal/service/directconnect/service_package_gen.go index a65a5a64097..aa7c0014dfc 100644 --- a/internal/service/directconnect/service_package_gen.go +++ b/internal/service/directconnect/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package directconnect @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" directconnect_sdkv1 "github.com/aws/aws-sdk-go/service/directconnect" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -178,11 +177,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*d "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return directconnect_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/dlm/service_endpoint_resolver_gen.go b/internal/service/dlm/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..d2ecec96a2c --- /dev/null +++ b/internal/service/dlm/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT.
+ +package dlm + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + dlm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dlm" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ dlm_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver dlm_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: dlm_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params dlm_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up dlm endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx,
params) +} + +func withBaseEndpoint(endpoint string) func(*dlm_sdkv2.Options) { + return func(o *dlm_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/dlm/service_endpoints_gen_test.go b/internal/service/dlm/service_endpoints_gen_test.go index ebd47c83bda..63e66178022 100644 --- a/internal/service/dlm/service_endpoints_gen_test.go +++ b/internal/service/dlm/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := dlm_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), dlm_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := dlm_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
dlm_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving dlm default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving dlm FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up dlm endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/dlm/service_package_gen.go b/internal/service/dlm/service_package_gen.go index 5101affdd72..6552b7c43d1 100644 --- a/internal/service/dlm/service_package_gen.go +++ b/internal/service/dlm/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT.
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package dlm @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" dlm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dlm" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*dlm_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return dlm_sdkv2.NewFromConfig(cfg, func(o *dlm_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return dlm_sdkv2.NewFromConfig(cfg, + dlm_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/dms/certificate.go b/internal/service/dms/certificate.go index 8834c89d50e..616bd7b5d69 100644 --- a/internal/service/dms/certificate.go +++ b/internal/service/dms/certificate.go @@ -8,15 +8,17 @@ import ( "log" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" itypes "github.com/hashicorp/terraform-provider-aws/internal/types" @@ -26,7 +28,7 @@ import ( // @SDKResource("aws_dms_certificate", name="Certificate") // @Tags(identifierAttribute="certificate_arn") -func ResourceCertificate() *schema.Resource { +func resourceCertificate() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCertificateCreate, ReadWithoutTimeout: resourceCertificateRead, @@ -77,7 +79,7 @@ func ResourceCertificate() *schema.Resource { func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) certificateID := d.Get("certificate_id").(string) input := &dms.ImportCertificateInput{ @@ -97,7 +99,7 @@ func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta input.CertificateWallet = v } - _, err := conn.ImportCertificateWithContext(ctx, input) + _, err := conn.ImportCertificate(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Certificate (%s): %s", certificateID, err) @@ -110,9 +112,9 @@ func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta func resourceCertificateRead(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - certificate, err := FindCertificateByID(ctx, conn, d.Id()) + certificate, err := findCertificateByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Certificate (%s) not found, removing from state", d.Id()) @@ -124,7 +126,15 @@ func resourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading DMS Certificate (%s): %s", d.Id(), err) } - resourceCertificateSetState(d, certificate) + d.SetId(aws.ToString(certificate.CertificateIdentifier)) + d.Set("certificate_id", certificate.CertificateIdentifier) + d.Set(names.AttrCertificateARN, certificate.CertificateArn) + if v := aws.ToString(certificate.CertificatePem); v != "" { + d.Set("certificate_pem", v) + } + if certificate.CertificateWallet != nil && len(certificate.CertificateWallet) != 0 { + d.Set("certificate_wallet", itypes.Base64EncodeOnce(certificate.CertificateWallet)) + } return diags } @@ -139,14 +149,14 @@ func resourceCertificateUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceCertificateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Certificate: %s", d.Id()) - _, err := conn.DeleteCertificateWithContext(ctx, &dms.DeleteCertificateInput{ + _, err := conn.DeleteCertificate(ctx, &dms.DeleteCertificateInput{ CertificateArn: aws.String(d.Get(names.AttrCertificateARN).(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -157,26 +167,12 @@ func resourceCertificateDelete(ctx context.Context, d 
*schema.ResourceData, meta return diags } -func resourceCertificateSetState(d *schema.ResourceData, cert *dms.Certificate) { - d.SetId(aws.StringValue(cert.CertificateIdentifier)) - - d.Set("certificate_id", cert.CertificateIdentifier) - d.Set(names.AttrCertificateARN, cert.CertificateArn) - - if aws.StringValue(cert.CertificatePem) != "" { - d.Set("certificate_pem", cert.CertificatePem) - } - if cert.CertificateWallet != nil && len(cert.CertificateWallet) != 0 { - d.Set("certificate_wallet", itypes.Base64EncodeOnce(cert.CertificateWallet)) - } -} - -func FindCertificateByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.Certificate, error) { +func findCertificateByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.Certificate, error) { input := &dms.DescribeCertificatesInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("certificate-id"), - Values: []*string{aws.String(id)}, + Values: []string{id}, }, }, } @@ -184,7 +180,7 @@ func FindCertificateByID(ctx context.Context, conn *dms.DatabaseMigrationService return findCertificate(ctx, conn, input) } -func findCertificate(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeCertificatesInput) (*dms.Certificate, error) { +func findCertificate(ctx context.Context, conn *dms.Client, input *dms.DescribeCertificatesInput) (*awstypes.Certificate, error) { output, err := findCertificates(ctx, conn, input) if err != nil { @@ -194,33 +190,27 @@ func findCertificate(ctx context.Context, conn *dms.DatabaseMigrationService, in return tfresource.AssertSinglePtrResult(output) } -func findCertificates(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeCertificatesInput) ([]*dms.Certificate, error) { - var output []*dms.Certificate +func findCertificates(ctx context.Context, conn *dms.Client, input *dms.DescribeCertificatesInput) ([]*awstypes.Certificate, error) { + var output []awstypes.Certificate - err := 
conn.DescribeCertificatesPagesWithContext(ctx, input, func(page *dms.DescribeCertificatesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeCertificatesPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.Certificates { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.Certificates...) } - return output, nil + return tfslices.ToPointers(output), nil } diff --git a/internal/service/dms/certificate_data_source.go b/internal/service/dms/certificate_data_source.go index 470711f8a1a..02bdfbb0b70 100644 --- a/internal/service/dms/certificate_data_source.go +++ b/internal/service/dms/certificate_data_source.go @@ -7,7 +7,7 @@ import ( "context" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -18,8 +18,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_dms_certificate") -func DataSourceCertificate() *schema.Resource { +// @SDKDataSource("aws_dms_certificate", name="Certificate") +func dataSourceCertificate() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceCertificateRead, @@ -79,19 +79,19 @@ func DataSourceCertificate() *schema.Resource { func dataSourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags 
diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig certificateID := d.Get("certificate_id").(string) - out, err := FindCertificateByID(ctx, conn, certificateID) + out, err := findCertificateByID(ctx, conn, certificateID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Certificate (%s): %s", certificateID, err) } - d.SetId(aws.StringValue(out.CertificateIdentifier)) - arn := aws.StringValue(out.CertificateArn) + d.SetId(aws.ToString(out.CertificateIdentifier)) + arn := aws.ToString(out.CertificateArn) d.Set(names.AttrCertificateARN, arn) d.Set("certificate_id", out.CertificateIdentifier) d.Set("certificate_pem", out.CertificatePem) @@ -104,6 +104,7 @@ func dataSourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta d.Set("valid_to_date", out.ValidToDate.String()) tags, err := listTags(ctx, conn, arn) + if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for DMS Certificate (%s): %s", arn, err) } diff --git a/internal/service/dms/certificate_test.go b/internal/service/dms/certificate_test.go index 8690c4d32df..85edc80c042 100644 --- a/internal/service/dms/certificate_test.go +++ b/internal/service/dms/certificate_test.go @@ -147,7 +147,7 @@ func testAccCheckCertificateDestroy(ctx context.Context) resource.TestCheckFunc continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindCertificateByID(ctx, conn, rs.Primary.ID) @@ -173,7 +173,7 @@ func testAccCertificateExists(ctx context.Context, n string) resource.TestCheckF return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindCertificateByID(ctx, conn, 
rs.Primary.ID) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index e62faafc573..413b1a72c28 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -11,9 +11,9 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -21,6 +21,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfkms "github.com/hashicorp/terraform-provider-aws/internal/service/kms" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -31,7 +33,7 @@ import ( // @SDKResource("aws_dms_endpoint", name="Endpoint") // @Tags(identifierAttribute="endpoint_arn") -func ResourceEndpoint() *schema.Resource { +func resourceEndpoint() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceEndpointCreate, ReadWithoutTimeout: resourceEndpointRead, @@ -110,9 +112,9 @@ func ResourceEndpoint() *schema.Resource { ValidateFunc: validEndpointID, }, names.AttrEndpointType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.ReplicationEndpointTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: 
enum.Validate[awstypes.ReplicationEndpointTypeValue](), }, "engine_name": { Type: schema.TypeString, @@ -163,10 +165,10 @@ func ResourceEndpoint() *schema.Resource { Default: false, }, "message_format": { - Type: schema.TypeString, - Optional: true, - Default: dms.MessageFormatValueJson, - ValidateFunc: validation.StringInSlice(dms.MessageFormatValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.MessageFormatValueJson, + ValidateDiagFunc: enum.Validate[awstypes.MessageFormatValue](), }, "message_max_bytes": { Type: schema.TypeInt, @@ -192,9 +194,9 @@ func ResourceEndpoint() *schema.Resource { Optional: true, }, "security_protocol": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.KafkaSecurityProtocol_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.KafkaSecurityProtocol](), }, "ssl_ca_certificate_arn": { Type: schema.TypeString, @@ -257,11 +259,11 @@ func ResourceEndpoint() *schema.Resource { Default: false, }, "message_format": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: dms.MessageFormatValueJson, - ValidateFunc: validation.StringInSlice(dms.MessageFormatValue_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.MessageFormatValueJson, + ValidateDiagFunc: enum.Validate[awstypes.MessageFormatValue](), }, "partition_include_schema_table": { Type: schema.TypeBool, @@ -307,10 +309,10 @@ func ResourceEndpoint() *schema.Resource { Default: mongoDBAuthSourceAdmin, }, "auth_type": { - Type: schema.TypeString, - Optional: true, - Default: dms.AuthTypeValuePassword, - ValidateFunc: validation.StringInSlice(dms.AuthTypeValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.AuthTypeValuePassword, + ValidateDiagFunc: enum.Validate[awstypes.AuthTypeValue](), }, "docs_to_investigate": { Type: schema.TypeString, @@ -323,10 +325,10 
@@ func ResourceEndpoint() *schema.Resource { Default: "false", }, "nesting_level": { - Type: schema.TypeString, - Optional: true, - Default: dms.NestingLevelValueNone, - ValidateFunc: validation.StringInSlice(dms.NestingLevelValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.NestingLevelValueNone, + ValidateDiagFunc: enum.Validate[awstypes.NestingLevelValue](), }, }, }, @@ -433,9 +435,9 @@ func ResourceEndpoint() *schema.Resource { Sensitive: true, }, "auth_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.RedisAuthTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.RedisAuthTypeValue](), }, "auth_user_name": { Type: schema.TypeString, @@ -455,10 +457,10 @@ func ResourceEndpoint() *schema.Resource { Optional: true, }, "ssl_security_protocol": { - Type: schema.TypeString, - Optional: true, - Default: dms.SslSecurityProtocolValueSslEncryption, - ValidateFunc: validation.StringInSlice(dms.SslSecurityProtocolValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.SslSecurityProtocolValueSslEncryption, + ValidateDiagFunc: enum.Validate[awstypes.SslSecurityProtocolValue](), }, }, }, @@ -523,10 +525,10 @@ func ResourceEndpoint() *schema.Resource { Default: "", }, "canned_acl_for_objects": { - Type: schema.TypeString, - Optional: true, - Default: dms.CannedAclForObjectsValueNone, - ValidateFunc: validation.StringInSlice(dms.CannedAclForObjectsValue_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: awstypes.CannedAclForObjectsValueNone, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.CannedAclForObjectsValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -585,10 +587,10 @@ func ResourceEndpoint() *schema.Resource { Default: "\\n", }, "data_format": { - Type: schema.TypeString, - Optional: true, - Default: 
dms.DataFormatValueCsv, - ValidateFunc: validation.StringInSlice(dms.DataFormatValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DataFormatValueCsv, + ValidateDiagFunc: enum.Validate[awstypes.DataFormatValue](), }, "data_page_size": { Type: schema.TypeInt, @@ -597,10 +599,10 @@ func ResourceEndpoint() *schema.Resource { ValidateFunc: validation.IntAtLeast(0), }, "date_partition_delimiter": { - Type: schema.TypeString, - Optional: true, - Default: dms.DatePartitionDelimiterValueSlash, - ValidateFunc: validation.StringInSlice(dms.DatePartitionDelimiterValue_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DatePartitionDelimiterValueSlash, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.DatePartitionDelimiterValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -611,10 +613,10 @@ func ResourceEndpoint() *schema.Resource { Default: false, }, "date_partition_sequence": { - Type: schema.TypeString, - Optional: true, - Default: dms.DatePartitionSequenceValueYyyymmdd, - ValidateFunc: validation.StringInSlice(dms.DatePartitionSequenceValue_Values(), true), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DatePartitionSequenceValueYyyymmdd, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.DatePartitionSequenceValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -631,10 +633,10 @@ func ResourceEndpoint() *schema.Resource { Default: true, }, "encoding_type": { - Type: schema.TypeString, - Optional: true, - Default: dms.EncodingTypeValueRleDictionary, - ValidateFunc: validation.StringInSlice(dms.EncodingTypeValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.EncodingTypeValueRleDictionary, + ValidateDiagFunc: enum.Validate[awstypes.EncodingTypeValue](), }, "encryption_mode": { Type: schema.TypeString, @@ -675,10 +677,10 @@ func ResourceEndpoint() *schema.Resource { 
Default: false, }, "parquet_version": { - Type: schema.TypeString, - Optional: true, - Default: dms.ParquetVersionValueParquet10, - ValidateFunc: validation.StringInSlice(dms.ParquetVersionValue_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.ParquetVersionValueParquet10, + ValidateDiagFunc: enum.Validate[awstypes.ParquetVersionValue](), }, "preserve_transactions": { Type: schema.TypeBool, @@ -750,10 +752,10 @@ func ResourceEndpoint() *schema.Resource { Optional: true, }, "ssl_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DmsSslModeValue_Values(), false), + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.DmsSslModeValue](), }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), @@ -776,12 +778,12 @@ func ResourceEndpoint() *schema.Resource { func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) endpointID := d.Get("endpoint_id").(string) input := &dms.CreateEndpointInput{ EndpointIdentifier: aws.String(endpointID), - EndpointType: aws.String(d.Get(names.AttrEndpointType).(string)), + EndpointType: awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)), EngineName: aws.String(d.Get("engine_name").(string)), Tags: getTagsIn(ctx), } @@ -801,22 +803,22 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("ssl_mode"); ok { - input.SslMode = aws.String(v.(string)) + input.SslMode = awstypes.DmsSslModeValue(v.(string)) } switch d.Get("engine_name").(string) { case engineNameAurora, engineNameMariadb, engineNameMySQL: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MySQLSettings = &dms.MySQLSettings{ + input.MySQLSettings 
= &awstypes.MySQLSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.MySQLSettings = &dms.MySQLSettings{ + input.MySQLSettings = &awstypes.MySQLSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -824,7 +826,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in expandTopLevelConnectionInfo(d, input) } case engineNameAuroraPostgresql, engineNamePostgres: - settings := &dms.PostgreSQLSettings{} + settings := &awstypes.PostgreSQLSettings{} if _, ok := d.GetOk("postgres_settings"); ok { settings = expandPostgreSQLSettings(d.Get("postgres_settings").([]interface{})[0].(map[string]interface{})) } @@ -837,7 +839,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in settings.Username = aws.String(d.Get(names.AttrUsername).(string)) settings.Password = aws.String(d.Get(names.AttrPassword).(string)) settings.ServerName = aws.String(d.Get("server_name").(string)) - settings.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) // Set connection info in top-level namespace as well @@ -846,15 +848,15 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in input.PostgreSQLSettings = settings case engineNameDynamoDB: - input.DynamoDbSettings = &dms.DynamoDbSettings{ + input.DynamoDbSettings = &awstypes.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), } case 
engineNameElasticsearch, engineNameOpenSearch: - input.ElasticsearchSettings = &dms.ElasticsearchSettings{ + input.ElasticsearchSettings = &awstypes.ElasticsearchSettings{ ServiceAccessRoleArn: aws.String(d.Get("elasticsearch_settings.0.service_access_role_arn").(string)), EndpointUri: aws.String(d.Get("elasticsearch_settings.0.endpoint_uri").(string)), - ErrorRetryDuration: aws.Int64(int64(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), - FullLoadErrorPercentage: aws.Int64(int64(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), + ErrorRetryDuration: aws.Int32(int32(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), + FullLoadErrorPercentage: aws.Int32(int32(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), } case engineNameKafka: @@ -862,7 +864,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in case engineNameKinesis: input.KinesisSettings = expandKinesisSettings(d.Get("kinesis_settings").([]interface{})[0].(map[string]interface{})) case engineNameMongodb: - var settings = &dms.MongoDbSettings{} + var settings = &awstypes.MongoDbSettings{} if _, ok := d.GetOk("secrets_manager_arn"); ok { settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) @@ -871,7 +873,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in settings.Username = aws.String(d.Get(names.AttrUsername).(string)) settings.Password = aws.String(d.Get(names.AttrPassword).(string)) settings.ServerName = aws.String(d.Get("server_name").(string)) - settings.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) // Set connection info in top-level namespace as well expandTopLevelConnectionInfo(d, input) @@ -879,9 +881,9 @@ func resourceEndpointCreate(ctx context.Context, d 
*schema.ResourceData, meta in settings.DatabaseName = aws.String(d.Get(names.AttrDatabaseName).(string)) settings.KmsKeyId = aws.String(d.Get(names.AttrKMSKeyARN).(string)) - settings.AuthType = aws.String(d.Get("mongodb_settings.0.auth_type").(string)) - settings.AuthMechanism = aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)) - settings.NestingLevel = aws.String(d.Get("mongodb_settings.0.nesting_level").(string)) + settings.AuthType = awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)) + settings.AuthMechanism = awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)) + settings.NestingLevel = awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)) settings.ExtractDocId = aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)) settings.DocsToInvestigate = aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)) settings.AuthSource = aws.String(d.Get("mongodb_settings.0.auth_source").(string)) @@ -889,17 +891,17 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in input.MongoDbSettings = settings case engineNameOracle: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.OracleSettings = &dms.OracleSettings{ + input.OracleSettings = &awstypes.OracleSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } } else { - input.OracleSettings = &dms.OracleSettings{ + input.OracleSettings = &awstypes.OracleSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: 
aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -909,7 +911,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in case engineNameRedis: input.RedisSettings = expandRedisSettings(d.Get("redis_settings").([]interface{})[0].(map[string]interface{})) case engineNameRedshift: - var settings = &dms.RedshiftSettings{ + var settings = &awstypes.RedshiftSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -920,7 +922,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in settings.Username = aws.String(d.Get(names.AttrUsername).(string)) settings.Password = aws.String(d.Get(names.AttrPassword).(string)) settings.ServerName = aws.String(d.Get("server_name").(string)) - settings.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + settings.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) // Set connection info in top-level namespace as well expandTopLevelConnectionInfo(d, input) @@ -938,7 +940,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := tfMap["encryption_mode"].(string); ok && v != "" { - settings.EncryptionMode = aws.String(v) + settings.EncryptionMode = awstypes.EncryptionModeValue(v) } if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok && v != "" { @@ -953,17 +955,17 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in input.RedshiftSettings = settings case engineNameSQLServer, engineNameBabelfish: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } } else { - input.MicrosoftSQLServerSettings = 
&dms.MicrosoftSQLServerSettings{ + input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -972,17 +974,17 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in } case engineNameSybase: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.SybaseSettings = &dms.SybaseSettings{ + input.SybaseSettings = &awstypes.SybaseSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } } else { - input.SybaseSettings = &dms.SybaseSettings{ + input.SybaseSettings = &awstypes.SybaseSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -991,17 +993,17 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in } case engineNameDB2, engineNameDB2zOS: if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.IBMDb2Settings = &dms.IBMDb2Settings{ + input.IBMDb2Settings = &awstypes.IBMDb2Settings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } } else { - input.IBMDb2Settings = &dms.IBMDb2Settings{ 
+ input.IBMDb2Settings = &awstypes.IBMDb2Settings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } @@ -1014,11 +1016,10 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in expandTopLevelConnectionInfo(d, input) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), + _, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.CreateEndpointWithContext(ctx, input) - }, - dms.ErrCodeAccessDeniedFault) + return conn.CreateEndpoint(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Endpoint (%s): %s", endpointID, err) @@ -1031,9 +1032,9 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - endpoint, err := FindEndpointByID(ctx, conn, d.Id()) + endpoint, err := findEndpointByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Endpoint (%s) not found, removing from state", d.Id()) @@ -1054,12 +1055,12 @@ func resourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { endpointARN := 
d.Get("endpoint_arn").(string) pauseTasks := d.Get("pause_replication_tasks").(bool) - var tasks []*dms.ReplicationTask + var tasks []awstypes.ReplicationTask if pauseTasks { var err error @@ -1080,7 +1081,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } if d.HasChange(names.AttrEndpointType) { - input.EndpointType = aws.String(d.Get(names.AttrEndpointType).(string)) + input.EndpointType = awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)) } if d.HasChange("engine_name") { @@ -1092,13 +1093,13 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } if d.HasChange("service_access_role") { - input.DynamoDbSettings = &dms.DynamoDbSettings{ + input.DynamoDbSettings = &awstypes.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), } } if d.HasChange("ssl_mode") { - input.SslMode = aws.String(d.Get("ssl_mode").(string)) + input.SslMode = awstypes.DmsSslModeValue(d.Get("ssl_mode").(string)) } switch engineName := d.Get("engine_name").(string); engineName { @@ -1107,16 +1108,16 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MySQLSettings = &dms.MySQLSettings{ + input.MySQLSettings = &awstypes.MySQLSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.MySQLSettings = &dms.MySQLSettings{ + input.MySQLSettings = &awstypes.MySQLSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + 
Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) @@ -1130,17 +1131,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.PostgreSQLSettings = &dms.PostgreSQLSettings{ + input.PostgreSQLSettings = &awstypes.PostgreSQLSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.PostgreSQLSettings = &dms.PostgreSQLSettings{ + input.PostgreSQLSettings = &awstypes.PostgreSQLSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') @@ -1151,7 +1152,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } case engineNameDynamoDB: if d.HasChange("service_access_role") { - input.DynamoDbSettings = &dms.DynamoDbSettings{ + input.DynamoDbSettings = &awstypes.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), } } @@ -1162,11 +1163,11 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in "elasticsearch_settings.0.full_load_error_percentage", "elasticsearch_settings.0.service_access_role_arn", "elasticsearch_settings.0.use_new_mapping_type") { - 
input.ElasticsearchSettings = &dms.ElasticsearchSettings{ + input.ElasticsearchSettings = &awstypes.ElasticsearchSettings{ ServiceAccessRoleArn: aws.String(d.Get("elasticsearch_settings.0.service_access_role_arn").(string)), EndpointUri: aws.String(d.Get("elasticsearch_settings.0.endpoint_uri").(string)), - ErrorRetryDuration: aws.Int64(int64(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), - FullLoadErrorPercentage: aws.Int64(int64(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), + ErrorRetryDuration: aws.Int32(int32(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), + FullLoadErrorPercentage: aws.Int32(int32(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), } input.EngineName = aws.String(engineName) @@ -1188,31 +1189,31 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in "mongodb_settings.0.docs_to_investigate", "mongodb_settings.0.auth_source", "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MongoDbSettings = &dms.MongoDbSettings{ + input.MongoDbSettings = &awstypes.MongoDbSettings{ SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), KmsKeyId: aws.String(d.Get(names.AttrKMSKeyARN).(string)), - AuthType: aws.String(d.Get("mongodb_settings.0.auth_type").(string)), - AuthMechanism: aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)), - NestingLevel: aws.String(d.Get("mongodb_settings.0.nesting_level").(string)), + AuthType: awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)), + AuthMechanism: awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)), + NestingLevel: 
awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)), ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), } } else { - input.MongoDbSettings = &dms.MongoDbSettings{ + input.MongoDbSettings = &awstypes.MongoDbSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), KmsKeyId: aws.String(d.Get(names.AttrKMSKeyARN).(string)), - AuthType: aws.String(d.Get("mongodb_settings.0.auth_type").(string)), - AuthMechanism: aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)), - NestingLevel: aws.String(d.Get("mongodb_settings.0.nesting_level").(string)), + AuthType: awstypes.AuthTypeValue(d.Get("mongodb_settings.0.auth_type").(string)), + AuthMechanism: awstypes.AuthMechanismValue(d.Get("mongodb_settings.0.auth_mechanism").(string)), + NestingLevel: awstypes.NestingLevelValue(d.Get("mongodb_settings.0.nesting_level").(string)), ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), @@ -1228,17 +1229,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.OracleSettings = &dms.OracleSettings{ + input.OracleSettings = 
&awstypes.OracleSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.OracleSettings = &dms.OracleSettings{ + input.OracleSettings = &awstypes.OracleSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'oracle') @@ -1258,17 +1259,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in "redshift_settings", "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.RedshiftSettings = &dms.RedshiftSettings{ + input.RedshiftSettings = &awstypes.RedshiftSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.RedshiftSettings = &dms.RedshiftSettings{ + input.RedshiftSettings = &awstypes.RedshiftSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'redshift') @@ -1288,7 +1289,7 @@ func resourceEndpointUpdate(ctx context.Context, d 
*schema.ResourceData, meta in } if v, ok := tfMap["encryption_mode"].(string); ok && v != "" { - input.RedshiftSettings.EncryptionMode = aws.String(v) + input.RedshiftSettings.EncryptionMode = awstypes.EncryptionModeValue(v) } if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok && v != "" { @@ -1306,17 +1307,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + input.MicrosoftSQLServerSettings = &awstypes.MicrosoftSQLServerSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') @@ -1330,17 +1331,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.SybaseSettings = &dms.SybaseSettings{ + input.SybaseSettings = &awstypes.SybaseSettings{ 
DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.SybaseSettings = &dms.SybaseSettings{ + input.SybaseSettings = &awstypes.SybaseSettings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') @@ -1354,17 +1355,17 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in names.AttrUsername, names.AttrPassword, "server_name", names.AttrPort, names.AttrDatabaseName, "secrets_manager_access_role_arn", "secrets_manager_arn") { if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.IBMDb2Settings = &dms.IBMDb2Settings{ + input.IBMDb2Settings = &awstypes.IBMDb2Settings{ DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), } } else { - input.IBMDb2Settings = &dms.IBMDb2Settings{ + input.IBMDb2Settings = &awstypes.IBMDb2Settings{ Username: aws.String(d.Get(names.AttrUsername).(string)), Password: aws.String(d.Get(names.AttrPassword).(string)), ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get(names.AttrPort).(int))), + Port: aws.Int32(int32(d.Get(names.AttrPort).(int))), DatabaseName: aws.String(d.Get(names.AttrDatabaseName).(string)), } input.EngineName = aws.String(engineName) // Must be included (should be 'db2') @@ -1388,7 +1389,7 @@ func 
resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } if d.HasChange(names.AttrPort) { - input.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) } if d.HasChange("server_name") { @@ -1400,7 +1401,7 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in } } - _, err := conn.ModifyEndpointWithContext(ctx, input) + _, err := conn.ModifyEndpoint(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DMS Endpoint (%s): %s", d.Id(), err) @@ -1419,14 +1420,14 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceEndpointDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Endpoint: (%s)", d.Id()) - _, err := conn.DeleteEndpointWithContext(ctx, &dms.DeleteEndpointInput{ + _, err := conn.DeleteEndpoint(ctx, &dms.DeleteEndpointInput{ EndpointArn: aws.String(d.Get("endpoint_arn").(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -1434,7 +1435,7 @@ func resourceEndpointDelete(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "deleting DMS Endpoint (%s): %s", d.Id(), err) } - if err = waitEndpointDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if _, err := waitEndpointDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for DMS Endpoint (%s) delete: %s", d.Id(), err) } @@ -1527,18 +1528,18 @@ func validateSSEKMSKey(settingsAttrName string, d *schema.ResourceDiff) error { return nil } -func resourceEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) error { - 
d.SetId(aws.StringValue(endpoint.EndpointIdentifier)) +func resourceEndpointSetState(d *schema.ResourceData, endpoint *awstypes.Endpoint) error { + d.SetId(aws.ToString(endpoint.EndpointIdentifier)) d.Set(names.AttrCertificateARN, endpoint.CertificateArn) d.Set("endpoint_arn", endpoint.EndpointArn) d.Set("endpoint_id", endpoint.EndpointIdentifier) // For some reason the AWS API only accepts lowercase type but returns it as uppercase - d.Set(names.AttrEndpointType, strings.ToLower(aws.StringValue(endpoint.EndpointType))) + d.Set(names.AttrEndpointType, strings.ToLower(string(endpoint.EndpointType))) d.Set("engine_name", endpoint.EngineName) d.Set("extra_connection_attributes", endpoint.ExtraConnectionAttributes) - switch aws.StringValue(endpoint.EngineName) { + switch aws.ToString(endpoint.EngineName) { case engineNameAurora, engineNameMariadb, engineNameMySQL: if endpoint.MySQLSettings != nil { d.Set(names.AttrUsername, endpoint.MySQLSettings.Username) @@ -1687,15 +1688,16 @@ func resourceEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) er return nil } -func steadyEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) error { +func steadyEndpointReplicationTasks(ctx context.Context, conn *dms.Client, arn string) error { tasks, err := findReplicationTasksByEndpointARN(ctx, conn, arn) + if err != nil { return err } for _, task := range tasks { - rtID := aws.StringValue(task.ReplicationTaskIdentifier) - switch aws.StringValue(task.Status) { + rtID := aws.ToString(task.ReplicationTaskIdentifier) + switch aws.ToString(task.Status) { case replicationTaskStatusRunning, replicationTaskStatusFailed, replicationTaskStatusReady, replicationTaskStatusStopped: continue case replicationTaskStatusCreating, replicationTaskStatusDeleting, replicationTaskStatusModifying, replicationTaskStatusStopping, replicationTaskStatusStarting: @@ -1708,20 +1710,21 @@ func steadyEndpointReplicationTasks(ctx context.Context, conn 
*dms.DatabaseMigra return nil } -func stopEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) ([]*dms.ReplicationTask, error) { +func stopEndpointReplicationTasks(ctx context.Context, conn *dms.Client, arn string) ([]awstypes.ReplicationTask, error) { if err := steadyEndpointReplicationTasks(ctx, conn, arn); err != nil { return nil, err } tasks, err := findReplicationTasksByEndpointARN(ctx, conn, arn) + if err != nil { return nil, err } - var stoppedTasks []*dms.ReplicationTask + var stoppedTasks []awstypes.ReplicationTask for _, task := range tasks { - rtID := aws.StringValue(task.ReplicationTaskIdentifier) - switch aws.StringValue(task.Status) { + rtID := aws.ToString(task.ReplicationTaskIdentifier) + switch aws.ToString(task.Status) { case replicationTaskStatusRunning: err := stopReplicationTask(ctx, conn, rtID) @@ -1737,7 +1740,9 @@ func stopEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrati return stoppedTasks, nil } -func startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, tasks []*dms.ReplicationTask) error { +func startEndpointReplicationTasks(ctx context.Context, conn *dms.Client, arn string, tasks []awstypes.ReplicationTask) error { + const maxConnTestWaitTime = 120 * time.Second + if len(tasks) == 0 { return nil } @@ -1747,12 +1752,12 @@ func startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrat } for _, task := range tasks { - _, err := conn.TestConnectionWithContext(ctx, &dms.TestConnectionInput{ + _, err := conn.TestConnection(ctx, &dms.TestConnectionInput{ EndpointArn: aws.String(arn), ReplicationInstanceArn: task.ReplicationInstanceArn, }) - if tfawserr.ErrMessageContains(err, dms.ErrCodeInvalidResourceStateFault, "already being tested") { + if errs.IsAErrorMessageContains[*awstypes.InvalidResourceStateFault](err, "already being tested") { continue } @@ -1760,20 +1765,22 @@ func startEndpointReplicationTasks(ctx 
context.Context, conn *dms.DatabaseMigrat return fmt.Errorf("testing connection: %w", err) } - err = conn.WaitUntilTestConnectionSucceedsWithContext(ctx, &dms.DescribeConnectionsInput{ - Filters: []*dms.Filter{ + waiter := dms.NewTestConnectionSucceedsWaiter(conn) + + err = waiter.Wait(ctx, &dms.DescribeConnectionsInput{ + Filters: []awstypes.Filter{ { Name: aws.String("endpoint-arn"), - Values: aws.StringSlice([]string{arn}), + Values: []string{arn}, }, }, - }) + }, maxConnTestWaitTime) if err != nil { return fmt.Errorf("waiting until test connection succeeds: %w", err) } - if err := startReplicationTask(ctx, conn, aws.StringValue(task.ReplicationTaskIdentifier)); err != nil { + if err := startReplicationTask(ctx, conn, aws.ToString(task.ReplicationTaskIdentifier)); err != nil { return fmt.Errorf("starting replication task: %w", err) } } @@ -1781,12 +1788,12 @@ func startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrat return nil } -func findReplicationTasksByEndpointARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) ([]*dms.ReplicationTask, error) { +func findReplicationTasksByEndpointARN(ctx context.Context, conn *dms.Client, arn string) ([]awstypes.ReplicationTask, error) { input := &dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("endpoint-arn"), - Values: aws.StringSlice([]string{arn}), + Values: []string{arn}, }, }, } @@ -1794,28 +1801,28 @@ func findReplicationTasksByEndpointARN(ctx context.Context, conn *dms.DatabaseMi return findReplicationTasks(ctx, conn, input) } -func flattenOpenSearchSettings(settings *dms.ElasticsearchSettings) []map[string]interface{} { +func flattenOpenSearchSettings(settings *awstypes.ElasticsearchSettings) []map[string]interface{} { if settings == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "endpoint_uri": aws.StringValue(settings.EndpointUri), - "error_retry_duration": 
aws.Int64Value(settings.ErrorRetryDuration), - "full_load_error_percentage": aws.Int64Value(settings.FullLoadErrorPercentage), - "service_access_role_arn": aws.StringValue(settings.ServiceAccessRoleArn), - "use_new_mapping_type": aws.BoolValue(settings.UseNewMappingType), + "endpoint_uri": aws.ToString(settings.EndpointUri), + "error_retry_duration": aws.ToInt32(settings.ErrorRetryDuration), + "full_load_error_percentage": aws.ToInt32(settings.FullLoadErrorPercentage), + "service_access_role_arn": aws.ToString(settings.ServiceAccessRoleArn), + "use_new_mapping_type": aws.ToBool(settings.UseNewMappingType), } return []map[string]interface{}{m} } -func expandKafkaSettings(tfMap map[string]interface{}) *dms.KafkaSettings { +func expandKafkaSettings(tfMap map[string]interface{}) *awstypes.KafkaSettings { if tfMap == nil { return nil } - apiObject := &dms.KafkaSettings{} + apiObject := &awstypes.KafkaSettings{} if v, ok := tfMap["broker"].(string); ok && v != "" { apiObject.Broker = aws.String(v) @@ -1842,11 +1849,11 @@ func expandKafkaSettings(tfMap map[string]interface{}) *dms.KafkaSettings { } if v, ok := tfMap["message_format"].(string); ok && v != "" { - apiObject.MessageFormat = aws.String(v) + apiObject.MessageFormat = awstypes.MessageFormatValue(v) } if v, ok := tfMap["message_max_bytes"].(int); ok && v != 0 { - apiObject.MessageMaxBytes = aws.Int64(int64(v)) + apiObject.MessageMaxBytes = aws.Int32(int32(v)) } if v, ok := tfMap["no_hex_prefix"].(bool); ok { @@ -1866,7 +1873,7 @@ func expandKafkaSettings(tfMap map[string]interface{}) *dms.KafkaSettings { } if v, ok := tfMap["security_protocol"].(string); ok && v != "" { - apiObject.SecurityProtocol = aws.String(v) + apiObject.SecurityProtocol = awstypes.KafkaSecurityProtocol(v) } if v, ok := tfMap["ssl_ca_certificate_arn"].(string); ok && v != "" { @@ -1892,7 +1899,7 @@ func expandKafkaSettings(tfMap map[string]interface{}) *dms.KafkaSettings { return apiObject } -func flattenKafkaSettings(apiObject 
*dms.KafkaSettings) map[string]interface{} { +func flattenKafkaSettings(apiObject *awstypes.KafkaSettings) map[string]interface{} { if apiObject == nil { return nil } @@ -1900,86 +1907,82 @@ func flattenKafkaSettings(apiObject *dms.KafkaSettings) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.Broker; v != nil { - tfMap["broker"] = aws.StringValue(v) + tfMap["broker"] = aws.ToString(v) } if v := apiObject.IncludeControlDetails; v != nil { - tfMap["include_control_details"] = aws.BoolValue(v) + tfMap["include_control_details"] = aws.ToBool(v) } if v := apiObject.IncludeNullAndEmpty; v != nil { - tfMap["include_null_and_empty"] = aws.BoolValue(v) + tfMap["include_null_and_empty"] = aws.ToBool(v) } if v := apiObject.IncludePartitionValue; v != nil { - tfMap["include_partition_value"] = aws.BoolValue(v) + tfMap["include_partition_value"] = aws.ToBool(v) } if v := apiObject.IncludeTableAlterOperations; v != nil { - tfMap["include_table_alter_operations"] = aws.BoolValue(v) + tfMap["include_table_alter_operations"] = aws.ToBool(v) } if v := apiObject.IncludeTransactionDetails; v != nil { - tfMap["include_transaction_details"] = aws.BoolValue(v) + tfMap["include_transaction_details"] = aws.ToBool(v) } - if v := apiObject.MessageFormat; v != nil { - tfMap["message_format"] = aws.StringValue(v) - } + tfMap["message_format"] = string(apiObject.MessageFormat) if v := apiObject.MessageMaxBytes; v != nil { - tfMap["message_max_bytes"] = aws.Int64Value(v) + tfMap["message_max_bytes"] = aws.ToInt32(v) } if v := apiObject.NoHexPrefix; v != nil { - tfMap["no_hex_prefix"] = aws.BoolValue(v) + tfMap["no_hex_prefix"] = aws.ToBool(v) } if v := apiObject.PartitionIncludeSchemaTable; v != nil { - tfMap["partition_include_schema_table"] = aws.BoolValue(v) + tfMap["partition_include_schema_table"] = aws.ToBool(v) } if v := apiObject.SaslPassword; v != nil { - tfMap["sasl_password"] = aws.StringValue(v) + tfMap["sasl_password"] = aws.ToString(v) } if v := 
apiObject.SaslUsername; v != nil { - tfMap["sasl_username"] = aws.StringValue(v) + tfMap["sasl_username"] = aws.ToString(v) } - if v := apiObject.SecurityProtocol; v != nil { - tfMap["security_protocol"] = aws.StringValue(v) - } + tfMap["security_protocol"] = string(apiObject.SecurityProtocol) if v := apiObject.SslCaCertificateArn; v != nil { - tfMap["ssl_ca_certificate_arn"] = aws.StringValue(v) + tfMap["ssl_ca_certificate_arn"] = aws.ToString(v) } if v := apiObject.SslClientCertificateArn; v != nil { - tfMap["ssl_client_certificate_arn"] = aws.StringValue(v) + tfMap["ssl_client_certificate_arn"] = aws.ToString(v) } if v := apiObject.SslClientKeyArn; v != nil { - tfMap["ssl_client_key_arn"] = aws.StringValue(v) + tfMap["ssl_client_key_arn"] = aws.ToString(v) } if v := apiObject.SslClientKeyPassword; v != nil { - tfMap["ssl_client_key_password"] = aws.StringValue(v) + tfMap["ssl_client_key_password"] = aws.ToString(v) } if v := apiObject.Topic; v != nil { - tfMap["topic"] = aws.StringValue(v) + tfMap["topic"] = aws.ToString(v) } return tfMap } -func expandKinesisSettings(tfMap map[string]interface{}) *dms.KinesisSettings { +func expandKinesisSettings(tfMap map[string]interface{}) *awstypes.KinesisSettings { if tfMap == nil { return nil } - apiObject := &dms.KinesisSettings{} + apiObject := &awstypes.KinesisSettings{} if v, ok := tfMap["include_control_details"].(bool); ok { apiObject.IncludeControlDetails = aws.Bool(v) @@ -2002,7 +2005,7 @@ func expandKinesisSettings(tfMap map[string]interface{}) *dms.KinesisSettings { } if v, ok := tfMap["message_format"].(string); ok && v != "" { - apiObject.MessageFormat = aws.String(v) + apiObject.MessageFormat = awstypes.MessageFormatValue(v) } if v, ok := tfMap["partition_include_schema_table"].(bool); ok { @@ -2020,7 +2023,7 @@ func expandKinesisSettings(tfMap map[string]interface{}) *dms.KinesisSettings { return apiObject } -func flattenKinesisSettings(apiObject *dms.KinesisSettings) map[string]interface{} { +func 
flattenKinesisSettings(apiObject *awstypes.KinesisSettings) map[string]interface{} { if apiObject == nil { return nil } @@ -2028,79 +2031,77 @@ func flattenKinesisSettings(apiObject *dms.KinesisSettings) map[string]interface tfMap := map[string]interface{}{} if v := apiObject.IncludeControlDetails; v != nil { - tfMap["include_control_details"] = aws.BoolValue(v) + tfMap["include_control_details"] = aws.ToBool(v) } if v := apiObject.IncludeNullAndEmpty; v != nil { - tfMap["include_null_and_empty"] = aws.BoolValue(v) + tfMap["include_null_and_empty"] = aws.ToBool(v) } if v := apiObject.IncludePartitionValue; v != nil { - tfMap["include_partition_value"] = aws.BoolValue(v) + tfMap["include_partition_value"] = aws.ToBool(v) } if v := apiObject.IncludeTableAlterOperations; v != nil { - tfMap["include_table_alter_operations"] = aws.BoolValue(v) + tfMap["include_table_alter_operations"] = aws.ToBool(v) } if v := apiObject.IncludeTransactionDetails; v != nil { - tfMap["include_transaction_details"] = aws.BoolValue(v) + tfMap["include_transaction_details"] = aws.ToBool(v) } - if v := apiObject.MessageFormat; v != nil { - tfMap["message_format"] = aws.StringValue(v) - } + tfMap["message_format"] = string(apiObject.MessageFormat) if v := apiObject.PartitionIncludeSchemaTable; v != nil { - tfMap["partition_include_schema_table"] = aws.BoolValue(v) + tfMap["partition_include_schema_table"] = aws.ToBool(v) } if v := apiObject.ServiceAccessRoleArn; v != nil { - tfMap["service_access_role_arn"] = aws.StringValue(v) + tfMap["service_access_role_arn"] = aws.ToString(v) } if v := apiObject.StreamArn; v != nil { - tfMap[names.AttrStreamARN] = aws.StringValue(v) + tfMap[names.AttrStreamARN] = aws.ToString(v) } return tfMap } -func flattenMongoDBSettings(settings *dms.MongoDbSettings) []map[string]interface{} { +func flattenMongoDBSettings(settings *awstypes.MongoDbSettings) []map[string]interface{} { if settings == nil { return []map[string]interface{}{} } m := map[string]interface{}{ 
- "auth_type": aws.StringValue(settings.AuthType), - "auth_mechanism": aws.StringValue(settings.AuthMechanism), - "nesting_level": aws.StringValue(settings.NestingLevel), - "extract_doc_id": aws.StringValue(settings.ExtractDocId), - "docs_to_investigate": aws.StringValue(settings.DocsToInvestigate), - "auth_source": aws.StringValue(settings.AuthSource), + "auth_type": string(settings.AuthType), + "auth_mechanism": string(settings.AuthMechanism), + "nesting_level": string(settings.NestingLevel), + "extract_doc_id": aws.ToString(settings.ExtractDocId), + "docs_to_investigate": aws.ToString(settings.DocsToInvestigate), + "auth_source": aws.ToString(settings.AuthSource), } return []map[string]interface{}{m} } -func expandRedisSettings(tfMap map[string]interface{}) *dms.RedisSettings { +func expandRedisSettings(tfMap map[string]interface{}) *awstypes.RedisSettings { if tfMap == nil { return nil } - apiObject := &dms.RedisSettings{} + apiObject := &awstypes.RedisSettings{} if v, ok := tfMap["auth_password"].(string); ok && v != "" { apiObject.AuthPassword = aws.String(v) } if v, ok := tfMap["auth_type"].(string); ok && v != "" { - apiObject.AuthType = aws.String(v) + apiObject.AuthType = awstypes.RedisAuthTypeValue(v) } if v, ok := tfMap["auth_user_name"].(string); ok && v != "" { apiObject.AuthUserName = aws.String(v) } if v, ok := tfMap[names.AttrPort].(int); ok { - apiObject.Port = aws.Int64(int64(v)) + apiObject.Port = int32(v) } if v, ok := tfMap["server_name"].(string); ok && v != "" { apiObject.ServerName = aws.String(v) @@ -2109,13 +2110,13 @@ func expandRedisSettings(tfMap map[string]interface{}) *dms.RedisSettings { apiObject.SslCaCertificateArn = aws.String(v) } if v, ok := tfMap["ssl_security_protocol"].(string); ok && v != "" { - apiObject.SslSecurityProtocol = aws.String(v) + apiObject.SslSecurityProtocol = awstypes.SslSecurityProtocolValue(v) } return apiObject } -func flattenRedisSettings(apiObject *dms.RedisSettings) map[string]interface{} { +func 
flattenRedisSettings(apiObject *awstypes.RedisSettings) map[string]interface{} { if apiObject == nil { return nil } @@ -2123,52 +2124,45 @@ func flattenRedisSettings(apiObject *dms.RedisSettings) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.AuthPassword; v != nil { - tfMap["auth_password"] = aws.StringValue(v) - } - if v := apiObject.AuthType; v != nil { - tfMap["auth_type"] = aws.StringValue(v) + tfMap["auth_password"] = aws.ToString(v) } + tfMap["auth_type"] = string(apiObject.AuthType) if v := apiObject.AuthUserName; v != nil { - tfMap["auth_user_name"] = aws.StringValue(v) - } - if v := apiObject.Port; v != nil { - tfMap[names.AttrPort] = aws.Int64Value(v) + tfMap["auth_user_name"] = aws.ToString(v) } + tfMap[names.AttrPort] = apiObject.Port if v := apiObject.ServerName; v != nil { - tfMap["server_name"] = aws.StringValue(v) + tfMap["server_name"] = aws.ToString(v) } if v := apiObject.SslCaCertificateArn; v != nil { - tfMap["ssl_ca_certificate_arn"] = aws.StringValue(v) - } - if v := apiObject.SslSecurityProtocol; v != nil { - tfMap["ssl_security_protocol"] = aws.StringValue(v) + tfMap["ssl_ca_certificate_arn"] = aws.ToString(v) } - + tfMap["ssl_security_protocol"] = string(apiObject.SslSecurityProtocol) return tfMap } -func flattenRedshiftSettings(settings *dms.RedshiftSettings) []map[string]interface{} { +func flattenRedshiftSettings(settings *awstypes.RedshiftSettings) []map[string]interface{} { if settings == nil { return []map[string]interface{}{} } m := map[string]interface{}{ - "bucket_folder": aws.StringValue(settings.BucketFolder), - names.AttrBucketName: aws.StringValue(settings.BucketName), - "encryption_mode": aws.StringValue(settings.EncryptionMode), - "server_side_encryption_kms_key_id": aws.StringValue(settings.ServerSideEncryptionKmsKeyId), - "service_access_role_arn": aws.StringValue(settings.ServiceAccessRoleArn), + "bucket_folder": aws.ToString(settings.BucketFolder), + names.AttrBucketName: 
aws.ToString(settings.BucketName), + "encryption_mode": string(settings.EncryptionMode), + "server_side_encryption_kms_key_id": aws.ToString(settings.ServerSideEncryptionKmsKeyId), + "service_access_role_arn": aws.ToString(settings.ServiceAccessRoleArn), } return []map[string]interface{}{m} } -func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSettings { +func expandPostgreSQLSettings(tfMap map[string]interface{}) *awstypes.PostgreSQLSettings { if tfMap == nil { return nil } - apiObject := &dms.PostgreSQLSettings{} + apiObject := &awstypes.PostgreSQLSettings{} if v, ok := tfMap["after_connect_script"].(string); ok && v != "" { apiObject.AfterConnectScript = aws.String(v) @@ -2180,13 +2174,13 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti apiObject.CaptureDdls = aws.Bool(v) } if v, ok := tfMap["database_mode"].(string); ok && v != "" { - apiObject.DatabaseMode = aws.String(v) + apiObject.DatabaseMode = awstypes.DatabaseMode(v) } if v, ok := tfMap["ddl_artifacts_schema"].(string); ok && v != "" { apiObject.DdlArtifactsSchema = aws.String(v) } if v, ok := tfMap["execute_timeout"].(int); ok { - apiObject.ExecuteTimeout = aws.Int64(int64(v)) + apiObject.ExecuteTimeout = aws.Int32(int32(v)) } if v, ok := tfMap["fail_tasks_on_lob_truncation"].(bool); ok { apiObject.FailTasksOnLobTruncation = aws.Bool(v) @@ -2195,7 +2189,7 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti apiObject.HeartbeatEnable = aws.Bool(v) } if v, ok := tfMap["heartbeat_frequency"].(int); ok { - apiObject.HeartbeatFrequency = aws.Int64(int64(v)) + apiObject.HeartbeatFrequency = aws.Int32(int32(v)) } if v, ok := tfMap["heartbeat_schema"].(string); ok && v != "" { apiObject.HeartbeatSchema = aws.String(v) @@ -2207,13 +2201,13 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti apiObject.MapJsonbAsClob = aws.Bool(v) } if v, ok := tfMap["map_long_varchar_as"].(string); ok && v != 
"" { - apiObject.MapLongVarcharAs = aws.String(v) + apiObject.MapLongVarcharAs = awstypes.LongVarcharMappingType(v) } if v, ok := tfMap["max_file_size"].(int); ok { - apiObject.MaxFileSize = aws.Int64(int64(v)) + apiObject.MaxFileSize = aws.Int32(int32(v)) } if v, ok := tfMap["plugin_name"].(string); ok && v != "" { - apiObject.PluginName = aws.String(v) + apiObject.PluginName = awstypes.PluginNameValue(v) } if v, ok := tfMap["slot_name"].(string); ok && v != "" { apiObject.SlotName = aws.String(v) @@ -2222,7 +2216,7 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti return apiObject } -func flattenPostgreSQLSettings(apiObject *dms.PostgreSQLSettings) []map[string]interface{} { +func flattenPostgreSQLSettings(apiObject *awstypes.PostgreSQLSettings) []map[string]interface{} { if apiObject == nil { return nil } @@ -2230,63 +2224,57 @@ func flattenPostgreSQLSettings(apiObject *dms.PostgreSQLSettings) []map[string]i tfMap := map[string]interface{}{} if v := apiObject.AfterConnectScript; v != nil { - tfMap["after_connect_script"] = aws.StringValue(v) + tfMap["after_connect_script"] = aws.ToString(v) } if v := apiObject.BabelfishDatabaseName; v != nil { - tfMap["babelfish_database_name"] = aws.StringValue(v) + tfMap["babelfish_database_name"] = aws.ToString(v) } if v := apiObject.CaptureDdls; v != nil { - tfMap["capture_ddls"] = aws.BoolValue(v) - } - if v := apiObject.DatabaseMode; v != nil { - tfMap["database_mode"] = aws.StringValue(v) + tfMap["capture_ddls"] = aws.ToBool(v) } + tfMap["database_mode"] = string(apiObject.DatabaseMode) if v := apiObject.DdlArtifactsSchema; v != nil { - tfMap["ddl_artifacts_schema"] = aws.StringValue(v) + tfMap["ddl_artifacts_schema"] = aws.ToString(v) } if v := apiObject.ExecuteTimeout; v != nil { - tfMap["execute_timeout"] = aws.Int64Value(v) + tfMap["execute_timeout"] = aws.ToInt32(v) } if v := apiObject.FailTasksOnLobTruncation; v != nil { - tfMap["fail_tasks_on_lob_truncation"] = aws.BoolValue(v) + 
tfMap["fail_tasks_on_lob_truncation"] = aws.ToBool(v) } if v := apiObject.HeartbeatEnable; v != nil { - tfMap["heartbeat_enable"] = aws.BoolValue(v) + tfMap["heartbeat_enable"] = aws.ToBool(v) } if v := apiObject.HeartbeatFrequency; v != nil { - tfMap["heartbeat_frequency"] = aws.Int64Value(v) + tfMap["heartbeat_frequency"] = aws.ToInt32(v) } if v := apiObject.HeartbeatSchema; v != nil { - tfMap["heartbeat_schema"] = aws.StringValue(v) + tfMap["heartbeat_schema"] = aws.ToString(v) } if v := apiObject.MapBooleanAsBoolean; v != nil { - tfMap["map_boolean_as_boolean"] = aws.BoolValue(v) + tfMap["map_boolean_as_boolean"] = aws.ToBool(v) } if v := apiObject.MapJsonbAsClob; v != nil { - tfMap["map_jsonb_as_clob"] = aws.BoolValue(v) - } - if v := apiObject.MapLongVarcharAs; v != nil { - tfMap["map_long_varchar_as"] = aws.StringValue(v) + tfMap["map_jsonb_as_clob"] = aws.ToBool(v) } + tfMap["map_long_varchar_as"] = string(apiObject.MapLongVarcharAs) if v := apiObject.MaxFileSize; v != nil { - tfMap["max_file_size"] = aws.Int64Value(v) - } - if v := apiObject.PluginName; v != nil { - tfMap["plugin_name"] = aws.StringValue(v) + tfMap["max_file_size"] = aws.ToInt32(v) } + tfMap["plugin_name"] = string(apiObject.PluginName) if v := apiObject.SlotName; v != nil { - tfMap["slot_name"] = aws.StringValue(v) + tfMap["slot_name"] = aws.ToString(v) } return []map[string]interface{}{tfMap} } -func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { +func expandS3Settings(tfMap map[string]interface{}) *awstypes.S3Settings { if tfMap == nil { return nil } - apiObject := &dms.S3Settings{} + apiObject := &awstypes.S3Settings{} if v, ok := tfMap["add_column_name"].(bool); ok { apiObject.AddColumnName = aws.Bool(v) @@ -2298,7 +2286,7 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.BucketName = aws.String(v) } if v, ok := tfMap["canned_acl_for_objects"].(string); ok { - apiObject.CannedAclForObjects = aws.String(v) + 
apiObject.CannedAclForObjects = awstypes.CannedAclForObjectsValue(v) } if v, ok := tfMap["cdc_inserts_and_updates"].(bool); ok { apiObject.CdcInsertsAndUpdates = aws.Bool(v) @@ -2307,16 +2295,16 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.CdcInsertsOnly = aws.Bool(v) } if v, ok := tfMap["cdc_max_batch_interval"].(int); ok { - apiObject.CdcMaxBatchInterval = aws.Int64(int64(v)) + apiObject.CdcMaxBatchInterval = aws.Int32(int32(v)) } if v, ok := tfMap["cdc_min_file_size"].(int); ok { - apiObject.CdcMinFileSize = aws.Int64(int64(v)) + apiObject.CdcMinFileSize = aws.Int32(int32(v)) } if v, ok := tfMap["cdc_path"].(string); ok { apiObject.CdcPath = aws.String(v) } if v, ok := tfMap["compression_type"].(string); ok { - apiObject.CompressionType = aws.String(v) + apiObject.CompressionType = awstypes.CompressionTypeValue(v) } if v, ok := tfMap["csv_delimiter"].(string); ok { apiObject.CsvDelimiter = aws.String(v) @@ -2331,31 +2319,31 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.CsvRowDelimiter = aws.String(v) } if v, ok := tfMap["data_format"].(string); ok { - apiObject.DataFormat = aws.String(v) + apiObject.DataFormat = awstypes.DataFormatValue(v) } if v, ok := tfMap["data_page_size"].(int); ok { - apiObject.DataPageSize = aws.Int64(int64(v)) + apiObject.DataPageSize = aws.Int32(int32(v)) } if v, ok := tfMap["date_partition_delimiter"].(string); ok { - apiObject.DatePartitionDelimiter = aws.String(v) + apiObject.DatePartitionDelimiter = awstypes.DatePartitionDelimiterValue(v) } if v, ok := tfMap["date_partition_enabled"].(bool); ok { apiObject.DatePartitionEnabled = aws.Bool(v) } if v, ok := tfMap["date_partition_sequence"].(string); ok { - apiObject.DatePartitionSequence = aws.String(v) + apiObject.DatePartitionSequence = awstypes.DatePartitionSequenceValue(v) } if v, ok := tfMap["dict_page_size_limit"].(int); ok { - apiObject.DictPageSizeLimit = aws.Int64(int64(v)) + apiObject.DictPageSizeLimit 
= aws.Int32(int32(v)) } if v, ok := tfMap["enable_statistics"].(bool); ok { apiObject.EnableStatistics = aws.Bool(v) } if v, ok := tfMap["encoding_type"].(string); ok { - apiObject.EncodingType = aws.String(v) + apiObject.EncodingType = awstypes.EncodingTypeValue(v) } if v, ok := tfMap["encryption_mode"].(string); ok { - apiObject.EncryptionMode = aws.String(v) + apiObject.EncryptionMode = awstypes.EncryptionModeValue(v) } if v, ok := tfMap["external_table_definition"].(string); ok { apiObject.ExternalTableDefinition = aws.String(v) @@ -2364,19 +2352,19 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.GlueCatalogGeneration = aws.Bool(v) } if v, ok := tfMap["ignore_header_rows"].(int); ok { - apiObject.IgnoreHeaderRows = aws.Int64(int64(v)) + apiObject.IgnoreHeaderRows = aws.Int32(int32(v)) } if v, ok := tfMap["include_op_for_full_load"].(bool); ok { apiObject.IncludeOpForFullLoad = aws.Bool(v) } if v, ok := tfMap["max_file_size"].(int); ok { - apiObject.MaxFileSize = aws.Int64(int64(v)) + apiObject.MaxFileSize = aws.Int32(int32(v)) } if v, ok := tfMap["parquet_timestamp_in_millisecond"].(bool); ok { apiObject.ParquetTimestampInMillisecond = aws.Bool(v) } if v, ok := tfMap["parquet_version"].(string); ok { - apiObject.ParquetVersion = aws.String(v) + apiObject.ParquetVersion = awstypes.ParquetVersionValue(v) } if v, ok := tfMap["preserve_transactions"].(bool); ok { apiObject.PreserveTransactions = aws.Bool(v) @@ -2385,7 +2373,7 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { apiObject.Rfc4180 = aws.Bool(v) } if v, ok := tfMap["row_group_length"].(int); ok { - apiObject.RowGroupLength = aws.Int64(int64(v)) + apiObject.RowGroupLength = aws.Int32(int32(v)) } if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok { apiObject.ServerSideEncryptionKmsKeyId = aws.String(v) @@ -2406,7 +2394,7 @@ func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { return apiObject } -func 
flattenS3Settings(apiObject *dms.S3Settings) []map[string]interface{} { +func flattenS3Settings(apiObject *awstypes.S3Settings) []map[string]interface{} { if apiObject == nil { return []map[string]interface{}{} } @@ -2414,115 +2402,99 @@ func flattenS3Settings(apiObject *dms.S3Settings) []map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.AddColumnName; v != nil { - tfMap["add_column_name"] = aws.BoolValue(v) + tfMap["add_column_name"] = aws.ToBool(v) } if v := apiObject.BucketFolder; v != nil { - tfMap["bucket_folder"] = aws.StringValue(v) + tfMap["bucket_folder"] = aws.ToString(v) } if v := apiObject.BucketName; v != nil { - tfMap[names.AttrBucketName] = aws.StringValue(v) - } - if v := apiObject.CannedAclForObjects; v != nil { - tfMap["canned_acl_for_objects"] = aws.StringValue(v) + tfMap[names.AttrBucketName] = aws.ToString(v) } + tfMap["canned_acl_for_objects"] = string(apiObject.CannedAclForObjects) if v := apiObject.CdcInsertsAndUpdates; v != nil { - tfMap["cdc_inserts_and_updates"] = aws.BoolValue(v) + tfMap["cdc_inserts_and_updates"] = aws.ToBool(v) } if v := apiObject.CdcInsertsOnly; v != nil { - tfMap["cdc_inserts_only"] = aws.BoolValue(v) + tfMap["cdc_inserts_only"] = aws.ToBool(v) } if v := apiObject.CdcMaxBatchInterval; v != nil { - tfMap["cdc_max_batch_interval"] = aws.Int64Value(v) + tfMap["cdc_max_batch_interval"] = aws.ToInt32(v) } if v := apiObject.CdcMinFileSize; v != nil { - tfMap["cdc_min_file_size"] = aws.Int64Value(v) + tfMap["cdc_min_file_size"] = aws.ToInt32(v) } if v := apiObject.CdcPath; v != nil { - tfMap["cdc_path"] = aws.StringValue(v) - } - if v := apiObject.CompressionType; v != nil { - tfMap["compression_type"] = aws.StringValue(v) + tfMap["cdc_path"] = aws.ToString(v) } + tfMap["compression_type"] = string(apiObject.CompressionType) if v := apiObject.CsvDelimiter; v != nil { - tfMap["csv_delimiter"] = aws.StringValue(v) + tfMap["csv_delimiter"] = aws.ToString(v) } if v := apiObject.CsvNoSupValue; v != nil 
{ - tfMap["csv_no_sup_value"] = aws.StringValue(v) + tfMap["csv_no_sup_value"] = aws.ToString(v) } if v := apiObject.CsvNullValue; v != nil { - tfMap["csv_null_value"] = aws.StringValue(v) + tfMap["csv_null_value"] = aws.ToString(v) } if v := apiObject.CsvRowDelimiter; v != nil { - tfMap["csv_row_delimiter"] = aws.StringValue(v) - } - if v := apiObject.DataFormat; v != nil { - tfMap["data_format"] = aws.StringValue(v) + tfMap["csv_row_delimiter"] = aws.ToString(v) } + tfMap["data_format"] = string(apiObject.DataFormat) if v := apiObject.DataPageSize; v != nil { - tfMap["data_page_size"] = aws.Int64Value(v) - } - if v := apiObject.DatePartitionDelimiter; v != nil { - tfMap["date_partition_delimiter"] = aws.StringValue(v) + tfMap["data_page_size"] = aws.ToInt32(v) } + tfMap["date_partition_delimiter"] = string(apiObject.DatePartitionDelimiter) if v := apiObject.DatePartitionEnabled; v != nil { - tfMap["date_partition_enabled"] = aws.BoolValue(v) - } - if v := apiObject.DatePartitionSequence; v != nil { - tfMap["date_partition_sequence"] = aws.StringValue(v) + tfMap["date_partition_enabled"] = aws.ToBool(v) } + tfMap["date_partition_sequence"] = string(apiObject.DatePartitionSequence) if v := apiObject.DictPageSizeLimit; v != nil { - tfMap["dict_page_size_limit"] = aws.Int64Value(v) + tfMap["dict_page_size_limit"] = aws.ToInt32(v) } if v := apiObject.EnableStatistics; v != nil { - tfMap["enable_statistics"] = aws.BoolValue(v) - } - if v := apiObject.EncodingType; v != nil { - tfMap["encoding_type"] = aws.StringValue(v) - } - if v := apiObject.EncryptionMode; v != nil { - tfMap["encryption_mode"] = aws.StringValue(v) + tfMap["enable_statistics"] = aws.ToBool(v) } + tfMap["encoding_type"] = string(apiObject.EncodingType) + tfMap["encryption_mode"] = string(apiObject.EncryptionMode) if v := apiObject.ExternalTableDefinition; v != nil { - tfMap["external_table_definition"] = aws.StringValue(v) + tfMap["external_table_definition"] = aws.ToString(v) } if v := 
apiObject.GlueCatalogGeneration; v != nil { - tfMap["glue_catalog_generation"] = aws.BoolValue(v) + tfMap["glue_catalog_generation"] = aws.ToBool(v) } if v := apiObject.IgnoreHeaderRows; v != nil { - tfMap["ignore_header_rows"] = aws.Int64Value(v) + tfMap["ignore_header_rows"] = aws.ToInt32(v) } if v := apiObject.IncludeOpForFullLoad; v != nil { - tfMap["include_op_for_full_load"] = aws.BoolValue(v) + tfMap["include_op_for_full_load"] = aws.ToBool(v) } if v := apiObject.MaxFileSize; v != nil { - tfMap["max_file_size"] = aws.Int64Value(v) + tfMap["max_file_size"] = aws.ToInt32(v) } if v := apiObject.ParquetTimestampInMillisecond; v != nil { - tfMap["parquet_timestamp_in_millisecond"] = aws.BoolValue(v) - } - if v := apiObject.ParquetVersion; v != nil { - tfMap["parquet_version"] = aws.StringValue(v) + tfMap["parquet_timestamp_in_millisecond"] = aws.ToBool(v) } + tfMap["parquet_version"] = string(apiObject.ParquetVersion) if v := apiObject.Rfc4180; v != nil { - tfMap["rfc_4180"] = aws.BoolValue(v) + tfMap["rfc_4180"] = aws.ToBool(v) } if v := apiObject.RowGroupLength; v != nil { - tfMap["row_group_length"] = aws.Int64Value(v) + tfMap["row_group_length"] = aws.ToInt32(v) } if v := apiObject.ServerSideEncryptionKmsKeyId; v != nil { - tfMap["server_side_encryption_kms_key_id"] = aws.StringValue(v) + tfMap["server_side_encryption_kms_key_id"] = aws.ToString(v) } if v := apiObject.ServiceAccessRoleArn; v != nil { - tfMap["service_access_role_arn"] = aws.StringValue(v) + tfMap["service_access_role_arn"] = aws.ToString(v) } if v := apiObject.TimestampColumnName; v != nil { - tfMap["timestamp_column_name"] = aws.StringValue(v) + tfMap["timestamp_column_name"] = aws.ToString(v) } if v := apiObject.UseCsvNoSupValue; v != nil { - tfMap["use_csv_no_sup_value"] = aws.BoolValue(v) + tfMap["use_csv_no_sup_value"] = aws.ToBool(v) } if v := apiObject.UseTaskStartTimeForFullLoadTimestamp; v != nil { - tfMap["use_task_start_time_for_full_load_timestamp"] = aws.BoolValue(v) + 
tfMap["use_task_start_time_for_full_load_timestamp"] = aws.ToBool(v) } return []map[string]interface{}{tfMap} @@ -2620,7 +2592,7 @@ func expandTopLevelConnectionInfo(d *schema.ResourceData, input *dms.CreateEndpo input.Username = aws.String(d.Get(names.AttrUsername).(string)) input.Password = aws.String(d.Get(names.AttrPassword).(string)) input.ServerName = aws.String(d.Get("server_name").(string)) - input.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) if v, ok := d.GetOk(names.AttrDatabaseName); ok { input.DatabaseName = aws.String(v.(string)) @@ -2631,26 +2603,26 @@ func expandTopLevelConnectionInfoModify(d *schema.ResourceData, input *dms.Modif input.Username = aws.String(d.Get(names.AttrUsername).(string)) input.Password = aws.String(d.Get(names.AttrPassword).(string)) input.ServerName = aws.String(d.Get("server_name").(string)) - input.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) + input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) if v, ok := d.GetOk(names.AttrDatabaseName); ok { input.DatabaseName = aws.String(v.(string)) } } -func flattenTopLevelConnectionInfo(d *schema.ResourceData, endpoint *dms.Endpoint) { +func flattenTopLevelConnectionInfo(d *schema.ResourceData, endpoint *awstypes.Endpoint) { d.Set(names.AttrUsername, endpoint.Username) d.Set("server_name", endpoint.ServerName) d.Set(names.AttrPort, endpoint.Port) d.Set(names.AttrDatabaseName, endpoint.DatabaseName) } -func FindEndpointByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.Endpoint, error) { +func findEndpointByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.Endpoint, error) { input := &dms.DescribeEndpointsInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("endpoint-id"), - Values: aws.StringSlice([]string{id}), + Values: []string{id}, }, }, } @@ -2658,50 +2630,43 @@ func FindEndpointByID(ctx context.Context, conn 
*dms.DatabaseMigrationService, i return findEndpoint(ctx, conn, input) } -func findEndpoint(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEndpointsInput) (*dms.Endpoint, error) { +func findEndpoint(ctx context.Context, conn *dms.Client, input *dms.DescribeEndpointsInput) (*awstypes.Endpoint, error) { output, err := findEndpoints(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findEndpoints(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEndpointsInput) ([]*dms.Endpoint, error) { - var output []*dms.Endpoint +func findEndpoints(ctx context.Context, conn *dms.Client, input *dms.DescribeEndpointsInput) ([]awstypes.Endpoint, error) { + var output []awstypes.Endpoint - err := conn.DescribeEndpointsPagesWithContext(ctx, input, func(page *dms.DescribeEndpointsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeEndpointsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.Endpoints { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.Endpoints...) 
} return output, nil } -func statusEndpoint(ctx context.Context, conn *dms.DatabaseMigrationService, id string) retry.StateRefreshFunc { +func statusEndpoint(ctx context.Context, conn *dms.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindEndpointByID(ctx, conn, id) + output, err := findEndpointByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -2711,11 +2676,11 @@ func statusEndpoint(ctx context.Context, conn *dms.DatabaseMigrationService, id return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } -func waitEndpointDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { +func waitEndpointDeleted(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.Endpoint, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{endpointStatusDeleting}, Target: []string{}, @@ -2723,7 +2688,11 @@ func waitEndpointDeleted(ctx context.Context, conn *dms.DatabaseMigrationService Timeout: timeout, } - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.Endpoint); ok { + return output, err + } - return err + return nil, err } diff --git a/internal/service/dms/endpoint_data_source.go b/internal/service/dms/endpoint_data_source.go index f6391acd9d4..89f27a6d771 100644 --- a/internal/service/dms/endpoint_data_source.go +++ b/internal/service/dms/endpoint_data_source.go @@ -6,7 +6,7 @@ package dms import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -15,8 +15,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// 
@SDKDataSource("aws_dms_endpoint") -func DataSourceEndpoint() *schema.Resource { +// @SDKDataSource("aws_dms_endpoint", name="Endpoint") +func dataSourceEndpoint() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceEndpointRead, @@ -572,20 +572,20 @@ func DataSourceEndpoint() *schema.Resource { func dataSourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig endptID := d.Get("endpoint_id").(string) - out, err := FindEndpointByID(ctx, conn, endptID) + out, err := findEndpointByID(ctx, conn, endptID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Endpoint (%s): %s", endptID, err) } - d.SetId(aws.StringValue(out.EndpointIdentifier)) + d.SetId(aws.ToString(out.EndpointIdentifier)) d.Set("endpoint_id", out.EndpointIdentifier) - arn := aws.StringValue(out.EndpointArn) + arn := aws.ToString(out.EndpointArn) d.Set("endpoint_arn", arn) d.Set(names.AttrEndpointType, out.EndpointType) d.Set(names.AttrDatabaseName, out.DatabaseName) @@ -600,6 +600,7 @@ func dataSourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta in } tags, err := listTags(ctx, conn, arn) + if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for DMS Endpoint (%s): %s", arn, err) } diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 24c3d91ba1d..49afff18070 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -2212,7 +2212,7 @@ func TestAccDMSEndpoint_pauseReplicationTasks(t *testing.T) { endpointNameSource := "aws_dms_endpoint.source" endpointNameTarget := "aws_dms_endpoint.target" replicationTaskName := "aws_dms_replication_task.test" - var task dms.ReplicationTask + var task awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -2253,7 +2253,7 @@ func testAccCheckResourceAttrRegionalHostname(resourceName, attributeName, servi func testAccCheckEndpointDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_dms_endpoint" { @@ -2284,11 +2284,7 @@ func testAccCheckEndpointExists(ctx context.Context, n string) resource.TestChec return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No DMS Endpoint ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindEndpointByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index a966dd5d7d2..d675a58a0ca 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -9,15 +9,16 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + dms 
"github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -28,7 +29,7 @@ import ( // @SDKResource("aws_dms_event_subscription", name="Event Subscription") // @Tags(identifierAttribute="arn") -func ResourceEventSubscription() *schema.Resource { +func resourceEventSubscription() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceEventSubscriptionCreate, ReadWithoutTimeout: resourceEventSubscriptionRead, @@ -96,12 +97,12 @@ func ResourceEventSubscription() *schema.Resource { func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) name := d.Get(names.AttrName).(string) input := &dms.CreateEventSubscriptionInput{ Enabled: aws.Bool(d.Get(names.AttrEnabled).(bool)), - EventCategories: flex.ExpandStringSet(d.Get("event_categories").(*schema.Set)), + EventCategories: flex.ExpandStringValueSet(d.Get("event_categories").(*schema.Set)), SnsTopicArn: aws.String(d.Get(names.AttrSNSTopicARN).(string)), SourceType: aws.String(d.Get(names.AttrSourceType).(string)), SubscriptionName: aws.String(name), @@ -109,10 +110,10 @@ func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData } if v, ok := d.GetOk("source_ids"); ok && 
v.(*schema.Set).Len() > 0 { - input.SourceIds = flex.ExpandStringSet(v.(*schema.Set)) + input.SourceIds = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := conn.CreateEventSubscriptionWithContext(ctx, input) + _, err := conn.CreateEventSubscription(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Event Subscription (%s): %s", name, err) @@ -129,9 +130,9 @@ func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - subscription, err := FindEventSubscriptionByName(ctx, conn, d.Id()) + subscription, err := findEventSubscriptionByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Event Subscription (%s) not found, removing from state", d.Id()) @@ -152,10 +153,10 @@ func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, }.String() d.Set(names.AttrARN, arn) d.Set(names.AttrEnabled, subscription.Enabled) - d.Set("event_categories", aws.StringValueSlice(subscription.EventCategoriesList)) + d.Set("event_categories", subscription.EventCategoriesList) d.Set(names.AttrName, d.Id()) d.Set(names.AttrSNSTopicARN, subscription.SnsTopicArn) - d.Set("source_ids", aws.StringValueSlice(subscription.SourceIdsList)) + d.Set("source_ids", subscription.SourceIdsList) d.Set(names.AttrSourceType, subscription.SourceType) return diags @@ -163,18 +164,18 @@ func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, func resourceEventSubscriptionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, 
names.AttrTagsAll) { input := &dms.ModifyEventSubscriptionInput{ Enabled: aws.Bool(d.Get(names.AttrEnabled).(bool)), - EventCategories: flex.ExpandStringSet(d.Get("event_categories").(*schema.Set)), + EventCategories: flex.ExpandStringValueSet(d.Get("event_categories").(*schema.Set)), SnsTopicArn: aws.String(d.Get(names.AttrSNSTopicARN).(string)), SourceType: aws.String(d.Get(names.AttrSourceType).(string)), SubscriptionName: aws.String(d.Id()), } - _, err := conn.ModifyEventSubscriptionWithContext(ctx, input) + _, err := conn.ModifyEventSubscription(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying DMS Event Subscription (%s): %s", d.Id(), err) @@ -190,14 +191,14 @@ func resourceEventSubscriptionUpdate(ctx context.Context, d *schema.ResourceData func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Event Subscription: %s", d.Id()) - _, err := conn.DeleteEventSubscriptionWithContext(ctx, &dms.DeleteEventSubscriptionInput{ + _, err := conn.DeleteEventSubscription(ctx, &dms.DeleteEventSubscriptionInput{ SubscriptionName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -212,7 +213,7 @@ func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData return diags } -func FindEventSubscriptionByName(ctx context.Context, conn *dms.DatabaseMigrationService, name string) (*dms.EventSubscription, error) { +func findEventSubscriptionByName(ctx context.Context, conn *dms.Client, name string) (*awstypes.EventSubscription, error) { input := &dms.DescribeEventSubscriptionsInput{ SubscriptionName: aws.String(name), } @@ -220,50 +221,44 @@ func FindEventSubscriptionByName(ctx context.Context, conn 
*dms.DatabaseMigratio return findEventSubscription(ctx, conn, input) } -func findEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEventSubscriptionsInput) (*dms.EventSubscription, error) { +func findEventSubscription(ctx context.Context, conn *dms.Client, input *dms.DescribeEventSubscriptionsInput) (*awstypes.EventSubscription, error) { output, err := findEventSubscriptions(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findEventSubscriptions(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEventSubscriptionsInput) ([]*dms.EventSubscription, error) { - var output []*dms.EventSubscription +func findEventSubscriptions(ctx context.Context, conn *dms.Client, input *dms.DescribeEventSubscriptionsInput) ([]awstypes.EventSubscription, error) { + var output []awstypes.EventSubscription - err := conn.DescribeEventSubscriptionsPagesWithContext(ctx, input, func(page *dms.DescribeEventSubscriptionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeEventSubscriptionsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.EventSubscriptionsList { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.EventSubscriptionsList...) 
} return output, nil } -func statusEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationService, name string) retry.StateRefreshFunc { +func statusEventSubscription(ctx context.Context, conn *dms.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindEventSubscriptionByName(ctx, conn, name) + output, err := findEventSubscriptionByName(ctx, conn, name) if tfresource.NotFound(err) { return nil, "", nil @@ -273,11 +268,11 @@ func statusEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationSer return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } -func waitEventSubscriptionCreated(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { +func waitEventSubscriptionCreated(ctx context.Context, conn *dms.Client, name string, timeout time.Duration) (*awstypes.EventSubscription, error) { stateConf := &retry.StateChangeConf{ Pending: []string{eventSubscriptionStatusCreating, eventSubscriptionStatusModifying}, Target: []string{eventSubscriptionStatusActive}, @@ -289,14 +284,14 @@ func waitEventSubscriptionCreated(ctx context.Context, conn *dms.DatabaseMigrati outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.EventSubscription); ok { + if output, ok := outputRaw.(*awstypes.EventSubscription); ok { return output, err } return nil, err } -func waitEventSubscriptionUpdated(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { +func waitEventSubscriptionUpdated(ctx context.Context, conn *dms.Client, name string, timeout time.Duration) (*awstypes.EventSubscription, error) { stateConf := &retry.StateChangeConf{ Pending: []string{eventSubscriptionStatusModifying}, Target: []string{eventSubscriptionStatusActive}, @@ -308,14 +303,14 @@ func 
waitEventSubscriptionUpdated(ctx context.Context, conn *dms.DatabaseMigrati outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.EventSubscription); ok { + if output, ok := outputRaw.(*awstypes.EventSubscription); ok { return output, err } return nil, err } -func waitEventSubscriptionDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { +func waitEventSubscriptionDeleted(ctx context.Context, conn *dms.Client, name string, timeout time.Duration) (*awstypes.EventSubscription, error) { stateConf := &retry.StateChangeConf{ Pending: []string{eventSubscriptionStatusDeleting}, Target: []string{}, @@ -327,7 +322,7 @@ func waitEventSubscriptionDeleted(ctx context.Context, conn *dms.DatabaseMigrati outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.EventSubscription); ok { + if output, ok := outputRaw.(*awstypes.EventSubscription); ok { return output, err } diff --git a/internal/service/dms/event_subscription_test.go b/internal/service/dms/event_subscription_test.go index 7bdef14fbe4..d725bd5817d 100644 --- a/internal/service/dms/event_subscription_test.go +++ b/internal/service/dms/event_subscription_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccDMSEventSubscription_basic(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription resourceName := "aws_dms_event_subscription.test" snsTopicResourceName := "aws_sns_topic.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -57,7 +57,7 @@ func TestAccDMSEventSubscription_basic(t *testing.T) { func TestAccDMSEventSubscription_disappears(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription resourceName := "aws_dms_event_subscription.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -81,7 +81,7 @@ func TestAccDMSEventSubscription_disappears(t *testing.T) { func TestAccDMSEventSubscription_enabled(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription resourceName := "aws_dms_event_subscription.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -123,7 +123,7 @@ func TestAccDMSEventSubscription_enabled(t *testing.T) { func TestAccDMSEventSubscription_eventCategories(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription resourceName := "aws_dms_event_subscription.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -164,7 +164,7 @@ func TestAccDMSEventSubscription_eventCategories(t *testing.T) { func TestAccDMSEventSubscription_tags(t *testing.T) { ctx := acctest.Context(t) - var eventSubscription dms.EventSubscription + var eventSubscription awstypes.EventSubscription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_event_subscription.test" @@ -215,7 +215,7 @@ func testAccCheckEventSubscriptionDestroy(ctx context.Context) resource.TestChec continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindEventSubscriptionByName(ctx, conn, rs.Primary.ID) @@ -234,14 +234,14 @@ func testAccCheckEventSubscriptionDestroy(ctx context.Context) resource.TestChec } } -func 
testAccCheckEventSubscriptionExists(ctx context.Context, n string, v *dms.EventSubscription) resource.TestCheckFunc { +func testAccCheckEventSubscriptionExists(ctx context.Context, n string, v *awstypes.EventSubscription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) output, err := tfdms.FindEventSubscriptionByName(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/exports_test.go b/internal/service/dms/exports_test.go index 89008c13954..ac96699f63f 100644 --- a/internal/service/dms/exports_test.go +++ b/internal/service/dms/exports_test.go @@ -5,9 +5,25 @@ package dms // Exports for use in tests only. var ( - TaskSettingsEqual = taskSettingsEqual - ValidEndpointID = validEndpointID - ValidReplicationInstanceID = validReplicationInstanceID - ValidReplicationSubnetGroupID = validReplicationSubnetGroupID - ValidReplicationTaskID = validReplicationTaskID + ResourceCertificate = resourceCertificate + ResourceEndpoint = resourceEndpoint + ResourceEventSubscription = resourceEventSubscription + ResourceReplicationConfig = resourceReplicationConfig + ResourceReplicationInstance = resourceReplicationInstance + ResourceReplicationSubnetGroup = resourceReplicationSubnetGroup + ResourceReplicationTask = resourceReplicationTask + ResourceS3Endpoint = resourceS3Endpoint + + FindCertificateByID = findCertificateByID + FindEndpointByID = findEndpointByID + FindEventSubscriptionByName = findEventSubscriptionByName + FindReplicationConfigByARN = findReplicationConfigByARN + FindReplicationInstanceByID = findReplicationInstanceByID + FindReplicationSubnetGroupByID = findReplicationSubnetGroupByID + FindReplicationTaskByID = findReplicationTaskByID + TaskSettingsEqual = taskSettingsEqual + ValidEndpointID = validEndpointID + 
ValidReplicationInstanceID = validReplicationInstanceID + ValidReplicationSubnetGroupID = validReplicationSubnetGroupID + ValidReplicationTaskID = validReplicationTaskID ) diff --git a/internal/service/dms/generate.go b/internal/service/dms/generate.go index a9da99dfa6b..26422fa810d 100644 --- a/internal/service/dms/generate.go +++ b/internal/service/dms/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOutTagsElem=TagList -ServiceTagsSlice -TagOp=AddTagsToResource -UntagOp=RemoveTagsFromResource -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ListTagsOutTagsElem=TagList -ServiceTagsSlice -TagOp=AddTagsToResource -UntagOp=RemoveTagsFromResource -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index a3646109b68..734cdb42c83 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -10,14 +10,16 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -29,7 +31,7 @@ import ( // @SDKResource("aws_dms_replication_config", name="Replication Config") // @Tags(identifierAttribute="id") -func ResourceReplicationConfig() *schema.Resource { +func resourceReplicationConfig() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicationConfigCreate, ReadWithoutTimeout: resourceReplicationConfigRead, @@ -127,9 +129,9 @@ func ResourceReplicationConfig() *schema.Resource { DiffSuppressOnRefresh: true, }, "replication_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.MigrationTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.MigrationTypeValue](), }, "resource_identifier": { Type: schema.TypeString, @@ -174,12 +176,12 @@ func ResourceReplicationConfig() *schema.Resource { func resourceReplicationConfigCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) replicationConfigID := d.Get("replication_config_identifier").(string) input := &dms.CreateReplicationConfigInput{ ReplicationConfigIdentifier: aws.String(replicationConfigID), - ReplicationType: aws.String(d.Get("replication_type").(string)), + ReplicationType: awstypes.MigrationTypeValue(d.Get("replication_type").(string)), SourceEndpointArn: aws.String(d.Get("source_endpoint_arn").(string)), TableMappings: aws.String(d.Get("table_mappings").(string)), Tags: getTagsIn(ctx), @@ -202,13 +204,13 @@ func resourceReplicationConfigCreate(ctx context.Context, d *schema.ResourceData input.SupplementalSettings = aws.String(v.(string)) 
} - output, err := conn.CreateReplicationConfigWithContext(ctx, input) + output, err := conn.CreateReplicationConfig(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Replication Config (%s): %s", replicationConfigID, err) } - d.SetId(aws.StringValue(output.ReplicationConfig.ReplicationConfigArn)) + d.SetId(aws.ToString(output.ReplicationConfig.ReplicationConfigArn)) if d.Get("start_replication").(bool) { if err := startReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { @@ -221,9 +223,9 @@ func resourceReplicationConfigCreate(ctx context.Context, d *schema.ResourceData func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - replicationConfig, err := FindReplicationConfigByARN(ctx, conn, d.Id()) + replicationConfig, err := findReplicationConfigByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Replication Config (%s) not found, removing from state", d.Id()) @@ -252,7 +254,7 @@ func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll, "start_replication") { if err := stopReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -274,7 +276,7 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("replication_type") { - input.ReplicationType = aws.String(d.Get("replication_type").(string)) + input.ReplicationType = awstypes.MigrationTypeValue(d.Get("replication_type").(string)) } if 
d.HasChange("source_endpoint_arn") { @@ -293,7 +295,7 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData input.TargetEndpointArn = aws.String(d.Get("target_endpoint_arn").(string)) } - _, err := conn.ModifyReplicationConfigWithContext(ctx, input) + _, err := conn.ModifyReplicationConfig(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying DMS Replication Config (%s): %s", d.Id(), err) @@ -307,7 +309,7 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData } if d.HasChange("start_replication") { - var f func(context.Context, *dms.DatabaseMigrationService, string, time.Duration) error + var f func(context.Context, *dms.Client, string, time.Duration) error if d.Get("start_replication").(bool) { f = startReplication } else { @@ -323,18 +325,18 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData func resourceReplicationConfigDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if err := stopReplication(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting DMS Replication Config: %s", d.Id()) - _, err := conn.DeleteReplicationConfigWithContext(ctx, &dms.DeleteReplicationConfigInput{ + _, err := conn.DeleteReplicationConfig(ctx, &dms.DeleteReplicationConfigInput{ ReplicationConfigArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -349,111 +351,99 @@ func resourceReplicationConfigDelete(ctx context.Context, d *schema.ResourceData return diags } -func FindReplicationConfigByARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) (*dms.ReplicationConfig, error) { +func 
findReplicationConfigByARN(ctx context.Context, conn *dms.Client, arn string) (*awstypes.ReplicationConfig, error) { input := &dms.DescribeReplicationConfigsInput{ - Filters: []*dms.Filter{{ + Filters: []awstypes.Filter{{ Name: aws.String("replication-config-arn"), - Values: aws.StringSlice([]string{arn}), + Values: []string{arn}, }}, } return findReplicationConfig(ctx, conn, input) } -func findReplicationConfig(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationConfigsInput) (*dms.ReplicationConfig, error) { +func findReplicationConfig(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationConfigsInput) (*awstypes.ReplicationConfig, error) { output, err := findReplicationConfigs(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationConfigs(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationConfigsInput) ([]*dms.ReplicationConfig, error) { - var output []*dms.ReplicationConfig +func findReplicationConfigs(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationConfigsInput) ([]awstypes.ReplicationConfig, error) { + var output []awstypes.ReplicationConfig - err := conn.DescribeReplicationConfigsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationConfigsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationConfigsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.ReplicationConfigs { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: 
input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ReplicationConfigs...) } return output, nil } -func findReplicationByReplicationConfigARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) (*dms.Replication, error) { +func findReplicationByReplicationConfigARN(ctx context.Context, conn *dms.Client, arn string) (*awstypes.Replication, error) { input := &dms.DescribeReplicationsInput{ - Filters: []*dms.Filter{{ + Filters: []awstypes.Filter{{ Name: aws.String("replication-config-arn"), - Values: aws.StringSlice([]string{arn}), + Values: []string{arn}, }}, } return findReplication(ctx, conn, input) } -func findReplication(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationsInput) (*dms.Replication, error) { +func findReplication(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationsInput) (*awstypes.Replication, error) { output, err := findReplications(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplications(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationsInput) ([]*dms.Replication, error) { - var output []*dms.Replication +func findReplications(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationsInput) ([]awstypes.Replication, error) { + var output []awstypes.Replication - err := conn.DescribeReplicationsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.Replications { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + 
LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.Replications...) } return output, nil } -func statusReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) retry.StateRefreshFunc { +func statusReplication(ctx context.Context, conn *dms.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findReplicationByReplicationConfigARN(ctx, conn, arn) @@ -465,27 +455,24 @@ func statusReplication(ctx context.Context, conn *dms.DatabaseMigrationService, return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } -func setLastReplicationError(err error, replication *dms.Replication) { +func setLastReplicationError(err error, replication *awstypes.Replication) { var errs []error - errs = append(errs, tfslices.ApplyToAll(replication.FailureMessages, func(v *string) error { - if v := aws.StringValue(v); v != "" { - return errors.New(v) - } - return nil + errs = append(errs, tfslices.ApplyToAll(replication.FailureMessages, func(v string) error { + return errors.New(v) })...) 
- if v := aws.StringValue(replication.StopReason); v != "" { + if v := aws.ToString(replication.StopReason); v != "" { errs = append(errs, errors.New(v)) } tfresource.SetLastError(err, errors.Join(errs...)) } -func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { +func waitReplicationRunning(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) (*awstypes.Replication, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ replicationStatusReady, @@ -506,7 +493,7 @@ func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.Replication); ok { + if output, ok := outputRaw.(*awstypes.Replication); ok { setLastReplicationError(err, output) return output, err } @@ -514,7 +501,7 @@ func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationServ return nil, err } -func waitReplicationStopped(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { +func waitReplicationStopped(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) (*awstypes.Replication, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationStatusStopping, replicationStatusRunning}, Target: []string{replicationStatusStopped}, @@ -526,7 +513,7 @@ func waitReplicationStopped(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.Replication); ok { + if output, ok := outputRaw.(*awstypes.Replication); ok { setLastReplicationError(err, output) return output, err } @@ -534,7 +521,7 @@ func waitReplicationStopped(ctx context.Context, conn *dms.DatabaseMigrationServ return nil, err } -func waitReplicationDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, 
timeout time.Duration) (*dms.Replication, error) { +func waitReplicationDeleted(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) (*awstypes.Replication, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusDeleting, replicationStatusStopped}, Target: []string{}, @@ -546,7 +533,7 @@ func waitReplicationDeleted(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.Replication); ok { + if output, ok := outputRaw.(*awstypes.Replication); ok { setLastReplicationError(err, output) return output, err } @@ -554,14 +541,14 @@ func waitReplicationDeleted(ctx context.Context, conn *dms.DatabaseMigrationServ return nil, err } -func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) error { +func startReplication(ctx context.Context, conn *dms.Client, arn string, timeout time.Duration) error { replication, err := findReplicationByReplicationConfigARN(ctx, conn, arn) if err != nil { return fmt.Errorf("reading DMS Replication Config (%s) replication: %s", arn, err) } - replicationStatus := aws.StringValue(replication.Status) + replicationStatus := aws.ToString(replication.Status) if replicationStatus == replicationStatusRunning { return nil } @@ -575,7 +562,7 @@ func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, a StartReplicationType: aws.String(startReplicationType), } - _, err = conn.StartReplicationWithContext(ctx, input) + _, err = conn.StartReplication(ctx, input) if err != nil { return fmt.Errorf("starting DMS Serverless Replication (%s): %w", arn, err) @@ -588,7 +575,7 @@ func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, a return nil } -func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) error { +func stopReplication(ctx context.Context, conn *dms.Client, 
arn string, timeout time.Duration) error { replication, err := findReplicationByReplicationConfigARN(ctx, conn, arn) if tfresource.NotFound(err) { @@ -599,8 +586,7 @@ func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, ar return fmt.Errorf("reading DMS Replication Config (%s) replication: %s", arn, err) } - replicationStatus := aws.StringValue(replication.Status) - if replicationStatus == replicationStatusStopped || replicationStatus == replicationStatusCreated || replicationStatus == replicationStatusFailed { + if replicationStatus := aws.ToString(replication.Status); replicationStatus == replicationStatusStopped || replicationStatus == replicationStatusCreated || replicationStatus == replicationStatusFailed { return nil } @@ -608,7 +594,7 @@ func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, ar ReplicationConfigArn: aws.String(arn), } - _, err = conn.StopReplicationWithContext(ctx, input) + _, err = conn.StopReplication(ctx, input) if err != nil { return fmt.Errorf("stopping DMS Serverless Replication (%s): %w", arn, err) @@ -621,32 +607,32 @@ func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, ar return nil } -func flattenComputeConfig(apiObject *dms.ComputeConfig) []interface{} { +func flattenComputeConfig(apiObject *awstypes.ComputeConfig) []interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{ - names.AttrAvailabilityZone: aws.StringValue(apiObject.AvailabilityZone), - "dns_name_servers": aws.StringValue(apiObject.DnsNameServers), - names.AttrKMSKeyID: aws.StringValue(apiObject.KmsKeyId), - "max_capacity_units": aws.Int64Value(apiObject.MaxCapacityUnits), - "min_capacity_units": aws.Int64Value(apiObject.MinCapacityUnits), - "multi_az": aws.BoolValue(apiObject.MultiAZ), - names.AttrPreferredMaintenanceWindow: aws.StringValue(apiObject.PreferredMaintenanceWindow), - "replication_subnet_group_id": aws.StringValue(apiObject.ReplicationSubnetGroupId), - 
names.AttrVPCSecurityGroupIDs: flex.FlattenStringSet(apiObject.VpcSecurityGroupIds), + names.AttrAvailabilityZone: aws.ToString(apiObject.AvailabilityZone), + "dns_name_servers": aws.ToString(apiObject.DnsNameServers), + names.AttrKMSKeyID: aws.ToString(apiObject.KmsKeyId), + "max_capacity_units": aws.ToInt32(apiObject.MaxCapacityUnits), + "min_capacity_units": aws.ToInt32(apiObject.MinCapacityUnits), + "multi_az": aws.ToBool(apiObject.MultiAZ), + names.AttrPreferredMaintenanceWindow: aws.ToString(apiObject.PreferredMaintenanceWindow), + "replication_subnet_group_id": aws.ToString(apiObject.ReplicationSubnetGroupId), + names.AttrVPCSecurityGroupIDs: apiObject.VpcSecurityGroupIds, } return []interface{}{tfMap} } -func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { +func expandComputeConfigInput(tfMap map[string]interface{}) *awstypes.ComputeConfig { if tfMap == nil { return nil } - apiObject := &dms.ComputeConfig{} + apiObject := &awstypes.ComputeConfig{} if v, ok := tfMap[names.AttrAvailabilityZone].(string); ok && v != "" { apiObject.AvailabilityZone = aws.String(v) @@ -661,11 +647,11 @@ func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { } if v, ok := tfMap["max_capacity_units"].(int); ok && v != 0 { - apiObject.MaxCapacityUnits = aws.Int64(int64(v)) + apiObject.MaxCapacityUnits = aws.Int32(int32(v)) } if v, ok := tfMap["min_capacity_units"].(int); ok && v != 0 { - apiObject.MinCapacityUnits = aws.Int64(int64(v)) + apiObject.MinCapacityUnits = aws.Int32(int32(v)) } if v, ok := tfMap["multi_az"].(bool); ok { @@ -681,7 +667,7 @@ func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { } if v, ok := tfMap[names.AttrVPCSecurityGroupIDs].(*schema.Set); ok && v.Len() > 0 { - apiObject.VpcSecurityGroupIds = flex.ExpandStringSet(v) + apiObject.VpcSecurityGroupIds = flex.ExpandStringValueSet(v) } return apiObject diff --git a/internal/service/dms/replication_config_test.go 
b/internal/service/dms/replication_config_test.go index 2cfdd78bd26..92ca9c91869 100644 --- a/internal/service/dms/replication_config_test.go +++ b/internal/service/dms/replication_config_test.go @@ -10,12 +10,13 @@ import ( "testing" "github.com/YakDriver/regexache" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -24,12 +25,12 @@ import ( func TestAccDMSReplicationConfig_basic(t *testing.T) { t.Parallel() - for _, migrationType := range dms.MigrationTypeValue_Values() { //nolint:paralleltest // false positive + for _, migrationType := range enum.Values[awstypes.MigrationTypeValue]() { //nolint:paralleltest // false positive t.Run(migrationType, func(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -82,7 +83,7 @@ func TestAccDMSReplicationConfig_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) }, @@ -106,7 +107,7 @@ func TestAccDMSReplicationConfig_settings_EnableLogging(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -199,7 +200,7 @@ func TestAccDMSReplicationConfig_settings_LogComponents(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -232,7 +233,7 @@ func TestAccDMSReplicationConfig_settings_StreamBuffer(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -263,7 +264,7 @@ func TestAccDMSReplicationConfig_tags(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -304,7 +305,7 @@ func TestAccDMSReplicationConfig_update(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -341,7 +342,7 @@ func TestAccDMSReplicationConfig_startReplication(t 
*testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_config.test" - var v dms.ReplicationConfig + var v awstypes.ReplicationConfig resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -373,14 +374,14 @@ func TestAccDMSReplicationConfig_startReplication(t *testing.T) { }) } -func testAccCheckReplicationConfigExists(ctx context.Context, n string, v *dms.ReplicationConfig) resource.TestCheckFunc { +func testAccCheckReplicationConfigExists(ctx context.Context, n string, v *awstypes.ReplicationConfig) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) output, err := tfdms.FindReplicationConfigByARN(ctx, conn, rs.Primary.ID) @@ -401,7 +402,7 @@ func testAccCheckReplicationConfigDestroy(ctx context.Context) resource.TestChec continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindReplicationConfigByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/replication_instance.go b/internal/service/dms/replication_instance.go index 585022d167c..8c18532cf5c 100644 --- a/internal/service/dms/replication_instance.go +++ b/internal/service/dms/replication_instance.go @@ -8,14 +8,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -27,7 +28,7 @@ import ( // @SDKResource("aws_dms_replication_instance", name="Replication Instance") // @Tags(identifierAttribute="replication_instance_arn") -func ResourceReplicationInstance() *schema.Resource { +func resourceReplicationInstance() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicationInstanceCreate, ReadWithoutTimeout: resourceReplicationInstanceRead, @@ -153,7 +154,7 @@ func ResourceReplicationInstance() *schema.Resource { func resourceReplicationInstanceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) replicationInstanceID := d.Get("replication_instance_id").(string) input := &dms.CreateReplicationInstanceInput{ @@ -170,7 +171,7 @@ func resourceReplicationInstanceCreate(ctx context.Context, d *schema.ResourceDa // to set the default value. 
See GitHub Issue #5694 https://github.com/hashicorp/terraform/issues/5694 if v, ok := d.GetOk(names.AttrAllocatedStorage); ok { - input.AllocatedStorage = aws.Int64(int64(v.(int))) + input.AllocatedStorage = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk(names.AttrAvailabilityZone); ok { input.AvailabilityZone = aws.String(v.(string)) @@ -191,10 +192,10 @@ func resourceReplicationInstanceCreate(ctx context.Context, d *schema.ResourceDa input.ReplicationSubnetGroupIdentifier = aws.String(v.(string)) } if v, ok := d.GetOk(names.AttrVPCSecurityGroupIDs); ok { - input.VpcSecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + input.VpcSecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := conn.CreateReplicationInstanceWithContext(ctx, input) + _, err := conn.CreateReplicationInstance(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Replication Instance (%s): %s", replicationInstanceID, err) @@ -211,9 +212,9 @@ func resourceReplicationInstanceCreate(ctx context.Context, d *schema.ResourceDa func resourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - instance, err := FindReplicationInstanceByID(ctx, conn, d.Id()) + instance, err := findReplicationInstanceByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Replication Instance (%s) not found, removing from state", d.Id()) @@ -237,11 +238,11 @@ func resourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData d.Set("replication_instance_arn", instance.ReplicationInstanceArn) d.Set("replication_instance_class", instance.ReplicationInstanceClass) d.Set("replication_instance_id", instance.ReplicationInstanceIdentifier) - d.Set("replication_instance_private_ips", aws.StringValueSlice(instance.ReplicationInstancePrivateIpAddresses)) - 
d.Set("replication_instance_public_ips", aws.StringValueSlice(instance.ReplicationInstancePublicIpAddresses)) + d.Set("replication_instance_private_ips", instance.ReplicationInstancePrivateIpAddresses) + d.Set("replication_instance_public_ips", instance.ReplicationInstancePublicIpAddresses) d.Set("replication_subnet_group_id", instance.ReplicationSubnetGroup.ReplicationSubnetGroupIdentifier) - vpcSecurityGroupIDs := tfslices.ApplyToAll(instance.VpcSecurityGroups, func(sg *dms.VpcSecurityGroupMembership) string { - return aws.StringValue(sg.VpcSecurityGroupId) + vpcSecurityGroupIDs := tfslices.ApplyToAll(instance.VpcSecurityGroups, func(v awstypes.VpcSecurityGroupMembership) string { + return aws.ToString(v.VpcSecurityGroupId) }) d.Set(names.AttrVPCSecurityGroupIDs, vpcSecurityGroupIDs) @@ -250,19 +251,19 @@ func resourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData func resourceReplicationInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll, names.AttrAllowMajorVersionUpgrade) { // Having allowing_major_version_upgrade by itself should not trigger ModifyReplicationInstance // as it results in InvalidParameterCombination: No modifications were requested input := &dms.ModifyReplicationInstanceInput{ - AllowMajorVersionUpgrade: aws.Bool(d.Get(names.AttrAllowMajorVersionUpgrade).(bool)), - ApplyImmediately: aws.Bool(d.Get(names.AttrApplyImmediately).(bool)), + AllowMajorVersionUpgrade: d.Get(names.AttrAllowMajorVersionUpgrade).(bool), + ApplyImmediately: d.Get(names.AttrApplyImmediately).(bool), ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), } if d.HasChange(names.AttrAllocatedStorage) { - input.AllocatedStorage = aws.Int64(int64(d.Get(names.AttrAllocatedStorage).(int))) + input.AllocatedStorage 
= aws.Int32(int32(d.Get(names.AttrAllocatedStorage).(int))) } if d.HasChange(names.AttrAutoMinorVersionUpgrade) { @@ -290,10 +291,10 @@ func resourceReplicationInstanceUpdate(ctx context.Context, d *schema.ResourceDa } if d.HasChange(names.AttrVPCSecurityGroupIDs) { - input.VpcSecurityGroupIds = flex.ExpandStringSet(d.Get(names.AttrVPCSecurityGroupIDs).(*schema.Set)) + input.VpcSecurityGroupIds = flex.ExpandStringValueSet(d.Get(names.AttrVPCSecurityGroupIDs).(*schema.Set)) } - _, err := conn.ModifyReplicationInstanceWithContext(ctx, input) + _, err := conn.ModifyReplicationInstance(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DMS Replication Instance (%s): %s", d.Id(), err) @@ -309,14 +310,14 @@ func resourceReplicationInstanceUpdate(ctx context.Context, d *schema.ResourceDa func resourceReplicationInstanceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Replication Instance: %s", d.Id()) - _, err := conn.DeleteReplicationInstanceWithContext(ctx, &dms.DeleteReplicationInstanceInput{ + _, err := conn.DeleteReplicationInstance(ctx, &dms.DeleteReplicationInstanceInput{ ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -331,12 +332,12 @@ func resourceReplicationInstanceDelete(ctx context.Context, d *schema.ResourceDa return diags } -func FindReplicationInstanceByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationInstance, error) { +func findReplicationInstanceByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationInstance, error) { input := &dms.DescribeReplicationInstancesInput{ - Filters: []*dms.Filter{ + Filters: 
[]awstypes.Filter{ { Name: aws.String("replication-instance-id"), - Values: aws.StringSlice([]string{id}), + Values: []string{id}, }, }, } @@ -344,50 +345,43 @@ func FindReplicationInstanceByID(ctx context.Context, conn *dms.DatabaseMigratio return findReplicationInstance(ctx, conn, input) } -func findReplicationInstance(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationInstancesInput) (*dms.ReplicationInstance, error) { +func findReplicationInstance(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationInstancesInput) (*awstypes.ReplicationInstance, error) { output, err := findReplicationInstances(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationInstances(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationInstancesInput) ([]*dms.ReplicationInstance, error) { - var output []*dms.ReplicationInstance +func findReplicationInstances(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationInstancesInput) ([]awstypes.ReplicationInstance, error) { + var output []awstypes.ReplicationInstance - err := conn.DescribeReplicationInstancesPagesWithContext(ctx, input, func(page *dms.DescribeReplicationInstancesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationInstancesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.ReplicationInstances { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != 
nil { - return nil, err + output = append(output, page.ReplicationInstances...) } return output, nil } -func statusReplicationInstance(ctx context.Context, conn *dms.DatabaseMigrationService, id string) retry.StateRefreshFunc { +func statusReplicationInstance(ctx context.Context, conn *dms.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindReplicationInstanceByID(ctx, conn, id) + output, err := findReplicationInstanceByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -397,11 +391,11 @@ func statusReplicationInstance(ctx context.Context, conn *dms.DatabaseMigrationS return nil, "", err } - return output, aws.StringValue(output.ReplicationInstanceStatus), nil + return output, aws.ToString(output.ReplicationInstanceStatus), nil } } -func waitReplicationInstanceCreated(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationInstance, error) { +func waitReplicationInstanceCreated(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationInstance, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationInstanceStatusCreating, replicationInstanceStatusModifying}, Target: []string{replicationInstanceStatusAvailable}, @@ -413,14 +407,14 @@ func waitReplicationInstanceCreated(ctx context.Context, conn *dms.DatabaseMigra outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationInstance); ok { + if output, ok := outputRaw.(*awstypes.ReplicationInstance); ok { return output, err } return nil, err } -func waitReplicationInstanceUpdated(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationInstance, error) { +func waitReplicationInstanceUpdated(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationInstance, error) { stateConf := &retry.StateChangeConf{ 
Pending: []string{replicationInstanceStatusModifying, replicationInstanceStatusUpgrading}, Target: []string{replicationInstanceStatusAvailable}, @@ -432,14 +426,14 @@ func waitReplicationInstanceUpdated(ctx context.Context, conn *dms.DatabaseMigra outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationInstance); ok { + if output, ok := outputRaw.(*awstypes.ReplicationInstance); ok { return output, err } return nil, err } -func waitReplicationInstanceDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationInstance, error) { +func waitReplicationInstanceDeleted(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationInstance, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationInstanceStatusDeleting}, Target: []string{}, @@ -451,7 +445,7 @@ func waitReplicationInstanceDeleted(ctx context.Context, conn *dms.DatabaseMigra outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationInstance); ok { + if output, ok := outputRaw.(*awstypes.ReplicationInstance); ok { return output, err } diff --git a/internal/service/dms/replication_instance_data_source.go b/internal/service/dms/replication_instance_data_source.go index afa7b0056f6..bcd3a2075ff 100644 --- a/internal/service/dms/replication_instance_data_source.go +++ b/internal/service/dms/replication_instance_data_source.go @@ -6,8 +6,8 @@ package dms import ( "context" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -17,8 +17,8 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_dms_replication_instance") -func DataSourceReplicationInstance() *schema.Resource { +// @SDKDataSource("aws_dms_replication_instance", name="Replication Instance") +func dataSourceReplicationInstance() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceReplicationInstanceRead, @@ -98,18 +98,18 @@ func DataSourceReplicationInstance() *schema.Resource { func dataSourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig rID := d.Get("replication_instance_id").(string) - instance, err := FindReplicationInstanceByID(ctx, conn, rID) + instance, err := findReplicationInstanceByID(ctx, conn, rID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Replication Instance (%s): %s", rID, err) } - d.SetId(aws.StringValue(instance.ReplicationInstanceIdentifier)) + d.SetId(aws.ToString(instance.ReplicationInstanceIdentifier)) d.Set(names.AttrAllocatedStorage, instance.AllocatedStorage) d.Set(names.AttrAutoMinorVersionUpgrade, instance.AutoMinorVersionUpgrade) d.Set(names.AttrAvailabilityZone, instance.AvailabilityZone) @@ -119,15 +119,15 @@ func dataSourceReplicationInstanceRead(ctx context.Context, d *schema.ResourceDa d.Set("network_type", instance.NetworkType) d.Set(names.AttrPreferredMaintenanceWindow, instance.PreferredMaintenanceWindow) d.Set(names.AttrPubliclyAccessible, instance.PubliclyAccessible) - arn := aws.StringValue(instance.ReplicationInstanceArn) + arn := aws.ToString(instance.ReplicationInstanceArn) d.Set("replication_instance_arn", arn) d.Set("replication_instance_class", instance.ReplicationInstanceClass) d.Set("replication_instance_id", 
instance.ReplicationInstanceIdentifier) - d.Set("replication_instance_private_ips", aws.StringValueSlice(instance.ReplicationInstancePrivateIpAddresses)) - d.Set("replication_instance_public_ips", aws.StringValueSlice(instance.ReplicationInstancePublicIpAddresses)) + d.Set("replication_instance_private_ips", instance.ReplicationInstancePrivateIpAddresses) + d.Set("replication_instance_public_ips", instance.ReplicationInstancePublicIpAddresses) d.Set("replication_subnet_group_id", instance.ReplicationSubnetGroup.ReplicationSubnetGroupIdentifier) - vpcSecurityGroupIDs := tfslices.ApplyToAll(instance.VpcSecurityGroups, func(sg *dms.VpcSecurityGroupMembership) string { - return aws.StringValue(sg.VpcSecurityGroupId) + vpcSecurityGroupIDs := tfslices.ApplyToAll(instance.VpcSecurityGroups, func(sg awstypes.VpcSecurityGroupMembership) string { + return aws.ToString(sg.VpcSecurityGroupId) }) d.Set(names.AttrVPCSecurityGroupIDs, vpcSecurityGroupIDs) diff --git a/internal/service/dms/replication_instance_test.go b/internal/service/dms/replication_instance_test.go index bf95f735f98..c4a4d93dc11 100644 --- a/internal/service/dms/replication_instance_test.go +++ b/internal/service/dms/replication_instance_test.go @@ -48,7 +48,7 @@ func TestAccDMSReplicationInstance_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "replication_instance_class", replicationInstanceClass), resource.TestCheckResourceAttr(resourceName, "replication_instance_id", rName), resource.TestCheckResourceAttr(resourceName, "replication_instance_private_ips.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "replication_instance_public_ips.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replication_instance_public_ips.#", acctest.Ct0), resource.TestCheckResourceAttrSet(resourceName, "replication_subnet_group_id"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), resource.TestCheckResourceAttr(resourceName, 
"vpc_security_group_ids.#", acctest.Ct1), @@ -519,7 +519,7 @@ func testAccCheckReplicationInstanceExists(ctx context.Context, n string) resour return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindReplicationInstanceByID(ctx, conn, rs.Primary.ID) @@ -529,7 +529,7 @@ func testAccCheckReplicationInstanceExists(ctx context.Context, n string) resour func testAccCheckReplicationInstanceDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_dms_replication_instance" { diff --git a/internal/service/dms/replication_subnet_group.go b/internal/service/dms/replication_subnet_group.go index 193ada4df24..6094f6376e9 100644 --- a/internal/service/dms/replication_subnet_group.go +++ b/internal/service/dms/replication_subnet_group.go @@ -8,14 +8,15 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" 
"github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -27,7 +28,7 @@ import ( // @SDKResource("aws_dms_replication_subnet_group", name="Replication Subnet Group") // @Tags(identifierAttribute="replication_subnet_group_arn") -func ResourceReplicationSubnetGroup() *schema.Resource { +func resourceReplicationSubnetGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicationSubnetGroupCreate, ReadWithoutTimeout: resourceReplicationSubnetGroupRead, @@ -73,20 +74,19 @@ func ResourceReplicationSubnetGroup() *schema.Resource { func resourceReplicationSubnetGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) replicationSubnetGroupID := d.Get("replication_subnet_group_id").(string) input := &dms.CreateReplicationSubnetGroupInput{ ReplicationSubnetGroupDescription: aws.String(d.Get("replication_subnet_group_description").(string)), ReplicationSubnetGroupIdentifier: aws.String(replicationSubnetGroupID), - SubnetIds: flex.ExpandStringSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), + SubnetIds: flex.ExpandStringValueSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), Tags: getTagsIn(ctx), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateReplicationSubnetGroupWithContext(ctx, input) - }, dms.ErrCodeAccessDeniedFault) - + _, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, propagationTimeout, func() (interface{}, error) { + return conn.CreateReplicationSubnetGroup(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Replication Subnet Group (%s): %s", replicationSubnetGroupID, err) } @@ -98,9 +98,9 @@ func resourceReplicationSubnetGroupCreate(ctx context.Context, d *schema.Resourc func 
resourceReplicationSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - group, err := FindReplicationSubnetGroupByID(ctx, conn, d.Id()) + group, err := findReplicationSubnetGroupByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Replication Subnet Group (%s) not found, removing from state", d.Id()) @@ -124,8 +124,8 @@ func resourceReplicationSubnetGroupRead(ctx context.Context, d *schema.ResourceD d.Set("replication_subnet_group_arn", arn) d.Set("replication_subnet_group_description", group.ReplicationSubnetGroupDescription) d.Set("replication_subnet_group_id", group.ReplicationSubnetGroupIdentifier) - subnetIDs := tfslices.ApplyToAll(group.Subnets, func(sn *dms.Subnet) string { - return aws.StringValue(sn.SubnetIdentifier) + subnetIDs := tfslices.ApplyToAll(group.Subnets, func(sn awstypes.Subnet) string { + return aws.ToString(sn.SubnetIdentifier) }) d.Set(names.AttrSubnetIDs, subnetIDs) d.Set(names.AttrVPCID, group.VpcId) @@ -135,21 +135,21 @@ func resourceReplicationSubnetGroupRead(ctx context.Context, d *schema.ResourceD func resourceReplicationSubnetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { // Updates to subnet groups are only valid when sending SubnetIds even if there are no // changes to SubnetIds. 
input := &dms.ModifyReplicationSubnetGroupInput{ ReplicationSubnetGroupIdentifier: aws.String(d.Get("replication_subnet_group_id").(string)), - SubnetIds: flex.ExpandStringSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), + SubnetIds: flex.ExpandStringValueSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), } if d.HasChange("replication_subnet_group_description") { input.ReplicationSubnetGroupDescription = aws.String(d.Get("replication_subnet_group_description").(string)) } - _, err := conn.ModifyReplicationSubnetGroupWithContext(ctx, input) + _, err := conn.ModifyReplicationSubnetGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DMS Replication Subnet Group (%s): %s", d.Id(), err) @@ -161,14 +161,14 @@ func resourceReplicationSubnetGroupUpdate(ctx context.Context, d *schema.Resourc func resourceReplicationSubnetGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Replication Subnet Group: %s", d.Id()) - _, err := conn.DeleteReplicationSubnetGroupWithContext(ctx, &dms.DeleteReplicationSubnetGroupInput{ + _, err := conn.DeleteReplicationSubnetGroup(ctx, &dms.DeleteReplicationSubnetGroupInput{ ReplicationSubnetGroupIdentifier: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -179,12 +179,12 @@ func resourceReplicationSubnetGroupDelete(ctx context.Context, d *schema.Resourc return diags } -func FindReplicationSubnetGroupByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationSubnetGroup, error) { +func findReplicationSubnetGroupByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationSubnetGroup, error) { input := &dms.DescribeReplicationSubnetGroupsInput{ - Filters: []*dms.Filter{ + 
Filters: []awstypes.Filter{ { Name: aws.String("replication-subnet-group-id"), - Values: aws.StringSlice([]string{id}), + Values: []string{id}, }, }, } @@ -192,42 +192,35 @@ func FindReplicationSubnetGroupByID(ctx context.Context, conn *dms.DatabaseMigra return findReplicationSubnetGroup(ctx, conn, input) } -func findReplicationSubnetGroup(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationSubnetGroupsInput) (*dms.ReplicationSubnetGroup, error) { +func findReplicationSubnetGroup(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationSubnetGroupsInput) (*awstypes.ReplicationSubnetGroup, error) { output, err := findReplicationSubnetGroups(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationSubnetGroups(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationSubnetGroupsInput) ([]*dms.ReplicationSubnetGroup, error) { - var output []*dms.ReplicationSubnetGroup +func findReplicationSubnetGroups(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationSubnetGroupsInput) ([]awstypes.ReplicationSubnetGroup, error) { + var output []awstypes.ReplicationSubnetGroup - err := conn.DescribeReplicationSubnetGroupsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationSubnetGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationSubnetGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.ReplicationSubnetGroups { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - 
LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ReplicationSubnetGroups...) } return output, nil diff --git a/internal/service/dms/replication_subnet_group_data_source.go b/internal/service/dms/replication_subnet_group_data_source.go index 3f4f27c60ae..435f386c411 100644 --- a/internal/service/dms/replication_subnet_group_data_source.go +++ b/internal/service/dms/replication_subnet_group_data_source.go @@ -7,9 +7,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -19,8 +19,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_dms_replication_subnet_group") -func DataSourceReplicationSubnetGroup() *schema.Resource { +// @SDKDataSource("aws_dms_replication_subnet_group", name="Replication Subnet Group") +func dataSourceReplicationSubnetGroup() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceReplicationSubnetGroupRead, @@ -57,18 +57,18 @@ func DataSourceReplicationSubnetGroup() *schema.Resource { func dataSourceReplicationSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig replicationSubnetGroupID := d.Get("replication_subnet_group_id").(string) - group, err := 
FindReplicationSubnetGroupByID(ctx, conn, replicationSubnetGroupID) + group, err := findReplicationSubnetGroupByID(ctx, conn, replicationSubnetGroupID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Replication Subnet Group (%s): %s", replicationSubnetGroupID, err) } - d.SetId(aws.StringValue(group.ReplicationSubnetGroupIdentifier)) + d.SetId(aws.ToString(group.ReplicationSubnetGroupIdentifier)) arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, Service: "dms", @@ -79,8 +79,8 @@ func dataSourceReplicationSubnetGroupRead(ctx context.Context, d *schema.Resourc d.Set("replication_subnet_group_arn", arn) d.Set("replication_subnet_group_description", group.ReplicationSubnetGroupDescription) d.Set("replication_subnet_group_id", group.ReplicationSubnetGroupIdentifier) - subnetIDs := tfslices.ApplyToAll(group.Subnets, func(sn *dms.Subnet) string { - return aws.StringValue(sn.SubnetIdentifier) + subnetIDs := tfslices.ApplyToAll(group.Subnets, func(sn awstypes.Subnet) string { + return aws.ToString(sn.SubnetIdentifier) }) d.Set(names.AttrSubnetIDs, subnetIDs) d.Set(names.AttrVPCID, group.VpcId) diff --git a/internal/service/dms/replication_subnet_group_test.go b/internal/service/dms/replication_subnet_group_test.go index acd4d5ad4dc..ad41eed6cc8 100644 --- a/internal/service/dms/replication_subnet_group_test.go +++ b/internal/service/dms/replication_subnet_group_test.go @@ -132,7 +132,7 @@ func testAccCheckReplicationSubnetGroupExists(ctx context.Context, n string) res return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindReplicationSubnetGroupByID(ctx, conn, rs.Primary.ID) @@ -142,7 +142,7 @@ func testAccCheckReplicationSubnetGroupExists(ctx context.Context, n string) res func testAccCheckReplicationSubnetGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn 
:= acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_dms_replication_subnet_group" { diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 7375976c24b..6e42a43a161 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -11,14 +11,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -29,7 +31,7 @@ import ( // @SDKResource("aws_dms_replication_task", name="Replication Task") // @Tags(identifierAttribute="replication_task_arn") -func ResourceReplicationTask() *schema.Resource { +func resourceReplicationTask() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceReplicationTaskCreate, ReadWithoutTimeout: resourceReplicationTaskRead, @@ -54,9 +56,9 @@ func ResourceReplicationTask() *schema.Resource { ConflictsWith: []string{"cdc_start_position"}, }, 
"migration_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.MigrationTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.MigrationTypeValue](), }, "replication_instance_arn": { Type: schema.TypeString, @@ -134,11 +136,11 @@ func ResourceReplicationTask() *schema.Resource { func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) taskID := d.Get("replication_task_id").(string) input := &dms.CreateReplicationTaskInput{ - MigrationType: aws.String(d.Get("migration_type").(string)), + MigrationType: awstypes.MigrationTypeValue(d.Get("migration_type").(string)), ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), ReplicationTaskIdentifier: aws.String(taskID), SourceEndpointArn: aws.String(d.Get("source_endpoint_arn").(string)), @@ -168,7 +170,7 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, input.ResourceIdentifier = aws.String(v.(string)) } - _, err := conn.CreateReplicationTaskWithContext(ctx, input) + _, err := conn.CreateReplicationTask(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating DMS Replication Task (%s): %s", taskID, err) @@ -191,9 +193,9 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, func resourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - task, err := FindReplicationTaskByID(ctx, conn, d.Id()) + task, err := findReplicationTaskByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Replication Task (%s) not found, removing 
from state", d.Id()) @@ -221,7 +223,7 @@ func resourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, me func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll, "replication_instance_arn", "start_replication_task") { if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { @@ -229,7 +231,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } input := &dms.ModifyReplicationTaskInput{ - MigrationType: aws.String(d.Get("migration_type").(string)), + MigrationType: awstypes.MigrationTypeValue(d.Get("migration_type").(string)), ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), TableMappings: aws.String(d.Get("table_mappings").(string)), } @@ -259,7 +261,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } } - _, err := conn.ModifyReplicationTaskWithContext(ctx, input) + _, err := conn.ModifyReplicationTask(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying DMS Replication Task (%s): %s", d.Id(), err) @@ -286,7 +288,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, TargetReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), } - _, err := conn.MoveReplicationTaskWithContext(ctx, input) + _, err := conn.MoveReplicationTask(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "moving DMS Replication Task (%s): %s", d.Id(), err) @@ -304,7 +306,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChanges("start_replication_task") { - var f func(context.Context, *dms.DatabaseMigrationService, string) error + var f func(context.Context, *dms.Client, string) error if 
d.Get("start_replication_task").(bool) { f = startReplicationTask } else { @@ -320,18 +322,18 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting DMS Replication Task: %s", d.Id()) - _, err := conn.DeleteReplicationTaskWithContext(ctx, &dms.DeleteReplicationTaskInput{ + _, err := conn.DeleteReplicationTask(ctx, &dms.DeleteReplicationTaskInput{ ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } @@ -346,12 +348,12 @@ func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, return diags } -func FindReplicationTaskByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { +func findReplicationTaskByID(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationTask, error) { input := &dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ + Filters: []awstypes.Filter{ { Name: aws.String("replication-task-id"), - Values: aws.StringSlice([]string{id}), + Values: []string{id}, }, }, } @@ -359,50 +361,43 @@ func FindReplicationTaskByID(ctx context.Context, conn *dms.DatabaseMigrationSer return findReplicationTask(ctx, conn, input) } -func findReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationTasksInput) (*dms.ReplicationTask, error) { +func findReplicationTask(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationTasksInput) (*awstypes.ReplicationTask, error) { 
output, err := findReplicationTasks(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationTasksInput) ([]*dms.ReplicationTask, error) { - var output []*dms.ReplicationTask +func findReplicationTasks(ctx context.Context, conn *dms.Client, input *dms.DescribeReplicationTasksInput) ([]awstypes.ReplicationTask, error) { + var output []awstypes.ReplicationTask - err := conn.DescribeReplicationTasksPagesWithContext(ctx, input, func(page *dms.DescribeReplicationTasksOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := dms.NewDescribeReplicationTasksPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.ReplicationTasks { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.ReplicationTasks...) 
} return output, nil } -func statusReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) retry.StateRefreshFunc { +func statusReplicationTask(ctx context.Context, conn *dms.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindReplicationTaskByID(ctx, conn, id) + output, err := findReplicationTaskByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -412,24 +407,24 @@ func statusReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationServi return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } -func setLastReplicationTaskError(err error, replication *dms.ReplicationTask) { +func setLastReplicationTaskError(err error, replication *awstypes.ReplicationTask) { var errs []error - if v := aws.StringValue(replication.LastFailureMessage); v != "" { + if v := aws.ToString(replication.LastFailureMessage); v != "" { errs = append(errs, errors.New(v)) } - if v := aws.StringValue(replication.StopReason); v != "" { + if v := aws.ToString(replication.StopReason); v != "" { errs = append(errs, errors.New(v)) } tfresource.SetLastError(err, errors.Join(errs...)) } -func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { +func waitReplicationTaskDeleted(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusDeleting}, Target: []string{}, @@ -441,7 +436,7 @@ func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigration outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ 
-449,7 +444,7 @@ func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigration return nil, err } -func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { +func waitReplicationTaskModified(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusModifying}, Target: []string{replicationTaskStatusReady, replicationTaskStatusStopped, replicationTaskStatusFailed}, @@ -461,7 +456,7 @@ func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigratio outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -469,7 +464,7 @@ func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigratio return nil, err } -func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { +func waitReplicationTaskMoved(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusModifying, replicationTaskStatusMoving}, Target: []string{replicationTaskStatusReady, replicationTaskStatusStopped, replicationTaskStatusFailed}, @@ -481,7 +476,7 @@ func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationSe outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -489,7 +484,7 @@ func waitReplicationTaskMoved(ctx context.Context, conn 
*dms.DatabaseMigrationSe return nil, err } -func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { +func waitReplicationTaskReady(ctx context.Context, conn *dms.Client, id string, timeout time.Duration) (*awstypes.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusCreating}, Target: []string{replicationTaskStatusReady}, @@ -501,7 +496,7 @@ func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationSe outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -509,7 +504,7 @@ func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationSe return nil, err } -func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { +func waitReplicationTaskRunning(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationTask, error) { const ( timeout = 5 * time.Minute ) @@ -524,7 +519,7 @@ func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigration outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -532,7 +527,7 @@ func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigration return nil, err } -func waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { +func waitReplicationTaskStopped(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationTask, error) { const ( timeout = 5 * time.Minute ) @@ -548,7 +543,7 @@ func 
waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigration outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -556,7 +551,7 @@ func waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigration return nil, err } -func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { +func waitReplicationTaskSteady(ctx context.Context, conn *dms.Client, id string) (*awstypes.ReplicationTask, error) { const ( timeout = 5 * time.Minute ) @@ -572,7 +567,7 @@ func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationS outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*dms.ReplicationTask); ok { + if output, ok := outputRaw.(*awstypes.ReplicationTask); ok { setLastReplicationTaskError(err, output) return output, err } @@ -580,28 +575,28 @@ func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationS return nil, err } -func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { - task, err := FindReplicationTaskByID(ctx, conn, id) +func startReplicationTask(ctx context.Context, conn *dms.Client, id string) error { + task, err := findReplicationTaskByID(ctx, conn, id) if err != nil { return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) } - taskStatus := aws.StringValue(task.Status) + taskStatus := aws.ToString(task.Status) if taskStatus == replicationTaskStatusRunning { return nil } - startReplicationTaskType := dms.StartReplicationTaskTypeValueStartReplication + startReplicationTaskType := awstypes.StartReplicationTaskTypeValueStartReplication if taskStatus != replicationTaskStatusReady { - startReplicationTaskType = dms.StartReplicationTaskTypeValueResumeProcessing + 
startReplicationTaskType = awstypes.StartReplicationTaskTypeValueResumeProcessing } input := &dms.StartReplicationTaskInput{ ReplicationTaskArn: task.ReplicationTaskArn, - StartReplicationTaskType: aws.String(startReplicationTaskType), + StartReplicationTaskType: startReplicationTaskType, } - _, err = conn.StartReplicationTaskWithContext(ctx, input) + _, err = conn.StartReplicationTask(ctx, input) if err != nil { return fmt.Errorf("starting DMS Replication Task (%s): %w", id, err) @@ -614,8 +609,8 @@ func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationServic return nil } -func stopReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { - task, err := FindReplicationTaskByID(ctx, conn, id) +func stopReplicationTask(ctx context.Context, conn *dms.Client, id string) error { + task, err := findReplicationTaskByID(ctx, conn, id) if tfresource.NotFound(err) { return nil @@ -625,7 +620,7 @@ func stopReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) } - taskStatus := aws.StringValue(task.Status) + taskStatus := aws.ToString(task.Status) if taskStatus != replicationTaskStatusRunning { return nil } @@ -634,9 +629,9 @@ func stopReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService ReplicationTaskArn: task.ReplicationTaskArn, } - _, err = conn.StopReplicationTaskWithContext(ctx, input) + _, err = conn.StopReplicationTask(ctx, input) - if tfawserr.ErrMessageContains(err, dms.ErrCodeInvalidResourceStateFault, "is currently not running") { + if errs.IsAErrorMessageContains[*awstypes.InvalidResourceStateFault](err, "is currently not running") { return nil } diff --git a/internal/service/dms/replication_task_data_source.go b/internal/service/dms/replication_task_data_source.go index 57faa6b7e3c..6c8db2e6e0a 100644 --- a/internal/service/dms/replication_task_data_source.go +++ 
b/internal/service/dms/replication_task_data_source.go @@ -6,7 +6,7 @@ package dms import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -15,8 +15,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_dms_replication_task") -func DataSourceReplicationTask() *schema.Resource { +// @SDKDataSource("aws_dms_replication_task", name="Replication Task") +func dataSourceReplicationTask() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceReplicationTaskRead, @@ -77,19 +77,18 @@ func DataSourceReplicationTask() *schema.Resource { func dataSourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig taskID := d.Get("replication_task_id").(string) - - task, err := FindReplicationTaskByID(ctx, conn, taskID) + task, err := findReplicationTaskByID(ctx, conn, taskID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading DMS Replication Task (%s): %s", taskID, err) } - d.SetId(aws.StringValue(task.ReplicationTaskIdentifier)) + d.SetId(aws.ToString(task.ReplicationTaskIdentifier)) d.Set("cdc_start_position", task.CdcStartPosition) d.Set("migration_type", task.MigrationType) d.Set("replication_instance_arn", task.ReplicationInstanceArn) @@ -101,7 +100,7 @@ func dataSourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, d.Set("table_mappings", task.TableMappings) d.Set("target_endpoint_arn", task.TargetEndpointArn) - tags, err := listTags(ctx, conn, aws.StringValue(task.ReplicationTaskArn)) + tags, err := 
listTags(ctx, conn, aws.ToString(task.ReplicationTaskArn)) if err != nil { return sdkdiag.AppendErrorf(diags, "listing DMS Replication Task (%s) tags: %s", d.Id(), err) diff --git a/internal/service/dms/replication_task_data_source_test.go b/internal/service/dms/replication_task_data_source_test.go index 06ca0f53db2..5f738c76c6d 100644 --- a/internal/service/dms/replication_task_data_source_test.go +++ b/internal/service/dms/replication_task_data_source_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -19,7 +19,7 @@ func TestAccDMSReplicationTaskDataSource_basic(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" dataSourceName := "data.aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index bfd34cdbe32..77a881ceaad 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -13,15 +13,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -30,12 +31,12 @@ import ( func TestAccDMSReplicationTask_basic(t *testing.T) { t.Parallel() - for _, migrationType := range dms.MigrationTypeValue_Values() { //nolint:paralleltest // false positive + for _, migrationType := range enum.Values[awstypes.MigrationTypeValue]() { //nolint:paralleltest // false positive t.Run(migrationType, func(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -79,7 +80,7 @@ func TestAccDMSReplicationTask_updateSettingsAndMappings(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -185,7 +186,7 @@ func TestAccDMSReplicationTask_settings_EnableLogging(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -202,7 +203,7 @@ func 
TestAccDMSReplicationTask_settings_EnableLogging(t *testing.T) { acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.LogComponents[?Id=='DATA_STRUCTURE'].Severity | [0]", "LOGGER_SEVERITY_DEFAULT"), acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.CloudWatchLogGroup", fmt.Sprintf("dms-tasks-%s", rName)), func(s *terraform.State) error { - arn, err := arn.Parse(aws.StringValue(v.ReplicationTaskArn)) + arn, err := arn.Parse(aws.ToString(v.ReplicationTaskArn)) if err != nil { return err } @@ -230,7 +231,7 @@ func TestAccDMSReplicationTask_settings_EnableLogging(t *testing.T) { acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.LogComponents[?Id=='DATA_STRUCTURE'].Severity | [0]", "LOGGER_SEVERITY_DEFAULT"), acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.CloudWatchLogGroup", fmt.Sprintf("dms-tasks-%s", rName)), func(s *terraform.State) error { - arn, err := arn.Parse(aws.StringValue(v.ReplicationTaskArn)) + arn, err := arn.Parse(aws.ToString(v.ReplicationTaskArn)) if err != nil { return err } @@ -263,7 +264,7 @@ func TestAccDMSReplicationTask_settings_EnableLogging(t *testing.T) { acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.LogComponents[?Id=='DATA_STRUCTURE'].Severity | [0]", "LOGGER_SEVERITY_DEFAULT"), acctest.CheckResourceAttrJMES(resourceName, "replication_task_settings", "Logging.CloudWatchLogGroup", fmt.Sprintf("dms-tasks-%s", rName)), func(s *terraform.State) error { - arn, err := arn.Parse(aws.StringValue(v.ReplicationTaskArn)) + arn, err := arn.Parse(aws.ToString(v.ReplicationTaskArn)) if err != nil { return err } @@ -321,7 +322,7 @@ func TestAccDMSReplicationTask_settings_LogComponents(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask 
resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -354,7 +355,7 @@ func TestAccDMSReplicationTask_settings_StreamBuffer(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -385,7 +386,7 @@ func TestAccDMSReplicationTask_cdcStartPosition(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -415,7 +416,7 @@ func TestAccDMSReplicationTask_resourceIdentifier(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -448,7 +449,7 @@ func TestAccDMSReplicationTask_startReplicationTask(t *testing.T) { rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -491,7 +492,7 @@ func TestAccDMSReplicationTask_s3ToRDS(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask //https://github.com/hashicorp/terraform-provider-aws/issues/28277 @@ -521,7 +522,7 @@ func TestAccDMSReplicationTask_disappears(t *testing.T) { ctx := 
acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -545,7 +546,7 @@ func TestAccDMSReplicationTask_cdcStartTime_rfc3339_date(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask currentTime := time.Now().UTC() rfc3339Time := currentTime.Format(time.RFC3339) @@ -578,7 +579,7 @@ func TestAccDMSReplicationTask_cdcStartTime_unix_timestamp(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" - var v dms.ReplicationTask + var v awstypes.ReplicationTask currentTime := time.Now().UTC() rfc3339Time := currentTime.Format(time.RFC3339) @@ -615,7 +616,7 @@ func TestAccDMSReplicationTask_move(t *testing.T) { resourceName := "aws_dms_replication_task.test" instanceOne := "aws_dms_replication_instance.test" instanceTwo := "aws_dms_replication_instance.test2" - var v dms.ReplicationTask + var v awstypes.ReplicationTask resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -645,14 +646,14 @@ func TestAccDMSReplicationTask_move(t *testing.T) { }) } -func testAccCheckReplicationTaskExists(ctx context.Context, n string, v *dms.ReplicationTask) resource.TestCheckFunc { +func testAccCheckReplicationTaskExists(ctx context.Context, n string, v *awstypes.ReplicationTask) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) output, err := 
tfdms.FindReplicationTaskByID(ctx, conn, rs.Primary.ID) @@ -673,7 +674,7 @@ func testAccCheckReplicationTaskDestroy(ctx context.Context) resource.TestCheckF continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DMSClient(ctx) _, err := tfdms.FindReplicationTaskByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/dms/s3_endpoint.go b/internal/service/dms/s3_endpoint.go index 90833c20427..5357182061e 100644 --- a/internal/service/dms/s3_endpoint.go +++ b/internal/service/dms/s3_endpoint.go @@ -5,22 +5,22 @@ package dms import ( "context" - "errors" "fmt" "log" "strings" "time" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -29,7 +29,7 @@ import ( // @SDKResource("aws_dms_s3_endpoint", name="S3 Endpoint") // @Tags(identifierAttribute="endpoint_arn") -func ResourceS3Endpoint() 
*schema.Resource { +func resourceS3Endpoint() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceS3EndpointCreate, ReadWithoutTimeout: resourceS3EndpointRead, @@ -63,9 +63,9 @@ func ResourceS3Endpoint() *schema.Resource { ValidateFunc: validEndpointID, }, names.AttrEndpointType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(dms.ReplicationEndpointTypeValue_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.ReplicationEndpointTypeValue](), }, "engine_display_name": { Type: schema.TypeString, @@ -83,10 +83,10 @@ func ResourceS3Endpoint() *schema.Resource { ValidateFunc: verify.ValidARN, }, "ssl_mode": { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DmsSslModeValue_Values(), false), + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.DmsSslModeValue](), }, names.AttrStatus: { Type: schema.TypeString, @@ -114,9 +114,9 @@ func ResourceS3Endpoint() *schema.Resource { Required: true, }, "canned_acl_for_objects": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.CannedAclForObjectsValue_Values(), true), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.CannedAclForObjectsValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -146,10 +146,10 @@ func ResourceS3Endpoint() *schema.Resource { Optional: true, }, "compression_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.CompressionTypeValue_Values(), true), - Default: strings.ToUpper(dms.CompressionTypeValueNone), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.CompressionTypeValue](), + Default: strings.ToUpper(string(awstypes.CompressionTypeValueNone)), StateFunc: func(v 
interface{}) string { return strings.ToUpper(v.(string)) }, @@ -173,9 +173,9 @@ func ResourceS3Endpoint() *schema.Resource { Default: "\\n", }, "data_format": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DataFormatValue_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.DataFormatValue](), }, "data_page_size": { Type: schema.TypeInt, @@ -183,9 +183,9 @@ func ResourceS3Endpoint() *schema.Resource { ValidateFunc: validation.IntAtLeast(0), }, "date_partition_delimiter": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DatePartitionDelimiterValue_Values(), true), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.DatePartitionDelimiterValue](), StateFunc: func(v interface{}) string { return strings.ToUpper(v.(string)) }, @@ -196,9 +196,9 @@ func ResourceS3Endpoint() *schema.Resource { Default: false, }, "date_partition_sequence": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.DatePartitionSequenceValue_Values(), true), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.DatePartitionSequenceValue](), StateFunc: func(v interface{}) string { return strings.ToLower(v.(string)) }, @@ -222,9 +222,9 @@ func ResourceS3Endpoint() *schema.Resource { Default: true, }, "encoding_type": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.EncodingTypeValue_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.EncodingTypeValue](), }, "encryption_mode": { Type: schema.TypeString, @@ -272,9 +272,9 @@ func ResourceS3Endpoint() *schema.Resource { Default: false, }, "parquet_version": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(dms.ParquetVersionValue_Values(), false), + 
Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.ParquetVersionValue](), }, "preserve_transactions": { Type: schema.TypeBool, @@ -321,17 +321,14 @@ func ResourceS3Endpoint() *schema.Resource { } } -const ( - ResNameS3Endpoint = "S3 Endpoint" -) - func resourceS3EndpointCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) + endpointID := d.Get("endpoint_id").(string) input := &dms.CreateEndpointInput{ - EndpointIdentifier: aws.String(d.Get("endpoint_id").(string)), - EndpointType: aws.String(d.Get(names.AttrEndpointType).(string)), + EndpointIdentifier: aws.String(endpointID), + EndpointType: awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)), EngineName: aws.String("s3"), Tags: getTagsIn(ctx), } @@ -344,46 +341,28 @@ func resourceS3EndpointCreate(ctx context.Context, d *schema.ResourceData, meta input.KmsKeyId = aws.String(v.(string)) } - if v, ok := d.GetOk("ssl_mode"); ok { - input.SslMode = aws.String(v.(string)) - } - if v, ok := d.GetOk("service_access_role_arn"); ok { input.ServiceAccessRoleArn = aws.String(v.(string)) } - input.S3Settings = s3Settings(d, d.Get(names.AttrEndpointType).(string) == dms.ReplicationEndpointTypeValueTarget) - - input.ExtraConnectionAttributes = extraConnectionAnomalies(d) - - log.Println("[DEBUG] DMS create endpoint:", input) - - var out *dms.CreateEndpointOutput - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { - var err error - out, err = conn.CreateEndpointWithContext(ctx, input) + if v, ok := d.GetOk("ssl_mode"); ok { + input.SslMode = awstypes.DmsSslModeValue(v.(string)) + } - if tfawserr.ErrCodeEquals(err, "AccessDeniedFault") { - return retry.RetryableError(err) - } + input.S3Settings = s3Settings(d, d.Get(names.AttrEndpointType).(string) == 
string(awstypes.ReplicationEndpointTypeValueTarget)) - if err != nil { - return retry.NonRetryableError(err) - } + input.ExtraConnectionAttributes = extraConnectionAnomalies(d) - return nil + outputRaw, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { + return conn.CreateEndpoint(ctx, input) }) - if tfresource.TimedOut(err) { - out, err = conn.CreateEndpointWithContext(ctx, input) - } - - if err != nil || out == nil || out.Endpoint == nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionCreating, ResNameS3Endpoint, d.Get("endpoint_id").(string), err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating DMS S3 Endpoint (%s): %s", endpointID, err) } - d.SetId(d.Get("endpoint_id").(string)) - d.Set("endpoint_arn", out.Endpoint.EndpointArn) + d.SetId(endpointID) + d.Set("endpoint_arn", outputRaw.(*dms.CreateEndpointOutput).Endpoint.EndpointArn) // AWS bug? ssekki is ignored on create but sets on update if _, ok := d.GetOk("server_side_encryption_kms_key_id"); ok { @@ -395,9 +374,9 @@ func resourceS3EndpointCreate(ctx context.Context, d *schema.ResourceData, meta func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) - endpoint, err := FindEndpointByID(ctx, conn, d.Id()) + endpoint, err := findEndpointByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] DMS Endpoint (%s) not found, removing from state", d.Id()) @@ -405,19 +384,18 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in return diags } - if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionReading, ResNameS3Endpoint, d.Id(), err) + if err == nil && endpoint.S3Settings == nil { + err = tfresource.NewEmptyResultError(nil) } - if 
endpoint.S3Settings == nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionReading, ResNameS3Endpoint, d.Id(), errors.New("no settings returned")) + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating DMS S3 Endpoint (%s): %s", d.Id(), err) } d.Set("endpoint_arn", endpoint.EndpointArn) - d.Set(names.AttrCertificateARN, endpoint.CertificateArn) d.Set("endpoint_id", endpoint.EndpointIdentifier) - d.Set(names.AttrEndpointType, strings.ToLower(*endpoint.EndpointType)) // For some reason the AWS API only accepts lowercase type but returns it as uppercase + d.Set(names.AttrEndpointType, strings.ToLower(string(endpoint.EndpointType))) // For some reason the AWS API only accepts lowercase type but returns it as uppercase d.Set("engine_display_name", endpoint.EngineDisplayName) d.Set(names.AttrExternalID, endpoint.ExternalId) // d.Set("external_table_definition", endpoint.ExternalTableDefinition) // set from s3 settings @@ -426,7 +404,7 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("ssl_mode", endpoint.SslMode) d.Set(names.AttrStatus, endpoint.Status) - setDetachTargetOnLobLookupFailureParquet(d, aws.StringValue(endpoint.ExtraConnectionAttributes)) + setDetachTargetOnLobLookupFailureParquet(d, aws.ToString(endpoint.ExtraConnectionAttributes)) s3settings := endpoint.S3Settings d.Set("add_column_name", s3settings.AddColumnName) @@ -455,12 +433,12 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("timestamp_column_name", s3settings.TimestampColumnName) d.Set("use_task_start_time_for_full_load_timestamp", s3settings.UseTaskStartTimeForFullLoadTimestamp) - if d.Get(names.AttrEndpointType).(string) == dms.ReplicationEndpointTypeValueTarget { + if d.Get(names.AttrEndpointType).(string) == string(awstypes.ReplicationEndpointTypeValueTarget) { d.Set("add_trailing_padding_character", s3settings.AddTrailingPaddingCharacter) d.Set("compression_type", 
s3settings.CompressionType) d.Set("csv_no_sup_value", s3settings.CsvNoSupValue) d.Set("data_format", s3settings.DataFormat) - d.Set("date_partition_delimiter", strings.ToUpper(aws.StringValue(s3settings.DatePartitionDelimiter))) + d.Set("date_partition_delimiter", strings.ToUpper(string(s3settings.DatePartitionDelimiter))) d.Set("date_partition_enabled", s3settings.DatePartitionEnabled) d.Set("date_partition_sequence", s3settings.DatePartitionSequence) d.Set("date_partition_timezone", s3settings.DatePartitionTimezone) @@ -473,9 +451,9 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("use_csv_no_sup_value", s3settings.UseCsvNoSupValue) } - p, err := structure.NormalizeJsonString(aws.StringValue(s3settings.ExternalTableDefinition)) + p, err := structure.NormalizeJsonString(aws.ToString(s3settings.ExternalTableDefinition)) if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionSetting, ResNameS3Endpoint, d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } d.Set("external_table_definition", p) @@ -485,7 +463,7 @@ func resourceS3EndpointRead(ctx context.Context, d *schema.ResourceData, meta in func resourceS3EndpointUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &dms.ModifyEndpointInput{ @@ -497,13 +475,13 @@ func resourceS3EndpointUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange(names.AttrEndpointType) { - input.EndpointType = aws.String(d.Get(names.AttrEndpointType).(string)) + input.EndpointType = awstypes.ReplicationEndpointTypeValue(d.Get(names.AttrEndpointType).(string)) } input.EngineName = aws.String(engineNameS3) if d.HasChange("ssl_mode") { - input.SslMode = aws.String(d.Get("ssl_mode").(string)) + input.SslMode = 
awstypes.DmsSslModeValue(d.Get("ssl_mode").(string)) } if d.HasChangesExcept( @@ -511,34 +489,18 @@ func resourceS3EndpointUpdate(ctx context.Context, d *schema.ResourceData, meta names.AttrEndpointType, "ssl_mode", ) { - input.S3Settings = s3Settings(d, d.Get(names.AttrEndpointType).(string) == dms.ReplicationEndpointTypeValueTarget) + input.S3Settings = s3Settings(d, d.Get(names.AttrEndpointType).(string) == string(awstypes.ReplicationEndpointTypeValueTarget)) input.ServiceAccessRoleArn = aws.String(d.Get("service_access_role_arn").(string)) input.ExtraConnectionAttributes = extraConnectionAnomalies(d) } - log.Println("[DEBUG] DMS update endpoint:", input) - - err := retry.RetryContext(ctx, d.Timeout(schema.TimeoutCreate), func() *retry.RetryError { - _, err := conn.ModifyEndpointWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, "AccessDeniedFault") { - return retry.RetryableError(err) - } - - if err != nil { - return retry.NonRetryableError(err) - } - - return nil + _, err := tfresource.RetryWhenIsA[*awstypes.AccessDeniedFault](ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.ModifyEndpoint(ctx, input) }) - if tfresource.TimedOut(err) { - _, err = conn.ModifyEndpointWithContext(ctx, input) - } - if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionUpdating, ResNameS3Endpoint, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating DMS S3 Endpoint (%s): %s", d.Id(), err) } } @@ -547,30 +509,30 @@ func resourceS3EndpointUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceS3EndpointDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DMSConn(ctx) + conn := meta.(*conns.AWSClient).DMSClient(ctx) log.Printf("[DEBUG] Deleting DMS Endpoint: (%s)", d.Id()) - _, err := conn.DeleteEndpointWithContext(ctx, &dms.DeleteEndpointInput{ + _, err := conn.DeleteEndpoint(ctx, 
&dms.DeleteEndpointInput{ EndpointArn: aws.String(d.Get("endpoint_arn").(string)), }) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if errs.IsA[*awstypes.ResourceNotFoundFault](err) { return diags } if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionDeleting, ResNameS3Endpoint, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating DMS S3 Endpoint (%s): %s", d.Id(), err) } - if err = waitEndpointDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionWaitingForDeletion, ResNameS3Endpoint, d.Id(), err) + if _, err := waitEndpointDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS S3 Endpoint (%s) delete: %s", d.Id(), err) } return diags } -func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { - s3s := &dms.S3Settings{} +func s3Settings(d *schema.ResourceData, target bool) *awstypes.S3Settings { + s3s := &awstypes.S3Settings{} if v, ok := d.Get("add_column_name").(bool); ok { // likely only useful for target s3s.AddColumnName = aws.Bool(v) @@ -589,7 +551,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("canned_acl_for_objects"); ok { // likely only useful for target - s3s.CannedAclForObjects = aws.String(v.(string)) + s3s.CannedAclForObjects = awstypes.CannedAclForObjectsValue(v.(string)) } if v, ok := d.Get("cdc_inserts_and_updates").(bool); ok { // likely only useful for target @@ -601,11 +563,11 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("cdc_max_batch_interval"); ok { // likely only useful for target - s3s.CdcMaxBatchInterval = aws.Int64(int64(v.(int))) + s3s.CdcMaxBatchInterval = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("cdc_min_file_size"); ok { // likely only useful for target - s3s.CdcMinFileSize = 
aws.Int64(int64(v.(int))) + s3s.CdcMinFileSize = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("cdc_path"); ok { @@ -613,7 +575,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("compression_type"); ok && target { // likely only useful for target - s3s.CompressionType = aws.String(v.(string)) + s3s.CompressionType = awstypes.CompressionTypeValue(v.(string)) } if v, ok := d.GetOk("csv_delimiter"); ok { @@ -633,15 +595,15 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("data_format"); ok && target { // target - s3s.DataFormat = aws.String(v.(string)) + s3s.DataFormat = awstypes.DataFormatValue(v.(string)) } if v, ok := d.GetOk("data_page_size"); ok { // likely only useful for target - s3s.DataPageSize = aws.Int64(int64(v.(int))) + s3s.DataPageSize = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("date_partition_delimiter"); ok && target { // target - s3s.DatePartitionDelimiter = aws.String(v.(string)) + s3s.DatePartitionDelimiter = awstypes.DatePartitionDelimiterValue(v.(string)) } if v, ok := d.Get("date_partition_enabled").(bool); ok && target { // likely only useful for target @@ -649,7 +611,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("date_partition_sequence"); ok && target { // target - s3s.DatePartitionSequence = aws.String(v.(string)) + s3s.DatePartitionSequence = awstypes.DatePartitionSequenceValue(v.(string)) } if v, ok := d.GetOk("date_partition_timezone"); ok && target { // target @@ -657,7 +619,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("dict_page_size_limit"); ok { // likely only useful for target - s3s.DictPageSizeLimit = aws.Int64(int64(v.(int))) + s3s.DictPageSizeLimit = aws.Int32(int32(v.(int))) } if v, ok := d.Get("enable_statistics").(bool); ok { // likely only useful for target @@ -665,11 +627,11 @@ func s3Settings(d *schema.ResourceData, 
target bool) *dms.S3Settings { } if v, ok := d.GetOk("encoding_type"); ok { // likely only useful for target - s3s.EncodingType = aws.String(v.(string)) + s3s.EncodingType = awstypes.EncodingTypeValue(v.(string)) } if v, ok := d.GetOk("encryption_mode"); ok && target { // target - s3s.EncryptionMode = aws.String(v.(string)) + s3s.EncryptionMode = awstypes.EncryptionModeValue(v.(string)) } if v, ok := d.GetOk(names.AttrExpectedBucketOwner); ok { // likely only useful for target @@ -685,7 +647,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("ignore_header_rows"); ok { - s3s.IgnoreHeaderRows = aws.Int64(int64(v.(int))) + s3s.IgnoreHeaderRows = aws.Int32(int32(v.(int))) } if v, ok := d.Get("include_op_for_full_load").(bool); ok { // likely only useful for target @@ -693,7 +655,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("max_file_size"); ok { // likely only useful for target - s3s.MaxFileSize = aws.Int64(int64(v.(int))) + s3s.MaxFileSize = aws.Int32(int32(v.(int))) } if v, ok := d.Get("parquet_timestamp_in_millisecond").(bool); ok && target { // target @@ -701,7 +663,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("parquet_version"); ok && target { // target - s3s.ParquetVersion = aws.String(v.(string)) + s3s.ParquetVersion = awstypes.ParquetVersionValue(v.(string)) } if v, ok := d.Get("preserve_transactions").(bool); ok && target { // target @@ -713,7 +675,7 @@ func s3Settings(d *schema.ResourceData, target bool) *dms.S3Settings { } if v, ok := d.GetOk("row_group_length"); ok { // likely only useful for target - s3s.RowGroupLength = aws.Int64(int64(v.(int))) + s3s.RowGroupLength = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("server_side_encryption_kms_key_id"); ok && target { // target diff --git a/internal/service/dms/service_endpoint_resolver_gen.go b/internal/service/dms/service_endpoint_resolver_gen.go new 
file mode 100644 index 00000000000..efa8af3ae75 --- /dev/null +++ b/internal/service/dms/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package dms + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + databasemigrationservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ databasemigrationservice_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver databasemigrationservice_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: databasemigrationservice_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params databasemigrationservice_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + 
tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up databasemigrationservice endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*databasemigrationservice_sdkv2.Options) { + return func(o *databasemigrationservice_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/dms/service_endpoints_gen_test.go b/internal/service/dms/service_endpoints_gen_test.go index 04bedfc4c45..3491fffc9bb 100644 --- a/internal/service/dms/service_endpoints_gen_test.go +++ b/internal/service/dms/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package dms_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - databasemigrationservice_sdkv1 "github.com/aws/aws-sdk-go/service/databasemigrationservice" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + databasemigrationservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/go-cty/cty" @@ -90,7 +95,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -330,7 +335,7 @@ func 
TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -351,55 +356,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := databasemigrationservice_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(databasemigrationservice_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), databasemigrationservice_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := databasemigrationservice_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(databasemigrationservice_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), databasemigrationservice_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.DMSConn(ctx) - - req, _ := 
client.DescribeCertificatesRequest(&databasemigrationservice_sdkv1.DescribeCertificatesInput{}) + client := meta.DMSClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.DescribeCertificates(ctx, &databasemigrationservice_sdkv2.DescribeCertificatesInput{}, + func(opts *databasemigrationservice_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -466,16 +480,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving dms default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving dms FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up dms endpoint %q: %s", hostname, err) + } + return caseExpectations{ - 
endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -600,6 +636,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + 
) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/dms/service_package_gen.go b/internal/service/dms/service_package_gen.go index 8f670d8a92a..66ddfdbc8ac 100644 --- a/internal/service/dms/service_package_gen.go +++ b/internal/service/dms/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package dms import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - databasemigrationservice_sdkv1 "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + databasemigrationservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -28,24 +25,29 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceCertificate, + Factory: dataSourceCertificate, TypeName: "aws_dms_certificate", + Name: "Certificate", }, { - Factory: DataSourceEndpoint, + Factory: dataSourceEndpoint, TypeName: "aws_dms_endpoint", + Name: "Endpoint", }, { - Factory: DataSourceReplicationInstance, + Factory: dataSourceReplicationInstance, TypeName: "aws_dms_replication_instance", + Name: "Replication Instance", }, { - Factory: DataSourceReplicationSubnetGroup, + Factory: dataSourceReplicationSubnetGroup, TypeName: "aws_dms_replication_subnet_group", + Name: "Replication Subnet Group", }, { - Factory: DataSourceReplicationTask, + Factory: dataSourceReplicationTask, TypeName: "aws_dms_replication_task", + Name: "Replication Task", }, } } @@ -53,7 +55,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceCertificate, + Factory: resourceCertificate, TypeName: "aws_dms_certificate", Name: "Certificate", 
Tags: &types.ServicePackageResourceTags{ @@ -61,7 +63,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceEndpoint, + Factory: resourceEndpoint, TypeName: "aws_dms_endpoint", Name: "Endpoint", Tags: &types.ServicePackageResourceTags{ @@ -69,7 +71,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceEventSubscription, + Factory: resourceEventSubscription, TypeName: "aws_dms_event_subscription", Name: "Event Subscription", Tags: &types.ServicePackageResourceTags{ @@ -77,7 +79,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationConfig, + Factory: resourceReplicationConfig, TypeName: "aws_dms_replication_config", Name: "Replication Config", Tags: &types.ServicePackageResourceTags{ @@ -85,7 +87,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationInstance, + Factory: resourceReplicationInstance, TypeName: "aws_dms_replication_instance", Name: "Replication Instance", Tags: &types.ServicePackageResourceTags{ @@ -93,7 +95,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationSubnetGroup, + Factory: resourceReplicationSubnetGroup, TypeName: "aws_dms_replication_subnet_group", Name: "Replication Subnet Group", Tags: &types.ServicePackageResourceTags{ @@ -101,7 +103,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceReplicationTask, + Factory: resourceReplicationTask, TypeName: "aws_dms_replication_task", Name: "Replication Task", Tags: &types.ServicePackageResourceTags{ @@ -109,7 +111,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceS3Endpoint, + Factory: resourceS3Endpoint, TypeName: "aws_dms_s3_endpoint", Name: "S3 
Endpoint", Tags: &types.ServicePackageResourceTags{ @@ -123,25 +125,14 @@ func (p *servicePackage) ServicePackageName() string { return names.DMS } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*databasemigrationservice_sdkv1.DatabaseMigrationService, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*databasemigrationservice_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return databasemigrationservice_sdkv1.New(sess.Copy(&cfg)), nil + return databasemigrationservice_sdkv2.NewFromConfig(cfg, + databasemigrationservice_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/dms/sweep.go b/internal/service/dms/sweep.go index 85184a1c14e..06a186c4da2 100644 --- a/internal/service/dms/sweep.go +++ b/internal/service/dms/sweep.go @@ -7,11 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" + "github.com/aws/aws-sdk-go-v2/aws" + dms "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -56,34 +56,31 @@ func sweepEndpoints(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.DMSConn(ctx) + conn := client.DMSClient(ctx) input := &dms.DescribeEndpointsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeEndpointsPagesWithContext(ctx, input, func(page *dms.DescribeEndpointsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := dms.NewDescribeEndpointsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping DMS Endpoint sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing DMS Endpoints (%s): %w", region, err) } for _, v := range page.Endpoints { - r := ResourceEndpoint() + r := resourceEndpoint() d := r.Data(nil) - d.SetId(aws.StringValue(v.EndpointIdentifier)) + d.SetId(aws.ToString(v.EndpointIdentifier)) d.Set("endpoint_arn", v.EndpointArn) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping DMS Endpoint sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing DMS Endpoints (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -101,33 +98,30 @@ func sweepReplicationConfigs(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.DMSConn(ctx) + conn := client.DMSClient(ctx) input := &dms.DescribeReplicationConfigsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = 
conn.DescribeReplicationConfigsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationConfigsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := dms.NewDescribeReplicationConfigsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping DMS Replication Config sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing DMS Replication Configs (%s): %w", region, err) } for _, v := range page.ReplicationConfigs { - r := ResourceReplicationConfig() + r := resourceReplicationConfig() d := r.Data(nil) - d.SetId(aws.StringValue(v.ReplicationConfigArn)) + d.SetId(aws.ToString(v.ReplicationConfigArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping DMS Replication Config sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing DMS Replication Configs (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -145,34 +139,31 @@ func sweepReplicationInstances(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.DMSConn(ctx) + conn := client.DMSClient(ctx) input := &dms.DescribeReplicationInstancesInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeReplicationInstancesPagesWithContext(ctx, input, func(page *dms.DescribeReplicationInstancesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := dms.NewDescribeReplicationInstancesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping DMS Replication Instance sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing DMS Replication 
Instances (%s): %w", region, err) } for _, v := range page.ReplicationInstances { - r := ResourceReplicationInstance() + r := resourceReplicationInstance() d := r.Data(nil) - d.SetId(aws.StringValue(v.ReplicationInstanceIdentifier)) + d.SetId(aws.ToString(v.ReplicationInstanceIdentifier)) d.Set("replication_instance_arn", v.ReplicationInstanceArn) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping DMS Replication Instance sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing DMS Replication Instances (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -190,33 +181,30 @@ func sweepReplicationSubnetGroups(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.DMSConn(ctx) + conn := client.DMSClient(ctx) input := &dms.DescribeReplicationSubnetGroupsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeReplicationSubnetGroupsPagesWithContext(ctx, input, func(page *dms.DescribeReplicationSubnetGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := dms.NewDescribeReplicationSubnetGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping DMS Replication Subnet Group sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing DMS Replication Subnet Groups (%s): %w", region, err) } for _, v := range page.ReplicationSubnetGroups { - r := ResourceReplicationSubnetGroup() + r := resourceReplicationSubnetGroup() d := r.Data(nil) - d.SetId(aws.StringValue(v.ReplicationSubnetGroupIdentifier)) + d.SetId(aws.ToString(v.ReplicationSubnetGroupIdentifier)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return 
!lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping DMS Replication Subnet Group sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing DMS Replication Subnet Groups (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -234,36 +222,33 @@ func sweepReplicationTasks(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.DMSConn(ctx) + conn := client.DMSClient(ctx) input := &dms.DescribeReplicationTasksInput{ WithoutSettings: aws.Bool(true), } sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeReplicationTasksPagesWithContext(ctx, input, func(page *dms.DescribeReplicationTasksOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := dms.NewDescribeReplicationTasksPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping DMS Replication Task sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing DMS Replication Tasks (%s): %w", region, err) } for _, v := range page.ReplicationTasks { - r := ResourceReplicationTask() + r := resourceReplicationTask() d := r.Data(nil) - d.SetId(aws.StringValue(v.ReplicationTaskIdentifier)) + d.SetId(aws.ToString(v.ReplicationTaskIdentifier)) d.Set("replication_task_arn", v.ReplicationTaskArn) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping DMS Replication Task sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing DMS Replication Tasks (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) diff --git a/internal/service/dms/tags_gen.go b/internal/service/dms/tags_gen.go index 48393cf0070..b02092634a2 100644 --- 
a/internal/service/dms/tags_gen.go +++ b/internal/service/dms/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/aws/aws-sdk-go/service/databasemigrationservice/databasemigrationserviceiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/databasemigrationservice/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists dms service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn databasemigrationserviceiface.DatabaseMigrationServiceAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *databasemigrationservice.Client, identifier string, optFns ...func(*databasemigrationservice.Options)) (tftags.KeyValueTags, error) { input := &databasemigrationservice.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn databasemigrationserviceiface.DatabaseMi // ListTags lists dms service tags and set them in Context. // It is called from outside this package. 
func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).DMSConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).DMSClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns dms service tags. -func Tags(tags tftags.KeyValueTags) []*databasemigrationservice.Tag { - result := make([]*databasemigrationservice.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &databasemigrationservice.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*databasemigrationservice.Tag { } // KeyValueTags creates tftags.KeyValueTags from databasemigrationservice service tags. -func KeyValueTags(ctx context.Context, tags []*databasemigrationservice.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*databasemigrationservice.Tag) tft // getTagsIn returns dms service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*databasemigrationservice.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*databasemigrationservice.Tag { } // setTagsOut sets dms service tags in Context. 
-func setTagsOut(ctx context.Context, tags []*databasemigrationservice.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*databasemigrationservice.Tag) { // updateTags updates dms service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn databasemigrationserviceiface.DatabaseMigrationServiceAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *databasemigrationservice.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*databasemigrationservice.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn databasemigrationserviceiface.Database if len(removedTags) > 0 { input := &databasemigrationservice.RemoveTagsFromResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.RemoveTagsFromResourceWithContext(ctx, input) + _, err := conn.RemoveTagsFromResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn databasemigrationserviceiface.Database Tags: Tags(updatedTags), } - _, err := conn.AddTagsToResourceWithContext(ctx, input) + _, err := conn.AddTagsToResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn databasemigrationserviceiface.Database // UpdateTags updates dms service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).DMSConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).DMSClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/docdb/service_endpoint_resolver_gen.go b/internal/service/docdb/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..8d551cfd7b9 --- /dev/null +++ b/internal/service/docdb/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package docdb + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + docdb_sdkv2 "github.com/aws/aws-sdk-go-v2/service/docdb" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ docdb_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver docdb_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: docdb_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params docdb_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + 
if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up docdb endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*docdb_sdkv2.Options) { + return func(o *docdb_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/docdb/service_endpoints_gen_test.go b/internal/service/docdb/service_endpoints_gen_test.go index 5c1b1b68949..75c8751a3de 100644 --- a/internal/service/docdb/service_endpoints_gen_test.go +++ b/internal/service/docdb/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { 
//nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := docdb_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), docdb_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := docdb_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), docdb_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && 
dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/docdb/service_package_gen.go b/internal/service/docdb/service_package_gen.go index d06fe85387c..a6154e666ba 100644 --- a/internal/service/docdb/service_package_gen.go +++ b/internal/service/docdb/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package docdb @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" docdb_sdkv2 "github.com/aws/aws-sdk-go-v2/service/docdb" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -97,19 +96,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*docdb_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return docdb_sdkv2.NewFromConfig(cfg, func(o *docdb_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return docdb_sdkv2.NewFromConfig(cfg, + docdb_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + 
withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/docdbelastic/service_endpoint_resolver_gen.go b/internal/service/docdbelastic/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..2f0ca4f25f2 --- /dev/null +++ b/internal/service/docdbelastic/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package docdbelastic + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + docdbelastic_sdkv2 "github.com/aws/aws-sdk-go-v2/service/docdbelastic" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ docdbelastic_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver docdbelastic_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: docdbelastic_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params docdbelastic_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + 
"tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up docdbelastic endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*docdbelastic_sdkv2.Options) { + return func(o *docdbelastic_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/docdbelastic/service_endpoints_gen_test.go b/internal/service/docdbelastic/service_endpoints_gen_test.go index a9ac7cf6b26..874c229c4e0 100644 --- a/internal/service/docdbelastic/service_endpoints_gen_test.go +++ b/internal/service/docdbelastic/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func 
defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := docdbelastic_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), docdbelastic_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := docdbelastic_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), docdbelastic_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + 
return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/docdbelastic/service_package_gen.go b/internal/service/docdbelastic/service_package_gen.go index 6a18acda69e..4a9d0907313 100644 --- a/internal/service/docdbelastic/service_package_gen.go +++ b/internal/service/docdbelastic/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package docdbelastic @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" docdbelastic_sdkv2 "github.com/aws/aws-sdk-go-v2/service/docdbelastic" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -47,19 +46,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*docdbelastic_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return docdbelastic_sdkv2.NewFromConfig(cfg, func(o *docdbelastic_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return docdbelastic_sdkv2.NewFromConfig(cfg, + 
docdbelastic_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/drs/consts.go b/internal/service/drs/consts.go new file mode 100644 index 00000000000..f9c32fcac45 --- /dev/null +++ b/internal/service/drs/consts.go @@ -0,0 +1,10 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package drs + +// Exports for use in tests only. +const ( + ResNameReplicationConfigurationTemplate = "Replication Configuration Template" + ResPrefixReplicationConfigurationTemplate = "ReplicationConfigurationTemplate" +) diff --git a/internal/service/drs/exports_test.go b/internal/service/drs/exports_test.go new file mode 100644 index 00000000000..810adaafe6c --- /dev/null +++ b/internal/service/drs/exports_test.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package drs + +// Exports for use in tests only. +var ( + ResourceReplicationConfigurationTemplate = newReplicationConfigurationTemplateResource + + FindReplicationConfigurationTemplateByID = findReplicationConfigurationTemplateByID +) diff --git a/internal/service/drs/generate.go b/internal/service/drs/generate.go index 590623b58dc..04beee97cc0 100644 --- a/internal/service/drs/generate.go +++ b/internal/service/drs/generate.go @@ -1,8 +1,8 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ListTagsInIDElem=ResourceArn -ServiceTagsMap -SkipTypesImp -KVTValues -TagOp=TagResource -TagInIDElem=ResourceArn -UntagOp=UntagResource -CreateTags -ListTags -UpdateTags //go:generate go run ../../generate/servicepackage/main.go -//go:generate go run ../../generate/tagstests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
package drs diff --git a/internal/service/drs/replication_configuration_template.go b/internal/service/drs/replication_configuration_template.go new file mode 100644 index 00000000000..2a610146702 --- /dev/null +++ b/internal/service/drs/replication_configuration_template.go @@ -0,0 +1,459 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package drs + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/drs" + awstypes "github.com/aws/aws-sdk-go-v2/service/drs/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource(name="Replication Configuration Template") +// @Tags(identifierAttribute="arn") +func 
newReplicationConfigurationTemplateResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &replicationConfigurationTemplateResource{} + + r.SetDefaultCreateTimeout(20 * time.Minute) + r.SetDefaultUpdateTimeout(20 * time.Minute) + r.SetDefaultDeleteTimeout(20 * time.Minute) + + return r, nil +} + +type replicationConfigurationTemplateResource struct { + framework.ResourceWithConfigure + framework.WithImportByID + framework.WithTimeouts +} + +func (r *replicationConfigurationTemplateResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_drs_replication_configuration_template" +} + +func (r *replicationConfigurationTemplateResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: schema.StringAttribute{ + Computed: true, + }, + "associate_default_security_group": schema.BoolAttribute{ + Required: true, + }, + "auto_replicate_new_disks": schema.BoolAttribute{ + Computed: true, + Optional: true, + PlanModifiers: []planmodifier.Bool{ + boolplanmodifier.UseStateForUnknown(), + }, + }, + "bandwidth_throttling": schema.Int64Attribute{ + Required: true, + }, + "create_public_ip": schema.BoolAttribute{ + Required: true, + }, + "data_plane_routing": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[awstypes.ReplicationConfigurationDataPlaneRouting](), + }, + "default_large_staging_disk_type": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[awstypes.ReplicationConfigurationDefaultLargeStagingDiskType](), + }, + "ebs_encryption": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[awstypes.ReplicationConfigurationEbsEncryption](), + }, + "ebs_encryption_key_arn": schema.StringAttribute{ + Optional: true, + }, + names.AttrID: schema.StringAttribute{ + Computed: 
true, + }, + "replication_server_instance_type": schema.StringAttribute{ + Required: true, + }, + "replication_servers_security_groups_ids": schema.ListAttribute{ + Required: true, + ElementType: types.StringType, + }, + "staging_area_subnet_id": schema.StringAttribute{ + Required: true, + }, + + "staging_area_tags": tftags.TagsAttribute(), + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + + "use_dedicated_replication_server": schema.BoolAttribute{ + Required: true, + }, + }, + Blocks: map[string]schema.Block{ + "pit_policy": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[pitPolicy](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrEnabled: schema.BoolAttribute{ + Optional: true, + }, + names.AttrInterval: schema.Int64Attribute{ + Required: true, + }, + "retention_duration": schema.Int64Attribute{ + Required: true, + }, + "rule_id": schema.Int64Attribute{ + Optional: true, + }, + "units": schema.StringAttribute{ + Required: true, + CustomType: fwtypes.StringEnumType[awstypes.PITPolicyRuleUnits](), + }, + }, + }, + Validators: []validator.List{ + listvalidator.SizeAtLeast(1), + }, + }, + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + }, + } +} + +func (r *replicationConfigurationTemplateResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data replicationConfigurationTemplateResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().DRSClient(ctx) + + input := &drs.CreateReplicationConfigurationTemplateInput{} + response.Diagnostics.Append(flex.Expand(context.WithValue(ctx, flex.ResourcePrefix, ResPrefixReplicationConfigurationTemplate), data, input)...) 
+ if response.Diagnostics.HasError() { + return + } + + input.Tags = getTagsIn(ctx) + + _, err := conn.CreateReplicationConfigurationTemplate(ctx, input) + if err != nil { + create.AddError(&response.Diagnostics, names.DRS, create.ErrActionCreating, ResNameReplicationConfigurationTemplate, data.ID.ValueString(), err) + + return + } + + output, err := waitReplicationConfigurationTemplateAvailable(ctx, conn, data.ID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { + create.AddError(&response.Diagnostics, names.DRS, create.ErrActionWaitingForCreation, ResNameReplicationConfigurationTemplate, data.ID.ValueString(), err) + + return + } + + response.Diagnostics.Append(flex.Flatten(context.WithValue(ctx, flex.ResourcePrefix, ResPrefixReplicationConfigurationTemplate), output, &data)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *replicationConfigurationTemplateResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data replicationConfigurationTemplateResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().DRSClient(ctx) + + output, err := findReplicationConfigurationTemplateByID(ctx, conn, data.ID.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + create.AddError(&response.Diagnostics, names.DRS, create.ErrActionReading, ResNameReplicationConfigurationTemplate, data.ID.ValueString(), err) + + return + } + + response.Diagnostics.Append(flex.Flatten(context.WithValue(ctx, flex.ResourcePrefix, ResPrefixReplicationConfigurationTemplate), output, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *replicationConfigurationTemplateResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new replicationConfigurationTemplateResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().DRSClient(ctx) + + if replicationConfigurationTemplateHasChanges(ctx, new, old) { + input := &drs.UpdateReplicationConfigurationTemplateInput{} + response.Diagnostics.Append(flex.Expand(context.WithValue(ctx, flex.ResourcePrefix, ResPrefixReplicationConfigurationTemplate), new, input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateReplicationConfigurationTemplate(ctx, input) + if err != nil { + create.AddError(&response.Diagnostics, names.DRS, create.ErrActionUpdating, ResNameReplicationConfigurationTemplate, new.ID.ValueString(), err) + + return + } + + if _, err := waitReplicationConfigurationTemplateAvailable(ctx, conn, old.ID.ValueString(), r.UpdateTimeout(ctx, new.Timeouts)); err != nil { + create.AddError(&response.Diagnostics, names.DRS, create.ErrActionWaitingForUpdate, ResNameReplicationConfigurationTemplate, new.ID.ValueString(), err) + + return + } + } + + output, err := findReplicationConfigurationTemplateByID(ctx, conn, old.ID.ValueString()) + if err != nil { + create.AddError(&response.Diagnostics, names.DRS, create.ErrActionUpdating, ResNameReplicationConfigurationTemplate, old.ID.ValueString(), err) + + return + } + + response.Diagnostics.Append(flex.Flatten(context.WithValue(ctx, flex.ResourcePrefix, ResPrefixReplicationConfigurationTemplate), output, &new)...) 
+ if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *replicationConfigurationTemplateResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data replicationConfigurationTemplateResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().DRSClient(ctx) + + tflog.Debug(ctx, "deleting DRS Replication Configuration Template", map[string]interface{}{ + names.AttrID: data.ID.ValueString(), + }) + + input := &drs.DeleteReplicationConfigurationTemplateInput{ + ReplicationConfigurationTemplateID: aws.String(data.ID.ValueString()), + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func() (interface{}, error) { + return conn.DeleteReplicationConfigurationTemplate(ctx, input) + }, "DependencyViolation") + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + create.AddError(&response.Diagnostics, names.DRS, create.ErrActionDeleting, ResNameReplicationConfigurationTemplate, data.ID.ValueString(), err) + + return + } + + if _, err := waitReplicationConfigurationTemplateDeleted(ctx, conn, data.ID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + create.AddError(&response.Diagnostics, names.DRS, create.ErrActionWaitingForDeletion, ResNameReplicationConfigurationTemplate, data.ID.ValueString(), err) + + return + } +} + +func (r *replicationConfigurationTemplateResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) +} + +func findReplicationConfigurationTemplate(ctx context.Context, conn *drs.Client, input *drs.DescribeReplicationConfigurationTemplatesInput) (*awstypes.ReplicationConfigurationTemplate, error) { + output, err := findReplicationConfigurationTemplates(ctx, conn, 
input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findReplicationConfigurationTemplates(ctx context.Context, conn *drs.Client, input *drs.DescribeReplicationConfigurationTemplatesInput) ([]awstypes.ReplicationConfigurationTemplate, error) { + var output []awstypes.ReplicationConfigurationTemplate + + pages := drs.NewDescribeReplicationConfigurationTemplatesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.Items...) + } + + return output, nil +} + +func findReplicationConfigurationTemplateByID(ctx context.Context, conn *drs.Client, id string) (*awstypes.ReplicationConfigurationTemplate, error) { + input := &drs.DescribeReplicationConfigurationTemplatesInput{ + //ReplicationConfigurationTemplateIDs: []string{id}, // Uncomment when SDK supports this, currently MAX of 1 so you find it anyway + } + + return findReplicationConfigurationTemplate(ctx, conn, input) +} + +const ( + replicationConfigurationTemplateAvailable = "AVAILABLE" +) + +func statusReplicationConfigurationTemplate(ctx context.Context, conn *drs.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findReplicationConfigurationTemplateByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + if err != nil { + return nil, "", err + } + + return output, replicationConfigurationTemplateAvailable, nil + } +} + +func waitReplicationConfigurationTemplateAvailable(ctx context.Context, conn *drs.Client, id string, timeout time.Duration) (*awstypes.ReplicationConfigurationTemplate, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{}, + Target: []string{replicationConfigurationTemplateAvailable}, + Refresh: 
statusReplicationConfigurationTemplate(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.ReplicationConfigurationTemplate); ok { + return output, err + } + + return nil, err +} + +func waitReplicationConfigurationTemplateDeleted(ctx context.Context, conn *drs.Client, id string, timeout time.Duration) (*awstypes.ReplicationConfigurationTemplate, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{replicationConfigurationTemplateAvailable}, + Target: []string{}, + Refresh: statusReplicationConfigurationTemplate(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.ReplicationConfigurationTemplate); ok { + return output, err + } + + return nil, err +} + +type replicationConfigurationTemplateResourceModel struct { + ARN types.String `tfsdk:"arn"` + AssociateDefaultSecurityGroup types.Bool `tfsdk:"associate_default_security_group"` + AutoReplicateNewDisks types.Bool `tfsdk:"auto_replicate_new_disks"` + BandwidthThrottling types.Int64 `tfsdk:"bandwidth_throttling"` + CreatePublicIP types.Bool `tfsdk:"create_public_ip"` + DataPlaneRouting fwtypes.StringEnum[awstypes.ReplicationConfigurationDataPlaneRouting] `tfsdk:"data_plane_routing"` + DefaultLargeStagingDiskType fwtypes.StringEnum[awstypes.ReplicationConfigurationDefaultLargeStagingDiskType] `tfsdk:"default_large_staging_disk_type"` + EBSEncryption fwtypes.StringEnum[awstypes.ReplicationConfigurationEbsEncryption] `tfsdk:"ebs_encryption"` + EBSEncryptionKeyARN types.String `tfsdk:"ebs_encryption_key_arn"` + ID types.String `tfsdk:"id"` + PitPolicy fwtypes.ListNestedObjectValueOf[pitPolicy] `tfsdk:"pit_policy"` + ReplicationServerInstanceType types.String `tfsdk:"replication_server_instance_type"` + 
ReplicationServersSecurityGroupsIDs types.List `tfsdk:"replication_servers_security_groups_ids"` + StagingAreaSubnetID types.String `tfsdk:"staging_area_subnet_id"` + UseDedicatedReplicationServer types.Bool `tfsdk:"use_dedicated_replication_server"` + StagingAreaTags types.Map `tfsdk:"staging_area_tags"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +type pitPolicy struct { + Enabled types.Bool `tfsdk:"enabled"` + Interval types.Int64 `tfsdk:"interval"` + RetentionDuration types.Int64 `tfsdk:"retention_duration"` + RuleID types.Int64 `tfsdk:"rule_id"` + Units fwtypes.StringEnum[awstypes.PITPolicyRuleUnits] `tfsdk:"units"` +} + +func replicationConfigurationTemplateHasChanges(_ context.Context, plan, state replicationConfigurationTemplateResourceModel) bool { + return !plan.AssociateDefaultSecurityGroup.Equal(state.AssociateDefaultSecurityGroup) || + !plan.AutoReplicateNewDisks.Equal(state.AutoReplicateNewDisks) || + !plan.BandwidthThrottling.Equal(state.BandwidthThrottling) || + !plan.CreatePublicIP.Equal(state.CreatePublicIP) || + !plan.DataPlaneRouting.Equal(state.DataPlaneRouting) || + !plan.DefaultLargeStagingDiskType.Equal(state.DefaultLargeStagingDiskType) || + !plan.EBSEncryption.Equal(state.EBSEncryption) || + !plan.EBSEncryptionKeyARN.Equal(state.EBSEncryptionKeyARN) || + !plan.ID.Equal(state.ID) || + !plan.PitPolicy.Equal(state.PitPolicy) || + !plan.ReplicationServerInstanceType.Equal(state.ReplicationServerInstanceType) || + !plan.ReplicationServersSecurityGroupsIDs.Equal(state.ReplicationServersSecurityGroupsIDs) || + !plan.StagingAreaSubnetID.Equal(state.StagingAreaSubnetID) || + !plan.UseDedicatedReplicationServer.Equal(state.UseDedicatedReplicationServer) +} diff --git a/internal/service/drs/replication_configuration_template_test.go b/internal/service/drs/replication_configuration_template_test.go new file mode 100644 index 00000000000..1a9ea805a49 --- /dev/null +++ 
b/internal/service/drs/replication_configuration_template_test.go @@ -0,0 +1,227 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package drs_test + +import ( + "context" + "fmt" + "testing" + "time" + + awstypes "github.com/aws/aws-sdk-go-v2/service/drs/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfdrs "github.com/hashicorp/terraform-provider-aws/internal/service/drs" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// TestAccDRSReplicationConfigurationTemplate_serial serializes the tests +// since the account limit tends to be 1. +func TestAccDRSReplicationConfigurationTemplate_serial(t *testing.T) { + t.Parallel() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccReplicationConfigurationTemplate_basic, + acctest.CtDisappears: testAccReplicationConfigurationTemplate_disappears, + } + + acctest.RunSerialTests1Level(t, testCases, 5*time.Second) +} + +func testAccReplicationConfigurationTemplate_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_drs_replication_configuration_template.test" + var rct awstypes.ReplicationConfigurationTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: resource.ComposeAggregateTestCheckFunc( + testAccCheckReplicationConfigurationTemplateDestroy(ctx), + ), + Steps: []resource.TestStep{ + { + Config: testAccReplicationConfigurationTemplateConfig_basic(rName), 
+ Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckReplicationConfigurationTemplateExists(ctx, resourceName, &rct), + resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "associate_default_security_group", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "bandwidth_throttling", "12"), + resource.TestCheckResourceAttr(resourceName, "create_public_ip", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "data_plane_routing", "PRIVATE_IP"), + resource.TestCheckResourceAttr(resourceName, "default_large_staging_disk_type", "GP2"), + resource.TestCheckResourceAttr(resourceName, "ebs_encryption", "NONE"), + resource.TestCheckResourceAttr(resourceName, "use_dedicated_replication_server", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "replication_server_instance_type", "t3.small"), + resource.TestCheckResourceAttr(resourceName, "replication_servers_security_groups_ids.#", acctest.Ct1), + resource.TestCheckResourceAttrPair(resourceName, "staging_area_subnet_id", "aws_subnet.test.0", names.AttrID), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "pit_policy.*", map[string]string{ + names.AttrEnabled: acctest.CtTrue, + names.AttrInterval: acctest.Ct10, + "retention_duration": "60", + "units": "MINUTE", + "rule_id": acctest.Ct1, + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "pit_policy.*", map[string]string{ + names.AttrEnabled: acctest.CtTrue, + names.AttrInterval: acctest.Ct1, + "retention_duration": "24", + "units": "HOUR", + "rule_id": acctest.Ct2, + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "pit_policy.*", map[string]string{ + names.AttrEnabled: acctest.CtTrue, + names.AttrInterval: acctest.Ct1, + "retention_duration": acctest.Ct3, + "units": "DAY", + "rule_id": acctest.Ct3, + }), + resource.TestCheckResourceAttr(resourceName, "staging_area_tags.%", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, 
"staging_area_tags.Name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccReplicationConfigurationTemplate_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_drs_replication_configuration_template.test" + var rct awstypes.ReplicationConfigurationTemplate + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DRSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationConfigurationTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationConfigurationTemplateConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationConfigurationTemplateExists(ctx, resourceName, &rct), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfdrs.ResourceReplicationConfigurationTemplate, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckReplicationConfigurationTemplateExists(ctx context.Context, n string, v *awstypes.ReplicationConfigurationTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).DRSClient(ctx) + + output, err := tfdrs.FindReplicationConfigurationTemplateByID(ctx, conn, rs.Primary.ID) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccCheckReplicationConfigurationTemplateDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).DRSClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_drs_replication_configuration_template" { + continue + } + + 
_, err := tfdrs.FindReplicationConfigurationTemplateByID(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + if err != nil { + return err + } + + return fmt.Errorf("DRS Replication Configuration Template (%s) still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccReplicationConfigurationTemplateConfig_basic(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigVPCWithSubnets(rName, 1), + fmt.Sprintf(` +resource "aws_security_group" "test" { + name = %[1]q + description = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = -1 + to_port = -1 + protocol = "icmp" + cidr_blocks = ["0.0.0.0/0"] + } +} + +resource "aws_drs_replication_configuration_template" "test" { + associate_default_security_group = false + bandwidth_throttling = 12 + create_public_ip = false + data_plane_routing = "PRIVATE_IP" + default_large_staging_disk_type = "GP2" + ebs_encryption = "NONE" + use_dedicated_replication_server = false + replication_server_instance_type = "t3.small" + replication_servers_security_groups_ids = [aws_security_group.test.id] + staging_area_subnet_id = aws_subnet.test[0].id + + pit_policy { + enabled = true + interval = 10 + retention_duration = 60 + units = "MINUTE" + rule_id = 1 + } + + pit_policy { + enabled = true + interval = 1 + retention_duration = 24 + units = "HOUR" + rule_id = 2 + } + + pit_policy { + enabled = true + interval = 1 + retention_duration = 3 + units = "DAY" + rule_id = 3 + } + + staging_area_tags = { + Name = %[1]q + } +} +`, rName)) +} diff --git a/internal/service/drs/service_endpoint_resolver_gen.go b/internal/service/drs/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..660ba2ba0d7 --- /dev/null +++ b/internal/service/drs/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package drs + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + drs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/drs" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ drs_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver drs_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: drs_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params drs_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up drs endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*drs_sdkv2.Options) { + return func(o *drs_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/drs/service_endpoints_gen_test.go b/internal/service/drs/service_endpoints_gen_test.go index 4ee4b72c3f2..0b1051365de 100644 --- a/internal/service/drs/service_endpoints_gen_test.go +++ b/internal/service/drs/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := drs_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), drs_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := drs_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
drs_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/drs/service_package_gen.go b/internal/service/drs/service_package_gen.go index 93ba7aec234..bef76c21f67 100644 --- a/internal/service/drs/service_package_gen.go +++ b/internal/service/drs/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package drs @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" drs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/drs" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,7 +19,15 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { - return []*types.ServicePackageFrameworkResource{} + return []*types.ServicePackageFrameworkResource{ + { + Factory: newReplicationConfigurationTemplateResource, + Name: "Replication Configuration Template", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, + }, + } } func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { @@ -39,19 +46,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*drs_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return drs_sdkv2.NewFromConfig(cfg, func(o *drs_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return drs_sdkv2.NewFromConfig(cfg, + drs_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func 
ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/drs/tags_gen.go b/internal/service/drs/tags_gen.go new file mode 100644 index 00000000000..094e6660623 --- /dev/null +++ b/internal/service/drs/tags_gen.go @@ -0,0 +1,137 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package drs + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/drs" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists drs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func listTags(ctx context.Context, conn *drs.Client, identifier string, optFns ...func(*drs.Options)) (tftags.KeyValueTags, error) { + input := &drs.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, input, optFns...) + + if err != nil { + return tftags.New(ctx, nil), err + } + + return KeyValueTags(ctx, output.Tags), nil +} + +// ListTags lists drs service tags and set them in Context. +// It is called from outside this package. +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).DRSClient(ctx), identifier) + + if err != nil { + return err + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// map[string]string handling + +// Tags returns drs service tags. 
+func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() +} + +// KeyValueTags creates tftags.KeyValueTags from drs service tags. +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { + return tftags.New(ctx, tags) +} + +// getTagsIn returns drs service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) map[string]string { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets drs service tags in Context. +func setTagsOut(ctx context.Context, tags map[string]string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) + } +} + +// createTags creates drs service tags for new resources. +func createTags(ctx context.Context, conn *drs.Client, identifier string, tags map[string]string, optFns ...func(*drs.Options)) error { + if len(tags) == 0 { + return nil + } + + return updateTags(ctx, conn, identifier, nil, tags, optFns...) +} + +// updateTags updates drs service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func updateTags(ctx context.Context, conn *drs.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*drs.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.DRS) + if len(removedTags) > 0 { + input := &drs.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, input, optFns...) 
+ + if err != nil { + return fmt.Errorf("untagging resource (%s): %w", identifier, err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.DRS) + if len(updatedTags) > 0 { + input := &drs.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: Tags(updatedTags), + } + + _, err := conn.TagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// UpdateTags updates drs service tags. +// It is called from outside this package. +func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).DRSClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/ds/conditional_forwarder.go b/internal/service/ds/conditional_forwarder.go index 3694fc4e84e..9756af35a27 100644 --- a/internal/service/ds/conditional_forwarder.go +++ b/internal/service/ds/conditional_forwarder.go @@ -8,19 +8,23 @@ import ( "fmt" "log" "strings" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_directory_service_conditional_forwarder") -func 
ResourceConditionalForwarder() *schema.Resource { +// @SDKResource("aws_directory_service_conditional_forwarder", name="Conditional Forwarder") +func resourceConditionalForwarder() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceConditionalForwarderCreate, ReadWithoutTimeout: resourceConditionalForwarderRead, @@ -37,7 +41,6 @@ func ResourceConditionalForwarder() *schema.Resource { Required: true, ForceNew: true, }, - "dns_ips": { Type: schema.TypeList, Required: true, @@ -46,7 +49,6 @@ func ResourceConditionalForwarder() *schema.Resource { Type: schema.TypeString, }, }, - "remote_domain_name": { Type: schema.TypeString, Required: true, @@ -60,61 +62,62 @@ func ResourceConditionalForwarder() *schema.Resource { func resourceConditionalForwarderCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) - dnsIps := flex.ExpandStringList(d.Get("dns_ips").([]interface{})) - - directoryId := d.Get("directory_id").(string) + directoryID := d.Get("directory_id").(string) domainName := d.Get("remote_domain_name").(string) - - _, err := conn.CreateConditionalForwarderWithContext(ctx, &directoryservice.CreateConditionalForwarderInput{ - DirectoryId: aws.String(directoryId), - DnsIpAddrs: dnsIps, + id := conditionalForwarderCreateResourceID(directoryID, domainName) + input := &directoryservice.CreateConditionalForwarderInput{ + DirectoryId: aws.String(directoryID), + DnsIpAddrs: flex.ExpandStringValueList(d.Get("dns_ips").([]interface{})), RemoteDomainName: aws.String(domainName), - }) + } + + _, err := conn.CreateConditionalForwarder(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Directory Service Conditional Forwarder: %s", err) + return sdkdiag.AppendErrorf(diags, "creating Directory Service Conditional Forwarder (%s): %s", id, err) } - d.SetId(fmt.Sprintf("%s:%s", 
directoryId, domainName)) + d.SetId(id) - return diags + const ( + timeout = 1 * time.Minute + ) + _, err = tfresource.RetryWhenNotFound(ctx, timeout, func() (interface{}, error) { + return findConditionalForwarderByTwoPartKey(ctx, conn, directoryID, domainName) + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Directory Service Conditional Forwarder (%s) create: %s", d.Id(), err) + } + + return append(diags, resourceConditionalForwarderRead(ctx, d, meta)...) } func resourceConditionalForwarderRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) - directoryId, domainName, err := ParseConditionalForwarderID(d.Id()) + directoryID, domainName, err := conditionalForwarderParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Directory Service Conditional Forwarder (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - res, err := conn.DescribeConditionalForwardersWithContext(ctx, &directoryservice.DescribeConditionalForwardersInput{ - DirectoryId: aws.String(directoryId), - RemoteDomainNames: []*string{aws.String(domainName)}, - }) - - if err != nil { - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { - log.Printf("[WARN] Directory Service Conditional Forwarder (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } - return sdkdiag.AppendErrorf(diags, "reading Directory Service Conditional Forwarder (%s): %s", d.Id(), err) - } + cfd, err := findConditionalForwarderByTwoPartKey(ctx, conn, directoryID, domainName) - if len(res.ConditionalForwarders) == 0 { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Directory Service Conditional Forwarder (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - cfd := res.ConditionalForwarders[0] + if err != 
nil { + return sdkdiag.AppendErrorf(diags, "reading Directory Service Conditional Forwarder (%s): %s", d.Id(), err) + } - d.Set("dns_ips", flex.FlattenStringList(cfd.DnsIpAddrs)) - d.Set("directory_id", directoryId) + d.Set("directory_id", directoryID) + d.Set("dns_ips", cfd.DnsIpAddrs) d.Set("remote_domain_name", cfd.RemoteDomainName) return diags @@ -122,20 +125,20 @@ func resourceConditionalForwarderRead(ctx context.Context, d *schema.ResourceDat func resourceConditionalForwarderUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) - directoryId, domainName, err := ParseConditionalForwarderID(d.Id()) + directoryID, domainName, err := conditionalForwarderParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating Directory Service Conditional Forwarder (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - dnsIps := flex.ExpandStringList(d.Get("dns_ips").([]interface{})) - - _, err = conn.UpdateConditionalForwarderWithContext(ctx, &directoryservice.UpdateConditionalForwarderInput{ - DirectoryId: aws.String(directoryId), - DnsIpAddrs: dnsIps, + input := &directoryservice.UpdateConditionalForwarderInput{ + DirectoryId: aws.String(directoryID), + DnsIpAddrs: flex.ExpandStringValueList(d.Get("dns_ips").([]interface{})), RemoteDomainName: aws.String(domainName), - }) + } + + _, err = conn.UpdateConditionalForwarder(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Directory Service Conditional Forwarder (%s): %s", d.Id(), err) @@ -146,31 +149,85 @@ func resourceConditionalForwarderUpdate(ctx context.Context, d *schema.ResourceD func resourceConditionalForwarderDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := 
meta.(*conns.AWSClient).DSClient(ctx) - directoryId, domainName, err := ParseConditionalForwarderID(d.Id()) + directoryID, domainName, err := conditionalForwarderParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Directory Service Conditional Forwarder (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - _, err = conn.DeleteConditionalForwarderWithContext(ctx, &directoryservice.DeleteConditionalForwarderInput{ - DirectoryId: aws.String(directoryId), + log.Printf("[DEBUG] Deleting Directory Conditional Forwarder: %s", d.Id()) + _, err = conn.DeleteConditionalForwarder(ctx, &directoryservice.DeleteConditionalForwarderInput{ + DirectoryId: aws.String(directoryID), RemoteDomainName: aws.String(domainName), }) - if err != nil && !tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { + return diags + } + + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Directory Service Conditional Forwarder (%s): %s", d.Id(), err) } return diags } -func ParseConditionalForwarderID(id string) (directoryId, domainName string, err error) { - parts := strings.SplitN(id, ":", 2) +const conditionalForwarderResourceIDSeparator = ":" // nosemgrep:ci.ds-in-const-name,ci.ds-in-var-name + +func conditionalForwarderCreateResourceID(directoryID, domainName string) string { + parts := []string{directoryID, domainName} + id := strings.Join(parts, conditionalForwarderResourceIDSeparator) + + return id +} + +func conditionalForwarderParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, conditionalForwarderResourceIDSeparator, 2) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected DIRECTORY_ID%[2]sDOMAIN_NAME", id, conditionalForwarderResourceIDSeparator) +} + +func findConditionalForwarder(ctx 
context.Context, conn *directoryservice.Client, input *directoryservice.DescribeConditionalForwardersInput) (*awstypes.ConditionalForwarder, error) { + output, err := findConditionalForwarders(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findConditionalForwarders(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeConditionalForwardersInput) ([]awstypes.ConditionalForwarder, error) { + output, err := conn.DescribeConditionalForwarders(ctx, input) + + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ConditionalForwarders, nil +} - if len(parts) != 2 { - return "", "", fmt.Errorf("please make sure ID is in format DIRECTORY_ID:DOMAIN_NAME") +func findConditionalForwarderByTwoPartKey(ctx context.Context, conn *directoryservice.Client, directoryID, domainName string) (*awstypes.ConditionalForwarder, error) { + input := &directoryservice.DescribeConditionalForwardersInput{ + DirectoryId: aws.String(directoryID), + RemoteDomainNames: []string{domainName}, } - return parts[0], parts[1], nil + return findConditionalForwarder(ctx, conn, input) } diff --git a/internal/service/ds/conditional_forwarder_test.go b/internal/service/ds/conditional_forwarder_test.go index 8aab85ebce6..342885661c9 100644 --- a/internal/service/ds/conditional_forwarder_test.go +++ b/internal/service/ds/conditional_forwarder_test.go @@ -8,21 +8,19 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfds "github.com/hashicorp/terraform-provider-aws/internal/service/ds" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccDSConditionalForwarder_Condition_basic(t *testing.T) { +func TestAccDSConditionalForwarder_basic(t *testing.T) { ctx := acctest.Context(t) - resourceName := "aws_directory_service_conditional_forwarder.fwd" + resourceName := "aws_directory_service_conditional_forwarder.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() ip1, ip2, ip3 := "8.8.8.8", "1.1.1.1", "8.8.4.4" @@ -33,25 +31,24 @@ func TestAccDSConditionalForwarder_Condition_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckConditionalForwarderDestroy(ctx), Steps: []resource.TestStep{ - // test create { Config: testAccConditionalForwarderConfig_basic(rName, domainName, ip1, ip2), Check: resource.ComposeTestCheckFunc( - testAccCheckConditionalForwarderExists(ctx, resourceName, - []string{ip1, ip2}, - ), + testAccCheckConditionalForwarderExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "dns_ips.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "dns_ips.0", ip1), + resource.TestCheckResourceAttr(resourceName, "dns_ips.1", ip2), ), }, - // test update { Config: testAccConditionalForwarderConfig_basic(rName, domainName, ip1, ip3), Check: resource.ComposeTestCheckFunc( - testAccCheckConditionalForwarderExists(ctx, resourceName, - []string{ip1, ip3}, - ), + testAccCheckConditionalForwarderExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "dns_ips.#", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "dns_ips.0", ip1), + 
resource.TestCheckResourceAttr(resourceName, "dns_ips.1", ip3), ), }, - // test import { ResourceName: resourceName, ImportState: true, @@ -61,26 +58,43 @@ func TestAccDSConditionalForwarder_Condition_basic(t *testing.T) { }) } +func TestAccDSConditionalForwarder_disappears(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_directory_service_conditional_forwarder.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domainName := acctest.RandomDomainName() + ip1, ip2 := "8.8.8.8", "1.1.1.1" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckDirectoryService(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckConditionalForwarderDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccConditionalForwarderConfig_basic(rName, domainName, ip1, ip2), + Check: resource.ComposeTestCheckFunc( + testAccCheckConditionalForwarderExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfds.ResourceConditionalForwarder(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckConditionalForwarderDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_directory_service_conditional_forwarder" { continue } - directoryId, domainName, err := tfds.ParseConditionalForwarderID(rs.Primary.ID) - if err != nil { - return err - } - - res, err := conn.DescribeConditionalForwardersWithContext(ctx, &directoryservice.DescribeConditionalForwardersInput{ - DirectoryId: aws.String(directoryId), - RemoteDomainNames: []*string{aws.String(domainName)}, - }) + _, err := 
tfds.FindConditionalForwarderByTwoPartKey(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.Attributes["remote_domain_name"]) - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { + if tfresource.NotFound(err) { continue } @@ -88,81 +102,43 @@ func testAccCheckConditionalForwarderDestroy(ctx context.Context) resource.TestC return err } - if len(res.ConditionalForwarders) > 0 { - return fmt.Errorf("Expected AWS Directory Service Conditional Forwarder to be gone, but was still found") - } + return fmt.Errorf("Directory Service Conditional Forwarder %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckConditionalForwarderExists(ctx context.Context, name string, dnsIps []string) resource.TestCheckFunc { +func testAccCheckConditionalForwarderExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - - directoryId, domainName, err := tfds.ParseConditionalForwarderID(rs.Primary.ID) - if err != nil { - return err + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) - res, err := conn.DescribeConditionalForwardersWithContext(ctx, &directoryservice.DescribeConditionalForwardersInput{ - DirectoryId: aws.String(directoryId), - RemoteDomainNames: []*string{aws.String(domainName)}, - }) - - if err != nil { - return err - } + _, err := tfds.FindConditionalForwarderByTwoPartKey(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.Attributes["remote_domain_name"]) - if len(res.ConditionalForwarders) == 0 { - return fmt.Errorf("No Conditional Fowrwarder found") - } - - cfd := res.ConditionalForwarders[0] - - if dnsIps != nil { - if len(dnsIps) != 
len(cfd.DnsIpAddrs) { - return fmt.Errorf("DnsIpAddrs length mismatch") - } - - for k, v := range cfd.DnsIpAddrs { - if *v != dnsIps[k] { - return fmt.Errorf("DnsIp mismatch, '%s' != '%s' at index '%d'", *v, dnsIps[k], k) - } - } - } - - return nil + return err } } func testAccConditionalForwarderConfig_basic(rName, domain, ip1, ip2 string) string { - return acctest.ConfigCompose( - acctest.ConfigVPCWithSubnets(rName, 2), - fmt.Sprintf(` -resource "aws_directory_service_conditional_forwarder" "fwd" { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` +resource "aws_directory_service_conditional_forwarder" "test" { directory_id = aws_directory_service_directory.test.id remote_domain_name = "test.example.com" dns_ips = [ - %[2]q, %[3]q, + %[4]q, ] } resource "aws_directory_service_directory" "test" { - name = %[1]q + name = %[2]q password = "SuperSecretPassw0rd" type = "MicrosoftAD" edition = "Standard" @@ -173,9 +149,8 @@ resource "aws_directory_service_directory" "test" { } tags = { - Name = "terraform-testacc-directory-service-conditional-forwarder" + Name = %[1]q } } -`, domain, ip1, ip2), - ) +`, rName, domain, ip1, ip2)) } diff --git a/internal/service/ds/directory.go b/internal/service/ds/directory.go index 4b08a2b7a96..2e297cf7d42 100644 --- a/internal/service/ds/directory.go +++ b/internal/service/ds/directory.go @@ -10,15 +10,16 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -34,7 +35,7 @@ const ( // @SDKResource("aws_directory_service_directory", name="Directory") // @Tags(identifierAttribute="id") -func ResourceDirectory() *schema.Resource { +func resourceDirectory() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDirectoryCreate, ReadWithoutTimeout: resourceDirectoryRead, @@ -124,11 +125,11 @@ func ResourceDirectory() *schema.Resource { Computed: true, }, "edition": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(directoryservice.DirectoryEdition_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.DirectoryEdition](), }, "enable_sso": { Type: schema.TypeBool, @@ -158,20 +159,20 @@ func ResourceDirectory() *schema.Resource { ForceNew: true, }, names.AttrSize: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(directoryservice.DirectorySize_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.DirectorySize](), }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrType: { - Type: schema.TypeString, - Optional: true, - Default: directoryservice.DirectoryTypeSimpleAd, - ForceNew: true, - ValidateFunc: validation.StringInSlice(directoryservice.DirectoryType_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DirectoryTypeSimpleAd, + ForceNew: 
true, + ValidateDiagFunc: enum.Validate[awstypes.DirectoryType](), }, "vpc_settings": { Type: schema.TypeList, @@ -207,18 +208,18 @@ func ResourceDirectory() *schema.Resource { func resourceDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) name := d.Get(names.AttrName).(string) var creator directoryCreator - switch directoryType := d.Get(names.AttrType).(string); directoryType { - case directoryservice.DirectoryTypeAdconnector: + switch directoryType := awstypes.DirectoryType(d.Get(names.AttrType).(string)); directoryType { + case awstypes.DirectoryTypeAdConnector: creator = adConnectorCreator{} - case directoryservice.DirectoryTypeMicrosoftAd: + case awstypes.DirectoryTypeMicrosoftAd: creator = microsoftADCreator{} - case directoryservice.DirectoryTypeSimpleAd: + case awstypes.DirectoryTypeSimpleAd: creator = simpleADCreator{} } @@ -233,28 +234,32 @@ func resourceDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta i if _, err := waitDirectoryCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { if use, ok := errs.As[*retry.UnexpectedStateError](err); ok { - if use.State == directoryservice.DirectoryStageFailed { + if use.State == string(awstypes.DirectoryStageFailed) { tflog.Info(ctx, "retrying failed Directory creation", map[string]any{ "directory_id": d.Id(), names.AttrDomainName: name, }) - _, deleteErr := conn.DeleteDirectoryWithContext(ctx, &directoryservice.DeleteDirectoryInput{ + _, deleteErr := conn.DeleteDirectory(ctx, &directoryservice.DeleteDirectoryInput{ DirectoryId: aws.String(d.Id()), }) + if deleteErr != nil { diags = append(diags, errs.NewWarningDiagnostic( "Unable to Delete Failed Directory", fmt.Sprintf("While creating the Directory Service Directory %q, an attempt failed. 
Deleting the failed Directory failed: %s", name, deleteErr), )) } + return retry.RetryableError(err) } } + return retry.NonRetryableError(err) } return nil }, tfresource.WithPollInterval(1*time.Minute)) + if err != nil { return sdkdiag.AppendFromErr(diags, fmt.Errorf("creating Directory Service %s Directory (%s): %w", creator.TypeName(), name, err)) } @@ -282,9 +287,9 @@ func resourceDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta i func resourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) - dir, err := FindDirectoryByID(ctx, conn, d.Id()) + dir, err := findDirectoryByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Directory Service Directory (%s) not found, removing from state", d.Id()) @@ -307,15 +312,15 @@ func resourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta int } d.Set(names.AttrDescription, dir.Description) d.Set("desired_number_of_domain_controllers", dir.DesiredNumberOfDomainControllers) - if aws.StringValue(dir.Type) == directoryservice.DirectoryTypeAdconnector { - d.Set("dns_ip_addresses", aws.StringValueSlice(dir.ConnectSettings.ConnectIps)) + if dir.Type == awstypes.DirectoryTypeAdConnector { + d.Set("dns_ip_addresses", dir.ConnectSettings.ConnectIps) } else { - d.Set("dns_ip_addresses", aws.StringValueSlice(dir.DnsIpAddrs)) + d.Set("dns_ip_addresses", dir.DnsIpAddrs) } d.Set("edition", dir.Edition) d.Set("enable_sso", dir.SsoEnabled) d.Set(names.AttrName, dir.Name) - if aws.StringValue(dir.Type) == directoryservice.DirectoryTypeAdconnector { + if dir.Type == awstypes.DirectoryTypeAdConnector { d.Set("security_group_id", dir.ConnectSettings.SecurityGroupId) } else { d.Set("security_group_id", dir.VpcSettings.SecurityGroupId) @@ -336,7 +341,7 @@ func resourceDirectoryRead(ctx context.Context, d 
*schema.ResourceData, meta int func resourceDirectoryUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) if d.HasChange("desired_number_of_domain_controllers") { if err := updateNumberOfDomainControllers(ctx, conn, d.Id(), d.Get("desired_number_of_domain_controllers").(int), d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -361,16 +366,16 @@ func resourceDirectoryUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceDirectoryDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) log.Printf("[DEBUG] Deleting Directory Service Directory: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, directoryApplicationDeauthorizedPropagationTimeout, func() (interface{}, error) { - return conn.DeleteDirectoryWithContext(ctx, &directoryservice.DeleteDirectoryInput{ + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ClientException](ctx, directoryApplicationDeauthorizedPropagationTimeout, func() (interface{}, error) { + return conn.DeleteDirectory(ctx, &directoryservice.DeleteDirectoryInput{ DirectoryId: aws.String(d.Id()), }) - }, directoryservice.ErrCodeClientException, "authorized applications") + }, "authorized applications") - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { return diags } @@ -387,7 +392,7 @@ func resourceDirectoryDelete(ctx context.Context, d *schema.ResourceData, meta i type directoryCreator interface { TypeName() string - Create(ctx context.Context, conn *directoryservice.DirectoryService, name string, d *schema.ResourceData) error + Create(ctx context.Context, conn *directoryservice.Client, name 
string, d *schema.ResourceData) error } type adConnectorCreator struct{} @@ -396,7 +401,7 @@ func (c adConnectorCreator) TypeName() string { return "AD Connector" } -func (c adConnectorCreator) Create(ctx context.Context, conn *directoryservice.DirectoryService, name string, d *schema.ResourceData) error { +func (c adConnectorCreator) Create(ctx context.Context, conn *directoryservice.Client, name string, d *schema.ResourceData) error { input := &directoryservice.ConnectDirectoryInput{ Name: aws.String(name), Password: aws.String(d.Get(names.AttrPassword).(string)), @@ -412,23 +417,23 @@ func (c adConnectorCreator) Create(ctx context.Context, conn *directoryservice.D } if v, ok := d.GetOk(names.AttrSize); ok { - input.Size = aws.String(v.(string)) + input.Size = awstypes.DirectorySize(v.(string)) } else { // Matching previous behavior of Default: "Large" for Size attribute. - input.Size = aws.String(directoryservice.DirectorySizeLarge) + input.Size = awstypes.DirectorySizeLarge } if v, ok := d.GetOk("short_name"); ok { input.ShortName = aws.String(v.(string)) } - output, err := conn.ConnectDirectoryWithContext(ctx, input) + output, err := conn.ConnectDirectory(ctx, input) if err != nil { return err } - d.SetId(aws.StringValue(output.DirectoryId)) + d.SetId(aws.ToString(output.DirectoryId)) return nil } @@ -439,7 +444,7 @@ func (c microsoftADCreator) TypeName() string { return "Microsoft AD" } -func (c microsoftADCreator) Create(ctx context.Context, conn *directoryservice.DirectoryService, name string, d *schema.ResourceData) error { +func (c microsoftADCreator) Create(ctx context.Context, conn *directoryservice.Client, name string, d *schema.ResourceData) error { input := &directoryservice.CreateMicrosoftADInput{ Name: aws.String(name), Password: aws.String(d.Get(names.AttrPassword).(string)), @@ -451,7 +456,7 @@ func (c microsoftADCreator) Create(ctx context.Context, conn *directoryservice.D } if v, ok := d.GetOk("edition"); ok { - input.Edition = 
aws.String(v.(string)) + input.Edition = awstypes.DirectoryEdition(v.(string)) } if v, ok := d.GetOk("short_name"); ok { @@ -462,13 +467,13 @@ func (c microsoftADCreator) Create(ctx context.Context, conn *directoryservice.D input.VpcSettings = expandDirectoryVpcSettings(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.CreateMicrosoftADWithContext(ctx, input) + output, err := conn.CreateMicrosoftAD(ctx, input) if err != nil { return err } - d.SetId(aws.StringValue(output.DirectoryId)) + d.SetId(aws.ToString(output.DirectoryId)) return nil } @@ -479,7 +484,7 @@ func (c simpleADCreator) TypeName() string { return "Simple AD" } -func (c simpleADCreator) Create(ctx context.Context, conn *directoryservice.DirectoryService, name string, d *schema.ResourceData) error { +func (c simpleADCreator) Create(ctx context.Context, conn *directoryservice.Client, name string, d *schema.ResourceData) error { input := &directoryservice.CreateDirectoryInput{ Name: aws.String(name), Password: aws.String(d.Get(names.AttrPassword).(string)), @@ -491,10 +496,10 @@ func (c simpleADCreator) Create(ctx context.Context, conn *directoryservice.Dire } if v, ok := d.GetOk(names.AttrSize); ok { - input.Size = aws.String(v.(string)) + input.Size = awstypes.DirectorySize(v.(string)) } else { // Matching previous behavior of Default: "Large" for Size attribute. 
- input.Size = aws.String(directoryservice.DirectorySizeLarge) + input.Size = awstypes.DirectorySizeLarge } if v, ok := d.GetOk("short_name"); ok { @@ -505,24 +510,24 @@ func (c simpleADCreator) Create(ctx context.Context, conn *directoryservice.Dire input.VpcSettings = expandDirectoryVpcSettings(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.CreateDirectoryWithContext(ctx, input) + output, err := conn.CreateDirectory(ctx, input) if err != nil { return err } - d.SetId(aws.StringValue(output.DirectoryId)) + d.SetId(aws.ToString(output.DirectoryId)) return nil } -func createAlias(ctx context.Context, conn *directoryservice.DirectoryService, directoryID, alias string) error { +func createAlias(ctx context.Context, conn *directoryservice.Client, directoryID, alias string) error { input := &directoryservice.CreateAliasInput{ Alias: aws.String(alias), DirectoryId: aws.String(directoryID), } - _, err := conn.CreateAliasWithContext(ctx, input) + _, err := conn.CreateAlias(ctx, input) if err != nil { return fmt.Errorf("creating Directory Service Directory (%s) alias (%s): %w", directoryID, alias, err) @@ -531,12 +536,12 @@ func createAlias(ctx context.Context, conn *directoryservice.DirectoryService, d return nil } -func disableSSO(ctx context.Context, conn *directoryservice.DirectoryService, directoryID string) error { +func disableSSO(ctx context.Context, conn *directoryservice.Client, directoryID string) error { input := &directoryservice.DisableSsoInput{ DirectoryId: aws.String(directoryID), } - _, err := conn.DisableSsoWithContext(ctx, input) + _, err := conn.DisableSso(ctx, input) if err != nil { return fmt.Errorf("disabling Directory Service Directory (%s) SSO: %w", directoryID, err) @@ -545,12 +550,12 @@ func disableSSO(ctx context.Context, conn *directoryservice.DirectoryService, di return nil } -func enableSSO(ctx context.Context, conn *directoryservice.DirectoryService, directoryID string) error { +func enableSSO(ctx context.Context, conn 
*directoryservice.Client, directoryID string) error { input := &directoryservice.EnableSsoInput{ DirectoryId: aws.String(directoryID), } - _, err := conn.EnableSsoWithContext(ctx, input) + _, err := conn.EnableSso(ctx, input) if err != nil { return fmt.Errorf("enabling Directory Service Directory (%s) SSO: %w", directoryID, err) @@ -559,29 +564,29 @@ func enableSSO(ctx context.Context, conn *directoryservice.DirectoryService, dir return nil } -func updateNumberOfDomainControllers(ctx context.Context, conn *directoryservice.DirectoryService, directoryID string, desiredNumber int, timeout time.Duration) error { - oldDomainControllers, err := FindDomainControllers(ctx, conn, &directoryservice.DescribeDomainControllersInput{ +func updateNumberOfDomainControllers(ctx context.Context, conn *directoryservice.Client, directoryID string, desiredNumber int, timeout time.Duration, optFns ...func(*directoryservice.Options)) error { + oldDomainControllers, err := findDomainControllers(ctx, conn, &directoryservice.DescribeDomainControllersInput{ DirectoryId: aws.String(directoryID), - }) + }, optFns...) if err != nil { return fmt.Errorf("reading Directory Service Directory (%s) domain controllers: %w", directoryID, err) } input := &directoryservice.UpdateNumberOfDomainControllersInput{ - DesiredNumber: aws.Int64(int64(desiredNumber)), + DesiredNumber: aws.Int32(int32(desiredNumber)), DirectoryId: aws.String(directoryID), } - _, err = conn.UpdateNumberOfDomainControllersWithContext(ctx, input) + _, err = conn.UpdateNumberOfDomainControllers(ctx, input, optFns...) 
if err != nil { return fmt.Errorf("updating Directory Service Directory (%s) number of domain controllers (%d): %w", directoryID, desiredNumber, err) } - newDomainControllers, err := FindDomainControllers(ctx, conn, &directoryservice.DescribeDomainControllersInput{ + newDomainControllers, err := findDomainControllers(ctx, conn, &directoryservice.DescribeDomainControllersInput{ DirectoryId: aws.String(directoryID), - }) + }, optFns...) if err != nil { return fmt.Errorf("reading Directory Service Directory (%s) domain controllers: %w", directoryID, err) @@ -590,14 +595,14 @@ func updateNumberOfDomainControllers(ctx context.Context, conn *directoryservice var wait []string for _, v := range newDomainControllers { - domainControllerID := aws.StringValue(v.DomainControllerId) + domainControllerID := aws.ToString(v.DomainControllerId) isNew := true for _, v := range oldDomainControllers { - if aws.StringValue(v.DomainControllerId) == domainControllerID { + if aws.ToString(v.DomainControllerId) == domainControllerID { isNew = false - if aws.StringValue(v.Status) != directoryservice.DomainControllerStatusActive { + if v.Status != awstypes.DomainControllerStatusActive { wait = append(wait, domainControllerID) } } @@ -610,11 +615,11 @@ func updateNumberOfDomainControllers(ctx context.Context, conn *directoryservice for _, v := range wait { if len(newDomainControllers) > len(oldDomainControllers) { - if _, err = waitDomainControllerCreated(ctx, conn, directoryID, v, timeout); err != nil { + if _, err = waitDomainControllerCreated(ctx, conn, directoryID, v, timeout, optFns...); err != nil { return fmt.Errorf("waiting for Directory Service Directory (%s) Domain Controller (%s) create: %w", directoryID, v, err) } } else { - if _, err := waitDomainControllerDeleted(ctx, conn, directoryID, v, timeout); err != nil { + if _, err := waitDomainControllerDeleted(ctx, conn, directoryID, v, timeout, optFns...); err != nil { return fmt.Errorf("waiting for Directory Service Directory (%s) 
Domain Controller (%s) delete: %w", directoryID, v, err) } } @@ -623,15 +628,248 @@ func updateNumberOfDomainControllers(ctx context.Context, conn *directoryservice return nil } -func expandDirectoryConnectSettings(tfMap map[string]interface{}) *directoryservice.DirectoryConnectSettings { +func findDirectory(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeDirectoriesInput) (*awstypes.DirectoryDescription, error) { + output, err := findDirectories(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findDirectories(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeDirectoriesInput) ([]awstypes.DirectoryDescription, error) { + var output []awstypes.DirectoryDescription + + pages := directoryservice.NewDescribeDirectoriesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.DirectoryDescriptions...) 
+ } + + return output, nil +} + +func findDirectoryByID(ctx context.Context, conn *directoryservice.Client, id string) (*awstypes.DirectoryDescription, error) { + input := &directoryservice.DescribeDirectoriesInput{ + DirectoryIds: []string{id}, + } + + output, err := findDirectory(ctx, conn, input) + + if err != nil { + return nil, err + } + + if stage := output.Stage; stage == awstypes.DirectoryStageDeleted { + return nil, &retry.NotFoundError{ + Message: string(stage), + LastRequest: input, + } + } + + return output, nil +} + +func statusDirectoryStage(ctx context.Context, conn *directoryservice.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDirectoryByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Stage), nil + } +} + +func waitDirectoryCreated(ctx context.Context, conn *directoryservice.Client, id string, timeout time.Duration) (*awstypes.DirectoryDescription, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DirectoryStageRequested, awstypes.DirectoryStageCreating, awstypes.DirectoryStageCreated), + Target: enum.Slice(awstypes.DirectoryStageActive), + Refresh: statusDirectoryStage(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + // Wrap any error returned with waiting message + defer func() { + if err != nil { + err = fmt.Errorf("waiting for completion: %w", err) + } + }() + + if output, ok := outputRaw.(*awstypes.DirectoryDescription); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StageReason))) + + return output, err + } + + return nil, err +} + +func waitDirectoryDeleted(ctx context.Context, conn *directoryservice.Client, id string, timeout time.Duration) (*awstypes.DirectoryDescription, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: 
enum.Slice(awstypes.DirectoryStageActive, awstypes.DirectoryStageDeleting), + Target: []string{}, + Refresh: statusDirectoryStage(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + // Wrap any error returned with waiting message + defer func() { + if err != nil { + err = fmt.Errorf("waiting for completion: %w", err) + } + }() + + if output, ok := outputRaw.(*awstypes.DirectoryDescription); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StageReason))) + + return output, err + } + + return nil, err +} + +func findDomainController(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeDomainControllersInput, optFns ...func(*directoryservice.Options)) (*awstypes.DomainController, error) { + output, err := findDomainControllers(ctx, conn, input, optFns...) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findDomainControllers(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeDomainControllersInput, optFns ...func(*directoryservice.Options)) ([]awstypes.DomainController, error) { + var output []awstypes.DomainController + + pages := directoryservice.NewDescribeDomainControllersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx, optFns...) + + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.DomainControllers...) 
+ } + + return output, nil +} + +func findDomainControllerByTwoPartKey(ctx context.Context, conn *directoryservice.Client, directoryID, domainControllerID string, optFns ...func(*directoryservice.Options)) (*awstypes.DomainController, error) { + input := &directoryservice.DescribeDomainControllersInput{ + DirectoryId: aws.String(directoryID), + DomainControllerIds: []string{domainControllerID}, + } + + output, err := findDomainController(ctx, conn, input, optFns...) + + if err != nil { + return nil, err + } + + if status := output.Status; status == awstypes.DomainControllerStatusDeleted { + return nil, &retry.NotFoundError{ + Message: string(status), + LastRequest: input, + } + } + + return output, nil +} + +func statusDomainController(ctx context.Context, conn *directoryservice.Client, directoryID, domainControllerID string, optFns ...func(*directoryservice.Options)) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDomainControllerByTwoPartKey(ctx, conn, directoryID, domainControllerID, optFns...) 
+ + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitDomainControllerCreated(ctx context.Context, conn *directoryservice.Client, directoryID, domainControllerID string, timeout time.Duration, optFns ...func(*directoryservice.Options)) (*awstypes.DomainController, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DomainControllerStatusCreating), + Target: enum.Slice(awstypes.DomainControllerStatusActive), + Refresh: statusDomainController(ctx, conn, directoryID, domainControllerID, optFns...), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.DomainController); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusReason))) + + return output, err + } + + return nil, err +} + +func waitDomainControllerDeleted(ctx context.Context, conn *directoryservice.Client, directoryID, domainControllerID string, timeout time.Duration, optFns ...func(*directoryservice.Options)) (*awstypes.DomainController, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DomainControllerStatusDeleting), + Target: []string{}, + Refresh: statusDomainController(ctx, conn, directoryID, domainControllerID, optFns...), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.DomainController); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusReason))) + + return output, err + } + + return nil, err +} + +func expandDirectoryConnectSettings(tfMap map[string]interface{}) *awstypes.DirectoryConnectSettings { if tfMap == nil { return nil } - apiObject := &directoryservice.DirectoryConnectSettings{} + apiObject := &awstypes.DirectoryConnectSettings{} if v, ok := tfMap["customer_dns_ips"].(*schema.Set); ok && v.Len() > 0 { - apiObject.CustomerDnsIps = 
flex.ExpandStringSet(v) + apiObject.CustomerDnsIps = flex.ExpandStringValueSet(v) } if v, ok := tfMap["customer_username"].(string); ok && v != "" { @@ -639,7 +877,7 @@ func expandDirectoryConnectSettings(tfMap map[string]interface{}) *directoryserv } if v, ok := tfMap[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { - apiObject.SubnetIds = flex.ExpandStringSet(v) + apiObject.SubnetIds = flex.ExpandStringValueSet(v) } if v, ok := tfMap[names.AttrVPCID].(string); ok && v != "" { @@ -649,7 +887,7 @@ func expandDirectoryConnectSettings(tfMap map[string]interface{}) *directoryserv return apiObject } -func flattenDirectoryConnectSettingsDescription(apiObject *directoryservice.DirectoryConnectSettingsDescription, dnsIpAddrs []*string) map[string]interface{} { +func flattenDirectoryConnectSettingsDescription(apiObject *awstypes.DirectoryConnectSettingsDescription, dnsIpAddrs []string) map[string]interface{} { if apiObject == nil { return nil } @@ -657,41 +895,41 @@ func flattenDirectoryConnectSettingsDescription(apiObject *directoryservice.Dire tfMap := map[string]interface{}{} if v := apiObject.AvailabilityZones; v != nil { - tfMap[names.AttrAvailabilityZones] = aws.StringValueSlice(v) + tfMap[names.AttrAvailabilityZones] = v } if v := apiObject.ConnectIps; v != nil { - tfMap["connect_ips"] = aws.StringValueSlice(v) + tfMap["connect_ips"] = v } if dnsIpAddrs != nil { - tfMap["customer_dns_ips"] = aws.StringValueSlice(dnsIpAddrs) + tfMap["customer_dns_ips"] = dnsIpAddrs } if v := apiObject.CustomerUserName; v != nil { - tfMap["customer_username"] = aws.StringValue(v) + tfMap["customer_username"] = aws.ToString(v) } if v := apiObject.SubnetIds; v != nil { - tfMap[names.AttrSubnetIDs] = aws.StringValueSlice(v) + tfMap[names.AttrSubnetIDs] = v } if v := apiObject.VpcId; v != nil { - tfMap[names.AttrVPCID] = aws.StringValue(v) + tfMap[names.AttrVPCID] = aws.ToString(v) } return tfMap } -func expandDirectoryVpcSettings(tfMap map[string]interface{}) 
*directoryservice.DirectoryVpcSettings { // nosemgrep:ci.caps5-in-func-name +func expandDirectoryVpcSettings(tfMap map[string]interface{}) *awstypes.DirectoryVpcSettings { // nosemgrep:ci.caps5-in-func-name if tfMap == nil { return nil } - apiObject := &directoryservice.DirectoryVpcSettings{} + apiObject := &awstypes.DirectoryVpcSettings{} if v, ok := tfMap[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { - apiObject.SubnetIds = flex.ExpandStringSet(v) + apiObject.SubnetIds = flex.ExpandStringValueSet(v) } if v, ok := tfMap[names.AttrVPCID].(string); ok && v != "" { @@ -701,7 +939,7 @@ func expandDirectoryVpcSettings(tfMap map[string]interface{}) *directoryservice. return apiObject } -func flattenDirectoryVpcSettings(apiObject *directoryservice.DirectoryVpcSettings) map[string]interface{} { // nosemgrep:ci.caps5-in-func-name +func flattenDirectoryVpcSettings(apiObject *awstypes.DirectoryVpcSettings) map[string]interface{} { // nosemgrep:ci.caps5-in-func-name if apiObject == nil { return nil } @@ -709,17 +947,17 @@ func flattenDirectoryVpcSettings(apiObject *directoryservice.DirectoryVpcSetting tfMap := map[string]interface{}{} if v := apiObject.SubnetIds; v != nil { - tfMap[names.AttrSubnetIDs] = aws.StringValueSlice(v) + tfMap[names.AttrSubnetIDs] = v } if v := apiObject.VpcId; v != nil { - tfMap[names.AttrVPCID] = aws.StringValue(v) + tfMap[names.AttrVPCID] = aws.ToString(v) } return tfMap } -func flattenDirectoryVpcSettingsDescription(apiObject *directoryservice.DirectoryVpcSettingsDescription) map[string]interface{} { // nosemgrep:ci.caps5-in-func-name +func flattenDirectoryVpcSettingsDescription(apiObject *awstypes.DirectoryVpcSettingsDescription) map[string]interface{} { // nosemgrep:ci.caps5-in-func-name if apiObject == nil { return nil } @@ -727,68 +965,16 @@ func flattenDirectoryVpcSettingsDescription(apiObject *directoryservice.Director tfMap := map[string]interface{}{} if v := apiObject.AvailabilityZones; v != nil { - 
tfMap[names.AttrAvailabilityZones] = aws.StringValueSlice(v) + tfMap[names.AttrAvailabilityZones] = v } if v := apiObject.SubnetIds; v != nil { - tfMap[names.AttrSubnetIDs] = aws.StringValueSlice(v) + tfMap[names.AttrSubnetIDs] = v } if v := apiObject.VpcId; v != nil { - tfMap[names.AttrVPCID] = aws.StringValue(v) + tfMap[names.AttrVPCID] = aws.ToString(v) } return tfMap } - -func waitDirectoryCreated(ctx context.Context, conn *directoryservice.DirectoryService, id string, timeout time.Duration) (*directoryservice.DirectoryDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{directoryservice.DirectoryStageRequested, directoryservice.DirectoryStageCreating, directoryservice.DirectoryStageCreated}, - Target: []string{directoryservice.DirectoryStageActive}, - Refresh: statusDirectoryStage(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - // Wrap any error returned with waiting message - defer func() { - if err != nil { - err = fmt.Errorf("waiting for completion: %w", err) - } - }() - - if output, ok := outputRaw.(*directoryservice.DirectoryDescription); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StageReason))) - - return output, err - } - - return nil, err -} - -func waitDirectoryDeleted(ctx context.Context, conn *directoryservice.DirectoryService, id string, timeout time.Duration) (*directoryservice.DirectoryDescription, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{directoryservice.DirectoryStageActive, directoryservice.DirectoryStageDeleting}, - Target: []string{}, - Refresh: statusDirectoryStage(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - // Wrap any error returned with waiting message - defer func() { - if err != nil { - err = fmt.Errorf("waiting for completion: %w", err) - } - }() - - if output, ok := outputRaw.(*directoryservice.DirectoryDescription); ok { - 
tfresource.SetLastError(err, errors.New(aws.StringValue(output.StageReason))) - - return output, err - } - - return nil, err -} diff --git a/internal/service/ds/directory_data_source.go b/internal/service/ds/directory_data_source.go index bffe2b53bef..22696a633f8 100644 --- a/internal/service/ds/directory_data_source.go +++ b/internal/service/ds/directory_data_source.go @@ -6,8 +6,8 @@ package ds import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -17,8 +17,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_directory_service_directory") -func DataSourceDirectory() *schema.Resource { +// @SDKDataSource("aws_directory_service_directory", name="Directory") +func dataSourceDirectory() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceDirectoryRead, @@ -174,16 +174,16 @@ func DataSourceDirectory() *schema.Resource { func dataSourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - dir, err := FindDirectoryByID(ctx, conn, d.Get("directory_id").(string)) + dir, err := findDirectoryByID(ctx, conn, d.Get("directory_id").(string)) if err != nil { return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("Directory Service Directory", err)) } - d.SetId(aws.StringValue(dir.DirectoryId)) + d.SetId(aws.ToString(dir.DirectoryId)) d.Set("access_url", dir.AccessUrl) d.Set(names.AttrAlias, dir.Alias) if dir.ConnectSettings != nil { @@ 
-194,12 +194,12 @@ func dataSourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("connect_settings", nil) } d.Set(names.AttrDescription, dir.Description) - if aws.StringValue(dir.Type) == directoryservice.DirectoryTypeAdconnector { - d.Set("dns_ip_addresses", aws.StringValueSlice(dir.ConnectSettings.ConnectIps)) - } else if aws.StringValue(dir.Type) == directoryservice.DirectoryTypeSharedMicrosoftAd { - d.Set("dns_ip_addresses", aws.StringValueSlice(dir.OwnerDirectoryDescription.DnsIpAddrs)) + if dir.Type == awstypes.DirectoryTypeAdConnector { + d.Set("dns_ip_addresses", dir.ConnectSettings.ConnectIps) + } else if dir.Type == awstypes.DirectoryTypeSharedMicrosoftAd { + d.Set("dns_ip_addresses", dir.OwnerDirectoryDescription.DnsIpAddrs) } else { - d.Set("dns_ip_addresses", aws.StringValueSlice(dir.DnsIpAddrs)) + d.Set("dns_ip_addresses", dir.DnsIpAddrs) } d.Set("edition", dir.Edition) d.Set("enable_sso", dir.SsoEnabled) @@ -211,7 +211,7 @@ func dataSourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta i } else { d.Set("radius_settings", nil) } - if aws.StringValue(dir.Type) == directoryservice.DirectoryTypeAdconnector { + if dir.Type == awstypes.DirectoryTypeAdConnector { d.Set("security_group_id", dir.ConnectSettings.SecurityGroupId) } else if dir.VpcSettings != nil { d.Set("security_group_id", dir.VpcSettings.SecurityGroupId) @@ -242,39 +242,31 @@ func dataSourceDirectoryRead(ctx context.Context, d *schema.ResourceData, meta i return diags } -func flattenRadiusSettings(apiObject *directoryservice.RadiusSettings) map[string]interface{} { +func flattenRadiusSettings(apiObject *awstypes.RadiusSettings) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.AuthenticationProtocol; v != nil { - tfMap["authentication_protocol"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "authentication_protocol": apiObject.AuthenticationProtocol, + "radius_retries": 
apiObject.RadiusRetries, + "use_same_username": apiObject.UseSameUsername, } if v := apiObject.DisplayLabel; v != nil { - tfMap["display_label"] = aws.StringValue(v) + tfMap["display_label"] = aws.ToString(v) } if v := apiObject.RadiusPort; v != nil { - tfMap["radius_port"] = aws.Int64Value(v) - } - - if v := apiObject.RadiusRetries; v != nil { - tfMap["radius_retries"] = aws.Int64Value(v) + tfMap["radius_port"] = aws.ToInt32(v) } if v := apiObject.RadiusServers; v != nil { - tfMap["radius_servers"] = aws.StringValueSlice(v) + tfMap["radius_servers"] = v } if v := apiObject.RadiusTimeout; v != nil { - tfMap["radius_timeout"] = aws.Int64Value(v) - } - - if v := apiObject.UseSameUsername; v != nil { - tfMap["use_same_username"] = aws.BoolValue(v) + tfMap["radius_timeout"] = aws.ToInt32(v) } return tfMap diff --git a/internal/service/ds/directory_test.go b/internal/service/ds/directory_test.go index ad3b5b8cef3..68807421bbd 100644 --- a/internal/service/ds/directory_test.go +++ b/internal/service/ds/directory_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccDSDirectory_basic(t *testing.T) { ctx := acctest.Context(t) - var ds directoryservice.DirectoryDescription + var ds awstypes.DirectoryDescription resourceName := "aws_directory_service_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -73,7 +73,7 @@ func TestAccDSDirectory_basic(t *testing.T) { func TestAccDSDirectory_disappears(t *testing.T) { ctx := acctest.Context(t) - var ds directoryservice.DirectoryDescription + var ds awstypes.DirectoryDescription resourceName := 
"aws_directory_service_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -102,7 +102,7 @@ func TestAccDSDirectory_disappears(t *testing.T) { func TestAccDSDirectory_tags(t *testing.T) { ctx := acctest.Context(t) - var ds directoryservice.DirectoryDescription + var ds awstypes.DirectoryDescription resourceName := "aws_directory_service_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -156,7 +156,7 @@ func TestAccDSDirectory_tags(t *testing.T) { func TestAccDSDirectory_microsoft(t *testing.T) { ctx := acctest.Context(t) - var ds directoryservice.DirectoryDescription + var ds awstypes.DirectoryDescription resourceName := "aws_directory_service_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -204,7 +204,7 @@ func TestAccDSDirectory_microsoft(t *testing.T) { func TestAccDSDirectory_microsoftStandard(t *testing.T) { ctx := acctest.Context(t) - var ds directoryservice.DirectoryDescription + var ds awstypes.DirectoryDescription resourceName := "aws_directory_service_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -252,7 +252,7 @@ func TestAccDSDirectory_microsoftStandard(t *testing.T) { func TestAccDSDirectory_connector(t *testing.T) { ctx := acctest.Context(t) - var ds directoryservice.DirectoryDescription + var ds awstypes.DirectoryDescription resourceName := "aws_directory_service_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -306,7 +306,7 @@ func TestAccDSDirectory_connector(t *testing.T) { func TestAccDSDirectory_withAliasAndSSO(t *testing.T) { ctx := acctest.Context(t) - var ds directoryservice.DirectoryDescription + var ds awstypes.DirectoryDescription alias := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_directory_service_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -373,7 +373,7 @@ func TestAccDSDirectory_withAliasAndSSO(t *testing.T) { func TestAccDSDirectory_desiredNumberOfDomainControllers(t *testing.T) { ctx := acctest.Context(t) - var ds directoryservice.DirectoryDescription + var ds awstypes.DirectoryDescription resourceName := "aws_directory_service_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -435,7 +435,7 @@ func TestAccDSDirectory_desiredNumberOfDomainControllers(t *testing.T) { func testAccCheckDirectoryDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_directory_service_directory" { @@ -459,18 +459,14 @@ func testAccCheckDirectoryDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckDirectoryExists(ctx context.Context, n string, v *directoryservice.DirectoryDescription) resource.TestCheckFunc { +func testAccCheckDirectoryExists(ctx context.Context, n string, v *awstypes.DirectoryDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Directory Service Directory ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) output, err := tfds.FindDirectoryByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/ds/exports_test.go b/internal/service/ds/exports_test.go index 93491dd5851..c18f06d5de7 100644 --- a/internal/service/ds/exports_test.go +++ 
b/internal/service/ds/exports_test.go @@ -5,7 +5,20 @@ package ds // Exports for use in tests only. var ( - DirectoryIDValidator = directoryIDValidator - DomainWithTrailingDotValidator = domainWithTrailingDotValidator - TrustPasswordValidator = trustPasswordValidator + ResourceConditionalForwarder = resourceConditionalForwarder + ResourceDirectory = resourceDirectory + ResourceLogSubscription = resourceLogSubscription + ResourceRadiusSettings = resourceRadiusSettings + ResourceRegion = resourceRegion + ResourceSharedDirectory = resourceSharedDirectory + ResourceSharedDirectoryAccepter = resourceSharedDirectoryAccepter + ResourceTrust = newTrustResource + + FindConditionalForwarderByTwoPartKey = findConditionalForwarderByTwoPartKey + FindDirectoryByID = findDirectoryByID + FindLogSubscriptionByID = findLogSubscriptionByID + FindRadiusSettingsByID = findRadiusSettingsByID + FindRegionByTwoPartKey = findRegionByTwoPartKey + FindSharedDirectoryByTwoPartKey = findSharedDirectoryByTwoPartKey // nosemgrep:ci.ds-in-var-name + FindTrustByTwoPartKey = findTrustByTwoPartKey ) diff --git a/internal/service/ds/find.go b/internal/service/ds/find.go deleted file mode 100644 index 2073bc00d6f..00000000000 --- a/internal/service/ds/find.go +++ /dev/null @@ -1,247 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package ds - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindDirectoryByID(ctx context.Context, conn *directoryservice.DirectoryService, id string) (*directoryservice.DirectoryDescription, error) { - input := &directoryservice.DescribeDirectoriesInput{ - DirectoryIds: aws.StringSlice([]string{id}), - } - var output []*directoryservice.DirectoryDescription - - err := describeDirectoriesPages(ctx, conn, input, func(page *directoryservice.DescribeDirectoriesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.DirectoryDescriptions { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if len(output) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - directory := output[0] - - if stage := aws.StringValue(directory.Stage); stage == directoryservice.DirectoryStageDeleted { - return nil, &retry.NotFoundError{ - Message: stage, - LastRequest: input, - } - } - - return directory, nil -} - -func FindDomainController(ctx context.Context, conn *directoryservice.DirectoryService, directoryID, domainControllerID string) (*directoryservice.DomainController, error) { - input := &directoryservice.DescribeDomainControllersInput{ - DirectoryId: aws.String(directoryID), - DomainControllerIds: aws.StringSlice([]string{domainControllerID}), - } - - output, err := 
FindDomainControllers(ctx, conn, input) - - if err != nil { - return nil, err - } - - if len(output) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - domainController := output[0] - - if status := aws.StringValue(domainController.Status); status == directoryservice.DomainControllerStatusDeleted { - return nil, &retry.NotFoundError{ - Message: status, - LastRequest: input, - } - } - - return domainController, nil -} - -func FindDomainControllers(ctx context.Context, conn *directoryservice.DirectoryService, input *directoryservice.DescribeDomainControllersInput) ([]*directoryservice.DomainController, error) { - var output []*directoryservice.DomainController - - err := conn.DescribeDomainControllersPagesWithContext(ctx, input, func(page *directoryservice.DescribeDomainControllersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.DomainControllers { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - return output, nil -} - -func FindRadiusSettings(ctx context.Context, conn *directoryservice.DirectoryService, directoryID string) (*directoryservice.RadiusSettings, error) { - output, err := FindDirectoryByID(ctx, conn, directoryID) - - if err != nil { - return nil, err - } - - if output.RadiusSettings == nil { - return nil, tfresource.NewEmptyResultError(directoryID) - } - - return output.RadiusSettings, nil -} - -func FindRegion(ctx context.Context, conn *directoryservice.DirectoryService, directoryID, regionName string) (*directoryservice.RegionDescription, error) { - input := &directoryservice.DescribeRegionsInput{ - DirectoryId: aws.String(directoryID), 
- RegionName: aws.String(regionName), - } - var output []*directoryservice.RegionDescription - - err := describeRegionsPages(ctx, conn, input, func(page *directoryservice.DescribeRegionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.RegionsDescription { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeDirectoryDoesNotExistException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if len(output) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - region := output[0] - - if status := aws.StringValue(region.Status); status == directoryservice.DirectoryStageDeleted { - return nil, &retry.NotFoundError{ - Message: status, - LastRequest: input, - } - } - - return region, nil -} - -func FindSharedDirectory(ctx context.Context, conn *directoryservice.DirectoryService, ownerDirectoryID, sharedDirectoryID string) (*directoryservice.SharedDirectory, error) { // nosemgrep:ci.ds-in-func-name - input := &directoryservice.DescribeSharedDirectoriesInput{ - OwnerDirectoryId: aws.String(ownerDirectoryID), - SharedDirectoryIds: aws.StringSlice([]string{sharedDirectoryID}), - } - - var output []*directoryservice.SharedDirectory - - err := conn.DescribeSharedDirectoriesPagesWithContext(ctx, input, func(page *directoryservice.DescribeSharedDirectoriesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.SharedDirectories { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return 
nil, err - } - - if len(output) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - sharedDirectory := output[0] - - if status := aws.StringValue(sharedDirectory.ShareStatus); status == directoryservice.ShareStatusDeleted { - return nil, &retry.NotFoundError{ - Message: status, - LastRequest: input, - } - } - - return sharedDirectory, nil -} diff --git a/internal/service/ds/generate.go b/internal/service/ds/generate.go index 2b4a67038d3..73b7e66bedd 100644 --- a/internal/service/ds/generate.go +++ b/internal/service/ds/generate.go @@ -1,8 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/listpages/main.go -ListOps=DescribeDirectories,DescribeRegions -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsInIDElem=ResourceId -ServiceTagsSlice -TagOp=AddTagsToResource -TagInIDElem=ResourceId -UntagOp=RemoveTagsFromResource -UpdateTags -CreateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ListTagsInIDElem=ResourceId -ServiceTagsSlice -TagOp=AddTagsToResource -TagInIDElem=ResourceId -UntagOp=RemoveTagsFromResource -UpdateTags -CreateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/ds/list_pages_gen.go b/internal/service/ds/list_pages_gen.go deleted file mode 100644 index 9527ffce4c9..00000000000 --- a/internal/service/ds/list_pages_gen.go +++ /dev/null @@ -1,44 +0,0 @@ -// Code generated by "internal/generate/listpages/main.go -ListOps=DescribeDirectories,DescribeRegions"; DO NOT EDIT. 
- -package ds - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface" -) - -func describeDirectoriesPages(ctx context.Context, conn directoryserviceiface.DirectoryServiceAPI, input *directoryservice.DescribeDirectoriesInput, fn func(*directoryservice.DescribeDirectoriesOutput, bool) bool) error { - for { - output, err := conn.DescribeDirectoriesWithContext(ctx, input) - if err != nil { - return err - } - - lastPage := aws.StringValue(output.NextToken) == "" - if !fn(output, lastPage) || lastPage { - break - } - - input.NextToken = output.NextToken - } - return nil -} -func describeRegionsPages(ctx context.Context, conn directoryserviceiface.DirectoryServiceAPI, input *directoryservice.DescribeRegionsInput, fn func(*directoryservice.DescribeRegionsOutput, bool) bool) error { - for { - output, err := conn.DescribeRegionsWithContext(ctx, input) - if err != nil { - return err - } - - lastPage := aws.StringValue(output.NextToken) == "" - if !fn(output, lastPage) || lastPage { - break - } - - input.NextToken = output.NextToken - } - return nil -} diff --git a/internal/service/ds/log_subscription.go b/internal/service/ds/log_subscription.go index d2ef189b6bf..3d0cdf2453f 100644 --- a/internal/service/ds/log_subscription.go +++ b/internal/service/ds/log_subscription.go @@ -7,21 +7,26 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
"github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_directory_service_log_subscription") -func ResourceLogSubscription() *schema.Resource { +// @SDKResource("aws_directory_service_log_subscription", name="Log Subscription") +func resourceLogSubscription() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLogSubscriptionCreate, ReadWithoutTimeout: resourceLogSubscriptionRead, DeleteWithoutTimeout: resourceLogSubscriptionDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -43,48 +48,41 @@ func ResourceLogSubscription() *schema.Resource { func resourceLogSubscriptionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) - - directoryId := d.Get("directory_id") - logGroupName := d.Get(names.AttrLogGroupName) + conn := meta.(*conns.AWSClient).DSClient(ctx) - input := directoryservice.CreateLogSubscriptionInput{ - DirectoryId: aws.String(directoryId.(string)), - LogGroupName: aws.String(logGroupName.(string)), + directoryID := d.Get("directory_id").(string) + input := &directoryservice.CreateLogSubscriptionInput{ + DirectoryId: aws.String(directoryID), + LogGroupName: aws.String(d.Get(names.AttrLogGroupName).(string)), } - _, err := conn.CreateLogSubscriptionWithContext(ctx, &input) + _, err := conn.CreateLogSubscription(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Directory Service Log Subscription: %s", err) + return sdkdiag.AppendErrorf(diags, "creating Directory Service Log Subscription (%s): %s", directoryID, err) } - d.SetId(directoryId.(string)) + d.SetId(directoryID) return append(diags, resourceLogSubscriptionRead(ctx, d, meta)...) 
} func resourceLogSubscriptionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) - directoryId := d.Id() + logSubscription, err := findLogSubscriptionByID(ctx, conn, d.Id()) - input := directoryservice.ListLogSubscriptionsInput{ - DirectoryId: aws.String(directoryId), + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] Directory Service Log Subscription (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags } - out, err := conn.ListLogSubscriptionsWithContext(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "listing Directory Service Log Subscription: %s", err) - } - - if len(out.LogSubscriptions) == 0 { - log.Printf("[WARN] No log subscriptions for directory %s found", directoryId) - d.SetId("") - return diags + return sdkdiag.AppendErrorf(diags, "reading Directory Service Log Subscription (%s): %s", d.Id(), err) } - logSubscription := out.LogSubscriptions[0] d.Set("directory_id", logSubscription.DirectoryId) d.Set(names.AttrLogGroupName, logSubscription.LogGroupName) @@ -93,18 +91,62 @@ func resourceLogSubscriptionRead(ctx context.Context, d *schema.ResourceData, me func resourceLogSubscriptionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) - directoryId := d.Id() + log.Printf("[DEBUG] Deleting Directory Service Log Subscription: %s", d.Id()) + _, err := conn.DeleteLogSubscription(ctx, &directoryservice.DeleteLogSubscriptionInput{ + DirectoryId: aws.String(d.Id()), + }) - input := directoryservice.DeleteLogSubscriptionInput{ - DirectoryId: aws.String(directoryId), + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { + return diags } - _, err := 
conn.DeleteLogSubscriptionWithContext(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Directory Service Log Subscription: %s", err) + return sdkdiag.AppendErrorf(diags, "deleting Directory Service Log Subscription (%s): %s", d.Id(), err) } return diags } + +func findLogSubscription(ctx context.Context, conn *directoryservice.Client, input *directoryservice.ListLogSubscriptionsInput) (*awstypes.LogSubscription, error) { + output, err := findLogSubscriptions(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findLogSubscriptions(ctx context.Context, conn *directoryservice.Client, input *directoryservice.ListLogSubscriptionsInput) ([]awstypes.LogSubscription, error) { + var output []awstypes.LogSubscription + + pages := directoryservice.NewListLogSubscriptionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.LogSubscriptions...) 
+ } + + return output, nil +} + +func findLogSubscriptionByID(ctx context.Context, conn *directoryservice.Client, directoryID string) (*awstypes.LogSubscription, error) { + input := &directoryservice.ListLogSubscriptionsInput{ + DirectoryId: aws.String(directoryID), + } + + return findLogSubscription(ctx, conn, input) +} diff --git a/internal/service/ds/log_subscription_test.go b/internal/service/ds/log_subscription_test.go index cc662ac3a13..7034ca5cd9e 100644 --- a/internal/service/ds/log_subscription_test.go +++ b/internal/service/ds/log_subscription_test.go @@ -8,21 +8,19 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfds "github.com/hashicorp/terraform-provider-aws/internal/service/ds" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccDSLogSubscription_basic(t *testing.T) { ctx := acctest.Context(t) - resourceName := "aws_directory_service_log_subscription.subscription" - logGroupName := "ad-service-log-subscription-test" + resourceName := "aws_directory_service_log_subscription.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -32,16 +30,13 @@ func TestAccDSLogSubscription_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckLogSubscriptionDestroy(ctx), Steps: []resource.TestStep{ - // test create { - Config: testAccLogSubscriptionConfig_basic(rName, domainName, logGroupName), + Config: 
testAccLogSubscriptionConfig_basic(rName, domainName), Check: resource.ComposeTestCheckFunc( - testAccCheckLogSubscriptionExists(ctx, resourceName, - logGroupName, - ), + testAccCheckLogSubscriptionExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrLogGroupName, rName), ), }, - // test import { ResourceName: resourceName, ImportState: true, @@ -51,20 +46,42 @@ func TestAccDSLogSubscription_basic(t *testing.T) { }) } +func TestAccDSLogSubscription_disappears(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_directory_service_log_subscription.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domainName := acctest.RandomDomainName() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckDirectoryService(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLogSubscriptionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLogSubscriptionConfig_basic(rName, domainName), + Check: resource.ComposeTestCheckFunc( + testAccCheckLogSubscriptionExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfds.ResourceLogSubscription(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func testAccCheckLogSubscriptionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_directory_service_log_subscription" { continue } - res, err := conn.ListLogSubscriptionsWithContext(ctx, &directoryservice.ListLogSubscriptionsInput{ - DirectoryId: aws.String(rs.Primary.ID), - }) + _, err := tfds.FindLogSubscriptionByID(ctx, conn, rs.Primary.ID) - if tfawserr.ErrCodeEquals(err, 
directoryservice.ErrCodeEntityDoesNotExistException) { + if tfresource.NotFound(err) { continue } @@ -72,59 +89,37 @@ func testAccCheckLogSubscriptionDestroy(ctx context.Context) resource.TestCheckF return err } - if len(res.LogSubscriptions) > 0 { - return fmt.Errorf("Expected AWS Directory Service Log Subscription to be gone, but was still found") - } + return fmt.Errorf("Directory Service Log Subscription %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckLogSubscriptionExists(ctx context.Context, name string, logGroupName string) resource.TestCheckFunc { +func testAccCheckLogSubscriptionExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", name) - } - - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") + return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) - res, err := conn.ListLogSubscriptionsWithContext(ctx, &directoryservice.ListLogSubscriptionsInput{ - DirectoryId: aws.String(rs.Primary.ID), - }) + _, err := tfds.FindLogSubscriptionByID(ctx, conn, rs.Primary.ID) - if err != nil { - return err - } - - if len(res.LogSubscriptions) == 0 { - return fmt.Errorf("No Log subscription found") - } - - if *(res.LogSubscriptions[0].LogGroupName) != logGroupName { - return fmt.Errorf("Expected Log subscription not found") - } - - return nil + return err } } -func testAccLogSubscriptionConfig_basic(rName, domain, logGroupName string) string { - return acctest.ConfigCompose( - acctest.ConfigVPCWithSubnets(rName, 2), - fmt.Sprintf(` -resource "aws_directory_service_log_subscription" "subscription" { +func testAccLogSubscriptionConfig_basic(rName, domain string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` 
+resource "aws_directory_service_log_subscription" "test" { directory_id = aws_directory_service_directory.test.id log_group_name = aws_cloudwatch_log_group.test.name } resource "aws_directory_service_directory" "test" { - name = %[1]q + name = %[2]q password = "SuperSecretPassw0rd" type = "MicrosoftAD" edition = "Standard" @@ -135,16 +130,16 @@ resource "aws_directory_service_directory" "test" { } tags = { - Name = "terraform-testacc-directory-service-log-subscription" + Name = %[1]q } } resource "aws_cloudwatch_log_group" "test" { - name = %[2]q + name = %[1]q retention_in_days = 1 } -data "aws_iam_policy_document" "ad-log-policy" { +data "aws_iam_policy_document" "test" { statement { actions = [ "logs:CreateLogStream", @@ -162,10 +157,9 @@ data "aws_iam_policy_document" "ad-log-policy" { } } -resource "aws_cloudwatch_log_resource_policy" "ad-log-policy" { - policy_document = data.aws_iam_policy_document.ad-log-policy.json - policy_name = "ad-log-policy" +resource "aws_cloudwatch_log_resource_policy" "test" { + policy_document = data.aws_iam_policy_document.test.json + policy_name = %[1]q } -`, domain, logGroupName), - ) +`, rName, domain)) } diff --git a/internal/service/ds/radius_settings.go b/internal/service/ds/radius_settings.go index e93a1070d0c..0d6ed63d262 100644 --- a/internal/service/ds/radius_settings.go +++ b/internal/service/ds/radius_settings.go @@ -8,20 +8,23 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_directory_service_radius_settings") -func ResourceRadiusSettings() *schema.Resource { +// @SDKResource("aws_directory_service_radius_settings", name="RADIUS Settings") +func resourceRadiusSettings() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRadiusSettingsCreate, ReadWithoutTimeout: resourceRadiusSettingsRead, @@ -39,9 +42,9 @@ func ResourceRadiusSettings() *schema.Resource { Schema: map[string]*schema.Schema{ "authentication_protocol": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(directoryservice.RadiusAuthenticationProtocol_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.RadiusAuthenticationProtocol](), }, "directory_id": { Type: schema.TypeString, @@ -92,26 +95,24 @@ func ResourceRadiusSettings() *schema.Resource { func resourceRadiusSettingsCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) directoryID := d.Get("directory_id").(string) input := &directoryservice.EnableRadiusInput{ DirectoryId: aws.String(directoryID), - RadiusSettings: &directoryservice.RadiusSettings{ - AuthenticationProtocol: aws.String(d.Get("authentication_protocol").(string)), + RadiusSettings: &awstypes.RadiusSettings{ + AuthenticationProtocol: awstypes.RadiusAuthenticationProtocol(d.Get("authentication_protocol").(string)), DisplayLabel: aws.String(d.Get("display_label").(string)), - RadiusPort: 
aws.Int64(int64(d.Get("radius_port").(int))), - RadiusRetries: aws.Int64(int64(d.Get("radius_retries").(int))), - RadiusServers: flex.ExpandStringSet(d.Get("radius_servers").(*schema.Set)), - RadiusTimeout: aws.Int64(int64(d.Get("radius_timeout").(int))), + RadiusPort: aws.Int32(int32(d.Get("radius_port").(int))), + RadiusRetries: int32(d.Get("radius_retries").(int)), + RadiusServers: flex.ExpandStringValueSet(d.Get("radius_servers").(*schema.Set)), + RadiusTimeout: aws.Int32(int32(d.Get("radius_timeout").(int))), SharedSecret: aws.String(d.Get("shared_secret").(string)), - UseSameUsername: aws.Bool(d.Get("use_same_username").(bool)), + UseSameUsername: d.Get("use_same_username").(bool), }, } - log.Printf("[DEBUG] Enabling Directory Service Directory RADIUS: %s", input) - _, err := conn.EnableRadiusWithContext(ctx, input) + _, err := conn.EnableRadius(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "enabling Directory Service Directory (%s) RADIUS: %s", directoryID, err) @@ -128,13 +129,12 @@ func resourceRadiusSettingsCreate(ctx context.Context, d *schema.ResourceData, m func resourceRadiusSettingsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) - - output, err := FindRadiusSettings(ctx, conn, d.Id()) + output, err := findRadiusSettingsByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] Directory Service Directory (%s) RADIUS settings not found, removing from state", d.Id()) + log.Printf("[WARN] Directory Service Directory (%s) RADIUS Settings not found, removing from state", d.Id()) d.SetId("") return diags } @@ -148,7 +148,7 @@ func resourceRadiusSettingsRead(ctx context.Context, d *schema.ResourceData, met d.Set("directory_id", d.Id()) d.Set("radius_port", output.RadiusPort) d.Set("radius_retries", output.RadiusRetries) - d.Set("radius_servers", 
aws.StringValueSlice(output.RadiusServers)) + d.Set("radius_servers", output.RadiusServers) d.Set("radius_timeout", output.RadiusTimeout) d.Set("shared_secret", output.SharedSecret) d.Set("use_same_username", output.UseSameUsername) @@ -158,25 +158,23 @@ func resourceRadiusSettingsRead(ctx context.Context, d *schema.ResourceData, met func resourceRadiusSettingsUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) input := &directoryservice.UpdateRadiusInput{ DirectoryId: aws.String(d.Id()), - RadiusSettings: &directoryservice.RadiusSettings{ - AuthenticationProtocol: aws.String(d.Get("authentication_protocol").(string)), + RadiusSettings: &awstypes.RadiusSettings{ + AuthenticationProtocol: awstypes.RadiusAuthenticationProtocol(d.Get("authentication_protocol").(string)), DisplayLabel: aws.String(d.Get("display_label").(string)), - RadiusPort: aws.Int64(int64(d.Get("radius_port").(int))), - RadiusRetries: aws.Int64(int64(d.Get("radius_retries").(int))), - RadiusServers: flex.ExpandStringSet(d.Get("radius_servers").(*schema.Set)), - RadiusTimeout: aws.Int64(int64(d.Get("radius_timeout").(int))), + RadiusPort: aws.Int32(int32(d.Get("radius_port").(int))), + RadiusRetries: int32(d.Get("radius_retries").(int)), + RadiusServers: flex.ExpandStringValueSet(d.Get("radius_servers").(*schema.Set)), + RadiusTimeout: aws.Int32(int32(d.Get("radius_timeout").(int))), SharedSecret: aws.String(d.Get("shared_secret").(string)), - UseSameUsername: aws.Bool(d.Get("use_same_username").(bool)), + UseSameUsername: d.Get("use_same_username").(bool), }, } - log.Printf("[DEBUG] Updating Directory Service Directory RADIUS: %s", input) - _, err := conn.UpdateRadiusWithContext(ctx, input) + _, err := conn.UpdateRadius(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Directory Service Directory (%s) RADIUS: %s", 
d.Id(), err) @@ -191,14 +189,14 @@ func resourceRadiusSettingsUpdate(ctx context.Context, d *schema.ResourceData, m func resourceRadiusSettingsDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) - - _, err := conn.DisableRadiusWithContext(ctx, &directoryservice.DisableRadiusInput{ + log.Printf("[DEBUG] Deleting Directory Service RADIUS Settings: %s", d.Id()) + _, err := conn.DisableRadius(ctx, &directoryservice.DisableRadiusInput{ DirectoryId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeDirectoryDoesNotExistException) { + if errs.IsA[*awstypes.DirectoryDoesNotExistException](err) { return diags } @@ -208,3 +206,50 @@ func resourceRadiusSettingsDelete(ctx context.Context, d *schema.ResourceData, m return diags } + +func findRadiusSettingsByID(ctx context.Context, conn *directoryservice.Client, directoryID string) (*awstypes.RadiusSettings, error) { + output, err := findDirectoryByID(ctx, conn, directoryID) + + if err != nil { + return nil, err + } + + if output.RadiusSettings == nil { + return nil, tfresource.NewEmptyResultError(directoryID) + } + + return output.RadiusSettings, nil +} + +func statusRadius(ctx context.Context, conn *directoryservice.Client, directoryID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDirectoryByID(ctx, conn, directoryID) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.RadiusStatus), nil + } +} + +func waitRadiusCompleted(ctx context.Context, conn *directoryservice.Client, directoryID string, timeout time.Duration) (*awstypes.DirectoryDescription, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.RadiusStatusCreating), + Target: 
enum.Slice(awstypes.RadiusStatusCompleted), + Refresh: statusRadius(ctx, conn, directoryID), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.DirectoryDescription); ok { + return output, err + } + + return nil, err +} diff --git a/internal/service/ds/radius_settings_test.go b/internal/service/ds/radius_settings_test.go index 4fcc8a36d56..fb93c64970b 100644 --- a/internal/service/ds/radius_settings_test.go +++ b/internal/service/ds/radius_settings_test.go @@ -9,7 +9,7 @@ import ( "os" "testing" - "github.com/aws/aws-sdk-go/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -28,7 +28,7 @@ func TestAccDSRadiusSettings_basic(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - var v directoryservice.RadiusSettings + var v awstypes.RadiusSettings resourceName := "aws_directory_service_region.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -76,7 +76,7 @@ func TestAccDSRadiusSettings_disappears(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - var v directoryservice.RadiusSettings + var v awstypes.RadiusSettings resourceName := "aws_directory_service_region.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -104,14 +104,14 @@ func TestAccDSRadiusSettings_disappears(t *testing.T) { func testAccCheckRadiusSettingsDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != 
"aws_directory_service_radius_settings" { continue } - _, err := tfds.FindRadiusSettings(ctx, conn, rs.Primary.ID) + _, err := tfds.FindRadiusSettingsByID(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue @@ -121,27 +121,23 @@ func testAccCheckRadiusSettingsDestroy(ctx context.Context) resource.TestCheckFu return err } - return fmt.Errorf("Directory Service Directory %s RADIUS settings still exists", rs.Primary.ID) + return fmt.Errorf("Directory Service Directory %s RADIUS Settings still exists", rs.Primary.ID) } return nil } } -func testAccCheckRadiusSettingsExists(ctx context.Context, n string, v *directoryservice.RadiusSettings) resource.TestCheckFunc { +func testAccCheckRadiusSettingsExists(ctx context.Context, n string, v *awstypes.RadiusSettings) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Directory Service RADIUS Settings ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) - output, err := tfds.FindRadiusSettings(ctx, conn, rs.Primary.ID) + output, err := tfds.FindRadiusSettingsByID(ctx, conn, rs.Primary.ID) if err != nil { return err diff --git a/internal/service/ds/region.go b/internal/service/ds/region.go index 76821141aa1..4ab7dcee35e 100644 --- a/internal/service/ds/region.go +++ b/internal/service/ds/region.go @@ -10,13 +10,16 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -26,7 +29,7 @@ import ( // @SDKResource("aws_directory_service_region", name="Region") // @Tags -func ResourceRegion() *schema.Resource { +func resourceRegion() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRegionCreate, ReadWithoutTimeout: resourceRegionRead, @@ -92,12 +95,11 @@ func ResourceRegion() *schema.Resource { func resourceRegionCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).DSConn(ctx) + conn := meta.(*conns.AWSClient).DSClient(ctx) directoryID := d.Get("directory_id").(string) regionName := d.Get("region_name").(string) - id := RegionCreateResourceID(directoryID, regionName) + id := regionCreateResourceID(directoryID, regionName) input := &directoryservice.AddRegionInput{ DirectoryId: aws.String(directoryID), RegionName: aws.String(regionName), @@ -107,7 +109,7 @@ func resourceRegionCreate(ctx context.Context, d *schema.ResourceData, meta inte input.VPCSettings = expandDirectoryVpcSettings(v.([]interface{})[0].(map[string]interface{})) } - _, err := conn.AddRegionWithContext(ctx, input) + _, err := conn.AddRegion(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Directory Service Region (%s): %s", id, err) @@ -119,16 +121,18 @@ func resourceRegionCreate(ctx context.Context, d *schema.ResourceData, meta inte return sdkdiag.AppendErrorf(diags, "waiting for Directory Service Region (%s) create: 
%s", d.Id(), err) } - regionConn := meta.(*conns.AWSClient).DSConnForRegion(ctx, regionName) + optFn := func(o *directoryservice.Options) { + o.Region = regionName + } if tags := getTagsIn(ctx); len(tags) > 0 { - if err := createTags(ctx, regionConn, directoryID, tags); err != nil { + if err := createTags(ctx, conn, directoryID, tags, optFn); err != nil { return sdkdiag.AppendErrorf(diags, "setting Directory Service Directory (%s) tags: %s", directoryID, err) } } if v, ok := d.GetOk("desired_number_of_domain_controllers"); ok { - if err := updateNumberOfDomainControllers(ctx, regionConn, directoryID, v.(int), d.Timeout(schema.TimeoutCreate)); err != nil { + if err := updateNumberOfDomainControllers(ctx, conn, directoryID, v.(int), d.Timeout(schema.TimeoutCreate), optFn); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -138,16 +142,14 @@ func resourceRegionCreate(ctx context.Context, d *schema.ResourceData, meta inte func resourceRegionRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) - - directoryID, regionName, err := RegionParseResourceID(d.Id()) - + directoryID, regionName, err := regionParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - region, err := FindRegion(ctx, conn, directoryID, regionName) + region, err := findRegionByTwoPartKey(ctx, conn, directoryID, regionName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Directory Service Region (%s) not found, removing from state", d.Id()) @@ -170,9 +172,11 @@ func resourceRegionRead(ctx context.Context, d *schema.ResourceData, meta interf d.Set("vpc_settings", nil) } - regionConn := meta.(*conns.AWSClient).DSConnForRegion(ctx, regionName) + optFn := func(o *directoryservice.Options) { + o.Region = regionName + } - tags, err := listTags(ctx, regionConn, directoryID) + tags, err := 
listTags(ctx, conn, directoryID, optFn) if err != nil { return sdkdiag.AppendErrorf(diags, "listing tags for Directory Service Directory (%s): %s", directoryID, err) @@ -185,17 +189,20 @@ func resourceRegionRead(ctx context.Context, d *schema.ResourceData, meta interf func resourceRegionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - directoryID, regionName, err := RegionParseResourceID(d.Id()) - + directoryID, regionName, err := regionParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - conn := meta.(*conns.AWSClient).DSConnForRegion(ctx, regionName) + // The Region must be updated using a client in the region. + optFn := func(o *directoryservice.Options) { + o.Region = regionName + } if d.HasChange("desired_number_of_domain_controllers") { - if err := updateNumberOfDomainControllers(ctx, conn, directoryID, d.Get("desired_number_of_domain_controllers").(int), d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := updateNumberOfDomainControllers(ctx, conn, directoryID, d.Get("desired_number_of_domain_controllers").(int), d.Timeout(schema.TimeoutUpdate), optFn); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -203,7 +210,7 @@ func resourceRegionUpdate(ctx context.Context, d *schema.ResourceData, meta inte if d.HasChange(names.AttrTagsAll) { o, n := d.GetChange(names.AttrTagsAll) - if err := updateTags(ctx, conn, directoryID, o, n); err != nil { + if err := updateTags(ctx, conn, directoryID, o, n, optFn); err != nil { return sdkdiag.AppendErrorf(diags, "updating Directory Service Directory (%s) tags: %s", directoryID, err) } } @@ -213,21 +220,23 @@ func resourceRegionUpdate(ctx context.Context, d *schema.ResourceData, meta inte func resourceRegionDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := 
meta.(*conns.AWSClient).DSClient(ctx) - directoryID, regionName, err := RegionParseResourceID(d.Id()) - + directoryID, regionName, err := regionParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } // The Region must be removed using a client in the region. - conn := meta.(*conns.AWSClient).DSConnForRegion(ctx, regionName) + optFn := func(o *directoryservice.Options) { + o.Region = regionName + } - _, err = conn.RemoveRegionWithContext(ctx, &directoryservice.RemoveRegionInput{ + _, err = conn.RemoveRegion(ctx, &directoryservice.RemoveRegionInput{ DirectoryId: aws.String(directoryID), - }) + }, optFn) - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeDirectoryDoesNotExistException) { + if errs.IsA[*awstypes.DirectoryDoesNotExistException](err) { return diags } @@ -235,28 +244,134 @@ func resourceRegionDelete(ctx context.Context, d *schema.ResourceData, meta inte return sdkdiag.AppendErrorf(diags, "deleting Directory Service Region (%s): %s", d.Id(), err) } - if _, err := waitRegionDeleted(ctx, conn, directoryID, regionName, d.Timeout(schema.TimeoutDelete)); err != nil { + if _, err := waitRegionDeleted(ctx, conn, directoryID, regionName, d.Timeout(schema.TimeoutDelete), optFn); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Directory Service Region (%s) delete: %s", d.Id(), err) } return diags } -const regionIDSeparator = "," // nosemgrep:ci.ds-in-const-name,ci.ds-in-var-name +const regionResourceIDSeparator = "," // nosemgrep:ci.ds-in-const-name,ci.ds-in-var-name -func RegionCreateResourceID(directoryID, regionName string) string { +func regionCreateResourceID(directoryID, regionName string) string { parts := []string{directoryID, regionName} - id := strings.Join(parts, regionIDSeparator) + id := strings.Join(parts, regionResourceIDSeparator) return id } -func RegionParseResourceID(id string) (string, string, error) { - parts := strings.Split(id, regionIDSeparator) +func regionParseResourceID(id string) (string, 
string, error) { + parts := strings.Split(id, regionResourceIDSeparator) if len(parts) == 2 && parts[0] != "" && parts[1] != "" { return parts[0], parts[1], nil } - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected DirectoryID%[2]sRegionName", id, regionIDSeparator) + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected DIRECTORY_ID%[2]sREGION_NAME", id, regionResourceIDSeparator) +} + +func findRegion(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeRegionsInput, optFns ...func(*directoryservice.Options)) (*awstypes.RegionDescription, error) { + output, err := findRegions(ctx, conn, input, optFns...) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findRegions(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeRegionsInput, optFns ...func(*directoryservice.Options)) ([]awstypes.RegionDescription, error) { + var output []awstypes.RegionDescription + + pages := directoryservice.NewDescribeRegionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx, optFns...) + + if errs.IsA[*awstypes.DirectoryDoesNotExistException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.RegionsDescription...) + } + + return output, nil +} + +func findRegionByTwoPartKey(ctx context.Context, conn *directoryservice.Client, directoryID, regionName string, optFns ...func(*directoryservice.Options)) (*awstypes.RegionDescription, error) { + input := &directoryservice.DescribeRegionsInput{ + DirectoryId: aws.String(directoryID), + RegionName: aws.String(regionName), + } + + output, err := findRegion(ctx, conn, input, optFns...) 
+ + if err != nil { + return nil, err + } + + if status := output.Status; status == awstypes.DirectoryStageDeleted { + return nil, &retry.NotFoundError{ + Message: string(status), + LastRequest: input, + } + } + + return output, nil +} + +func statusRegion(ctx context.Context, conn *directoryservice.Client, directoryID, regionName string, optFns ...func(*directoryservice.Options)) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findRegionByTwoPartKey(ctx, conn, directoryID, regionName, optFns...) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitRegionCreated(ctx context.Context, conn *directoryservice.Client, directoryID, regionName string, timeout time.Duration, optFns ...func(*directoryservice.Options)) (*awstypes.RegionDescription, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DirectoryStageRequested, awstypes.DirectoryStageCreating, awstypes.DirectoryStageCreated), + Target: enum.Slice(awstypes.DirectoryStageActive), + Refresh: statusRegion(ctx, conn, directoryID, regionName, optFns...), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.RegionDescription); ok { + return output, err + } + + return nil, err +} + +func waitRegionDeleted(ctx context.Context, conn *directoryservice.Client, directoryID, regionName string, timeout time.Duration, optFns ...func(*directoryservice.Options)) (*awstypes.RegionDescription, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.DirectoryStageActive, awstypes.DirectoryStageDeleting), + Target: []string{}, + Refresh: statusRegion(ctx, conn, directoryID, regionName, optFns...), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.RegionDescription); ok { + return output, 
err + } + + return nil, err } diff --git a/internal/service/ds/region_test.go b/internal/service/ds/region_test.go index 2d666d44433..fab0e7fdd6c 100644 --- a/internal/service/ds/region_test.go +++ b/internal/service/ds/region_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccDSRegion_basic(t *testing.T) { ctx := acctest.Context(t) - var v directoryservice.RegionDescription + var v awstypes.RegionDescription resourceName := "aws_directory_service_region.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -56,7 +56,7 @@ func TestAccDSRegion_basic(t *testing.T) { func TestAccDSRegion_disappears(t *testing.T) { ctx := acctest.Context(t) - var v directoryservice.RegionDescription + var v awstypes.RegionDescription resourceName := "aws_directory_service_region.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -85,7 +85,7 @@ func TestAccDSRegion_disappears(t *testing.T) { func TestAccDSRegion_tags(t *testing.T) { ctx := acctest.Context(t) - var v directoryservice.RegionDescription + var v awstypes.RegionDescription resourceName := "aws_directory_service_region.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -136,7 +136,7 @@ func TestAccDSRegion_tags(t *testing.T) { func TestAccDSRegion_desiredNumberOfDomainControllers(t *testing.T) { ctx := acctest.Context(t) - var v directoryservice.RegionDescription + var v awstypes.RegionDescription resourceName := "aws_directory_service_region.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -176,20 +176,14 @@ func TestAccDSRegion_desiredNumberOfDomainControllers(t *testing.T) { func testAccCheckRegionDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_directory_service_region" { continue } - directoryID, regionName, err := tfds.RegionParseResourceID(rs.Primary.ID) - - if err != nil { - return err - } - - _, err = tfds.FindRegion(ctx, conn, directoryID, regionName) + _, err := tfds.FindRegionByTwoPartKey(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.Attributes["region_name"]) if tfresource.NotFound(err) { continue @@ -206,26 +200,16 @@ func testAccCheckRegionDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckRegionExists(ctx context.Context, n string, v *directoryservice.RegionDescription) resource.TestCheckFunc { +func testAccCheckRegionExists(ctx context.Context, n string, v *awstypes.RegionDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Directory Service Region ID is set") - } - - directoryID, regionName, err := tfds.RegionParseResourceID(rs.Primary.ID) - - if err != nil { - return err - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) - output, err := tfds.FindRegion(ctx, conn, directoryID, regionName) + output, err := tfds.FindRegionByTwoPartKey(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.Attributes["region_name"]) if err != nil { return err diff --git a/internal/service/ds/service_endpoint_resolver_gen.go 
b/internal/service/ds/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..971a5cd3e23 --- /dev/null +++ b/internal/service/ds/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package ds + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + directoryservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/directoryservice" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ directoryservice_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver directoryservice_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: directoryservice_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params directoryservice_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + 
tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up directoryservice endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*directoryservice_sdkv2.Options) { + return func(o *directoryservice_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ds/service_endpoints_gen_test.go b/internal/service/ds/service_endpoints_gen_test.go index 38ab27caa91..cd9c8648f13 100644 --- a/internal/service/ds/service_endpoints_gen_test.go +++ b/internal/service/ds/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -16,8 +18,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" directoryservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/directoryservice" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - directoryservice_sdkv1 "github.com/aws/aws-sdk-go/service/directoryservice" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" @@ -93,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -276,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, 
expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -288,45 +288,33 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }, } - t.Run("v1", func(t *testing.T) { - for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv - testcase := testcase + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase - t.Run(name, func(t *testing.T) { - testEndpointCase(t, providerRegion, testcase, callServiceV1) - }) - } - }) - - t.Run("v2", func(t *testing.T) { - for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv - testcase := testcase - - t.Run(name, func(t *testing.T) { - testEndpointCase(t, providerRegion, testcase, callServiceV2) - }) - } - }) + t.Run(name, func(t *testing.T) { + testEndpointCase(t, providerRegion, testcase, callService) + }) + } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := directoryservice_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), directoryservice_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := directoryservice_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), directoryservice_sdkv2.EndpointParameters{ @@ -334,17 +322,17 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { +func callService(ctx 
context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() client := meta.DSClient(ctx) @@ -369,21 +357,6 @@ func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) api return result } -func callServiceV1(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { - t.Helper() - - client := meta.DSConn(ctx) - - req, _ := client.DescribeDirectoriesRequest(&directoryservice_sdkv1.DescribeDirectoriesInput{}) - - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), - } -} - func withNoConfig(_ *caseSetup) { // no-op } @@ -437,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ds/service_package_gen.go 
b/internal/service/ds/service_package_gen.go index f491b2f9ab1..22687b4f095 100644 --- a/internal/service/ds/service_package_gen.go +++ b/internal/service/ds/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ds @@ -7,11 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" directoryservice_sdkv2 "github.com/aws/aws-sdk-go-v2/service/directoryservice" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - directoryservice_sdkv1 "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -26,7 +21,8 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { return []*types.ServicePackageFrameworkResource{ { - Factory: newResourceTrust, + Factory: newTrustResource, + Name: "Trust", }, } } @@ -34,8 +30,9 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceDirectory, + Factory: dataSourceDirectory, TypeName: "aws_directory_service_directory", + Name: "Directory", }, } } @@ -43,11 +40,12 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceConditionalForwarder, + Factory: 
resourceConditionalForwarder, TypeName: "aws_directory_service_conditional_forwarder", + Name: "Conditional Forwarder", }, { - Factory: ResourceDirectory, + Factory: resourceDirectory, TypeName: "aws_directory_service_directory", Name: "Directory", Tags: &types.ServicePackageResourceTags{ @@ -55,26 +53,30 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceLogSubscription, + Factory: resourceLogSubscription, TypeName: "aws_directory_service_log_subscription", + Name: "Log Subscription", }, { - Factory: ResourceRadiusSettings, + Factory: resourceRadiusSettings, TypeName: "aws_directory_service_radius_settings", + Name: "RADIUS Settings", }, { - Factory: ResourceRegion, + Factory: resourceRegion, TypeName: "aws_directory_service_region", Name: "Region", Tags: &types.ServicePackageResourceTags{}, }, { - Factory: ResourceSharedDirectory, + Factory: resourceSharedDirectory, TypeName: "aws_directory_service_shared_directory", + Name: "Shared Directory", }, { - Factory: ResourceSharedDirectoryAccepter, + Factory: resourceSharedDirectoryAccepter, TypeName: "aws_directory_service_shared_directory_accepter", + Name: "Shared Directory Accepter", }, } } @@ -83,44 +85,14 @@ func (p *servicePackage) ServicePackageName() string { return names.DS } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*directoryservice_sdkv1.DirectoryService, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return directoryservice_sdkv1.New(sess.Copy(&cfg)), nil -} - // NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*directoryservice_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return directoryservice_sdkv2.NewFromConfig(cfg, func(o *directoryservice_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return directoryservice_sdkv2.NewFromConfig(cfg, + directoryservice_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/ds/shared_directory.go b/internal/service/ds/shared_directory.go index 91942d3826d..6c630e99bc2 100644 --- a/internal/service/ds/shared_directory.go +++ 
b/internal/service/ds/shared_directory.go @@ -10,23 +10,22 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - ResNameSharedDirectory = "Shared Directory" -) - -// @SDKResource("aws_directory_service_shared_directory") -func ResourceSharedDirectory() *schema.Resource { +// @SDKResource("aws_directory_service_shared_directory", name="Shared Directory") +func resourceSharedDirectory() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSharedDirectoryCreate, ReadWithoutTimeout: resourceSharedDirectoryRead, @@ -47,11 +46,11 @@ func ResourceSharedDirectory() *schema.Resource { ForceNew: true, }, "method": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: directoryservice.ShareMethodHandshake, - ValidateFunc: validation.StringInSlice(directoryservice.ShareMethod_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.ShareMethodHandshake, + ValidateDiagFunc: enum.Validate[awstypes.ShareMethod](), }, "notes": { Type: schema.TypeString, @@ -76,10 +75,10 @@ func 
ResourceSharedDirectory() *schema.Resource { ForceNew: true, }, names.AttrType: { - Type: schema.TypeString, - Optional: true, - Default: directoryservice.TargetTypeAccount, - ValidateFunc: validation.StringInSlice(directoryservice.TargetType_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.TargetTypeAccount, + ValidateDiagFunc: enum.Validate[awstypes.TargetType](), }, }, }, @@ -90,13 +89,12 @@ func ResourceSharedDirectory() *schema.Resource { func resourceSharedDirectoryCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) - - dirId := d.Get("directory_id").(string) - input := directoryservice.ShareDirectoryInput{ - DirectoryId: aws.String(dirId), - ShareMethod: aws.String(d.Get("method").(string)), + directoryID := d.Get("directory_id").(string) + input := &directoryservice.ShareDirectoryInput{ + DirectoryId: aws.String(directoryID), + ShareMethod: awstypes.ShareMethod(d.Get("method").(string)), ShareTarget: expandShareTarget(d.Get(names.AttrTarget).([]interface{})[0].(map[string]interface{})), } @@ -104,53 +102,45 @@ func resourceSharedDirectoryCreate(ctx context.Context, d *schema.ResourceData, input.ShareNotes = aws.String(v.(string)) } - log.Printf("[DEBUG] Creating Shared Directory: %s", input) - out, err := conn.ShareDirectoryWithContext(ctx, &input) + output, err := conn.ShareDirectory(ctx, input) if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionCreating, ResNameSharedDirectory, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "creating Directory Service Shared Directory (%s): %s", directoryID, err) } - log.Printf("[DEBUG] Shared Directory created: %s", out) - d.SetId(sharedDirectoryID(dirId, aws.StringValue(out.SharedDirectoryId))) - d.Set("shared_directory_id", out.SharedDirectoryId) + 
d.SetId(sharedDirectoryCreateResourceID(directoryID, aws.ToString(output.SharedDirectoryId))) - return diags + return append(diags, resourceSharedDirectoryRead(ctx, d, meta)...) } func resourceSharedDirectoryRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) - - ownerDirID, sharedDirID, err := parseSharedDirectoryID(d.Id()) - + ownerDirID, sharedDirID, err := sharedDirectoryParseResourceID(d.Id()) if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionReading, ResNameSharedDirectory, d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - output, err := FindSharedDirectory(ctx, conn, ownerDirID, sharedDirID) + output, err := findSharedDirectoryByTwoPartKey(ctx, conn, ownerDirID, sharedDirID) if !d.IsNewResource() && tfresource.NotFound(err) { - create.LogNotFoundRemoveState(names.DS, create.ErrActionReading, ResNameSharedDirectory, d.Id()) + log.Printf("[WARN] Directory Service Shared Directory (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionReading, ResNameSharedDirectory, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Directory Service Shared Directory (%s): %s", d.Id(), err) } - log.Printf("[DEBUG] Received DS shared directory: %s", output) - d.Set("directory_id", output.OwnerDirectoryId) d.Set("method", output.ShareMethod) d.Set("notes", output.ShareNotes) d.Set("shared_directory_id", output.SharedDirectoryId) - if output.SharedAccountId != nil { if err := d.Set(names.AttrTarget, []interface{}{flattenShareTarget(output)}); err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionSetting, ResNameSharedDirectory, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "setting target: %s", err) } } else { d.Set(names.AttrTarget, nil) @@ -161,74 
+151,190 @@ func resourceSharedDirectoryRead(ctx context.Context, d *schema.ResourceData, me func resourceSharedDirectoryDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) - - dirId := d.Get("directory_id").(string) - sharedId := d.Get("shared_directory_id").(string) + ownerDirID, sharedDirID, err := sharedDirectoryParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } - input := directoryservice.UnshareDirectoryInput{ - DirectoryId: aws.String(dirId), + log.Printf("[DEBUG] Deleting Directory Service Shared Directory: %s", d.Id()) + _, err = conn.UnshareDirectory(ctx, &directoryservice.UnshareDirectoryInput{ + DirectoryId: aws.String(ownerDirID), UnshareTarget: expandUnshareTarget(d.Get(names.AttrTarget).([]interface{})[0].(map[string]interface{})), + }) + + if errs.IsA[*awstypes.DirectoryNotSharedException](err) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting Directory Service Shared Directory (%s): %s", d.Id(), err) + } + + if _, err := waitSharedDirectoryDeleted(ctx, conn, ownerDirID, sharedDirID, d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Directory Service Shared Directory (%s) delete: %s", d.Id(), err) } - log.Printf("[DEBUG] Unsharing Directory Service Directory: %s", input) - output, err := conn.UnshareDirectoryWithContext(ctx, &input) + return diags +} + +const sharedDirectoryResourceIDSeparator = "/" // nosemgrep:ci.ds-in-const-name,ci.ds-in-var-name + +func sharedDirectoryCreateResourceID(ownerDirectoryID, sharedDirectoryID string) string { + parts := []string{ownerDirectoryID, sharedDirectoryID} + id := strings.Join(parts, sharedDirectoryResourceIDSeparator) + + return id +} + +func sharedDirectoryParseResourceID(id string) (string, string, error) { + parts := 
strings.SplitN(id, sharedDirectoryResourceIDSeparator, 2) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected OWNER_DIRECTORY_ID%[2]sSHARED_DIRECTORY_ID", id, sharedDirectoryResourceIDSeparator) +} + +func findSharedDirectory(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeSharedDirectoriesInput) (*awstypes.SharedDirectory, error) { // nosemgrep:ci.ds-in-func-name + output, err := findSharedDirectories(ctx, conn, input) if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionDeleting, ResNameSharedDirectory, d.Id(), err) + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findSharedDirectories(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeSharedDirectoriesInput) ([]awstypes.SharedDirectory, error) { // nosemgrep:ci.ds-in-func-name + var output []awstypes.SharedDirectory + + pages := directoryservice.NewDescribeSharedDirectoriesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.SharedDirectories...) 
} - _, err = waitSharedDirectoryDeleted(ctx, conn, dirId, sharedId, d.Timeout(schema.TimeoutDelete)) + return output, nil +} + +func findSharedDirectoryByTwoPartKey(ctx context.Context, conn *directoryservice.Client, ownerDirectoryID, sharedDirectoryID string) (*awstypes.SharedDirectory, error) { // nosemgrep:ci.ds-in-func-name + input := &directoryservice.DescribeSharedDirectoriesInput{ + OwnerDirectoryId: aws.String(ownerDirectoryID), + SharedDirectoryIds: []string{sharedDirectoryID}, + } + + output, err := findSharedDirectory(ctx, conn, input) if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionWaitingForDeletion, ResNameSharedDirectory, d.Id(), err) + return nil, err } - log.Printf("[DEBUG] Unshared Directory Service Directory: %s", output) + if status := output.ShareStatus; status == awstypes.ShareStatusDeleted { + return nil, &retry.NotFoundError{ + Message: string(status), + LastRequest: input, + } + } - return diags + return output, nil +} + +func statusSharedDirectory(ctx context.Context, conn *directoryservice.Client, ownerDirectoryID, sharedDirectoryID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findSharedDirectoryByTwoPartKey(ctx, conn, ownerDirectoryID, sharedDirectoryID) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.ShareStatus), nil + } } -func expandShareTarget(tfMap map[string]interface{}) *directoryservice.ShareTarget { // nosemgrep:ci.ds-in-func-name +func waitSharedDirectoryDeleted(ctx context.Context, conn *directoryservice.Client, ownerDirectoryID, sharedDirectoryID string, timeout time.Duration) (*awstypes.SharedDirectory, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice( + awstypes.ShareStatusDeleting, + awstypes.ShareStatusShared, + awstypes.ShareStatusPendingAcceptance, + awstypes.ShareStatusRejectFailed, + awstypes.ShareStatusRejected, + 
awstypes.ShareStatusRejecting, + ), + Target: []string{}, + Refresh: statusSharedDirectory(ctx, conn, ownerDirectoryID, sharedDirectoryID), + Timeout: timeout, + MinTimeout: 30 * time.Second, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.SharedDirectory); ok { + return output, err + } + + return nil, err +} + +func expandShareTarget(tfMap map[string]interface{}) *awstypes.ShareTarget { // nosemgrep:ci.ds-in-func-name if tfMap == nil { return nil } - apiObject := &directoryservice.ShareTarget{} + apiObject := &awstypes.ShareTarget{} if v, ok := tfMap[names.AttrID].(string); ok && len(v) > 0 { apiObject.Id = aws.String(v) } if v, ok := tfMap[names.AttrType].(string); ok && len(v) > 0 { - apiObject.Type = aws.String(v) + apiObject.Type = awstypes.TargetType(v) } return apiObject } -func expandUnshareTarget(tfMap map[string]interface{}) *directoryservice.UnshareTarget { +func expandUnshareTarget(tfMap map[string]interface{}) *awstypes.UnshareTarget { if tfMap == nil { return nil } - apiObject := &directoryservice.UnshareTarget{} + apiObject := &awstypes.UnshareTarget{} if v, ok := tfMap[names.AttrID].(string); ok && len(v) > 0 { apiObject.Id = aws.String(v) } if v, ok := tfMap[names.AttrType].(string); ok && len(v) > 0 { - apiObject.Type = aws.String(v) + apiObject.Type = awstypes.TargetType(v) } return apiObject } // flattenShareTarget is not a mirror of expandShareTarget because the API data structures are -// different, with no ShareTarget returned -func flattenShareTarget(apiObject *directoryservice.SharedDirectory) map[string]interface{} { +// different, with no ShareTarget returned. 
+func flattenShareTarget(apiObject *awstypes.SharedDirectory) map[string]interface{} { if apiObject == nil { return nil } @@ -236,23 +342,10 @@ func flattenShareTarget(apiObject *directoryservice.SharedDirectory) map[string] tfMap := map[string]interface{}{} if apiObject.SharedAccountId != nil { - tfMap[names.AttrID] = aws.StringValue(apiObject.SharedAccountId) + tfMap[names.AttrID] = aws.ToString(apiObject.SharedAccountId) } - tfMap[names.AttrType] = directoryservice.TargetTypeAccount // only type available + tfMap[names.AttrType] = awstypes.TargetTypeAccount // only type available return tfMap } - -func sharedDirectoryID(ownerDirectoryID, sharedDirectoryID string) string { - return fmt.Sprintf("%s/%s", ownerDirectoryID, sharedDirectoryID) -} - -func parseSharedDirectoryID(id string) (string, string, error) { - idParts := strings.SplitN(id, "/", 2) - if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { - return "", "", fmt.Errorf("unexpected format of ID (%q), expected /", id) - } - - return idParts[0], idParts[1], nil -} diff --git a/internal/service/ds/shared_directory_accepter.go b/internal/service/ds/shared_directory_accepter.go index dee7835af99..ab5cc25201e 100644 --- a/internal/service/ds/shared_directory_accepter.go +++ b/internal/service/ds/shared_directory_accepter.go @@ -5,27 +5,25 @@ package ds import ( "context" - "errors" "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" - 
"github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - ResNameSharedDirectoryAccepter = "Shared Directory Accepter" -) - -// @SDKResource("aws_directory_service_shared_directory_accepter") -func ResourceSharedDirectoryAccepter() *schema.Resource { +// @SDKResource("aws_directory_service_shared_directory_accepter", name="Shared Directory Accepter") +func resourceSharedDirectoryAccepter() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSharedDirectoryAccepterCreate, ReadWithoutTimeout: resourceSharedDirectoryAccepterRead, @@ -68,33 +66,24 @@ func ResourceSharedDirectoryAccepter() *schema.Resource { func resourceSharedDirectoryAccepterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) - - input := directoryservice.AcceptSharedDirectoryInput{ - SharedDirectoryId: aws.String(d.Get("shared_directory_id").(string)), + sharedDirectoryID := d.Get("shared_directory_id").(string) + input := &directoryservice.AcceptSharedDirectoryInput{ + SharedDirectoryId: aws.String(sharedDirectoryID), } - log.Printf("[DEBUG] Accepting shared directory: %s", input) - - output, err := conn.AcceptSharedDirectoryWithContext(ctx, &input) + output, err := conn.AcceptSharedDirectory(ctx, input) if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionCreating, ResNameSharedDirectoryAccepter, d.Get("shared_directory_id").(string), err) + return sdkdiag.AppendErrorf(diags, "accepting Directory Service Shared Directory (%s): %s", sharedDirectoryID, err) } - if output == 
nil || output.SharedDirectory == nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionCreating, ResNameSharedDirectoryAccepter, d.Get("shared_directory_id").(string), errors.New("empty output")) - } - - d.SetId(d.Get("shared_directory_id").(string)) - + d.SetId(sharedDirectoryID) d.Set("notes", output.SharedDirectory.ShareNotes) // only available in response to create - _, err = waitDirectoryShared(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) - - if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionWaitingForCreation, ResNameSharedDirectoryAccepter, d.Id(), err) + if _, err := waitSharedDirectoryAccepted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for Directory Service Shared Directory (%s) accept: %s", d.Id(), err) } return append(diags, resourceSharedDirectoryAccepterRead(ctx, d, meta)...) @@ -102,13 +91,18 @@ func resourceSharedDirectoryAccepterCreate(ctx context.Context, d *schema.Resour func resourceSharedDirectoryAccepterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) + dir, err := findSharedDirectoryAccepterByID(ctx, conn, d.Id()) - dir, err := FindDirectoryByID(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] Directory Service Shared Directory Accepter (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionReading, ResNameSharedDirectoryAccepter, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Directory Service Shared Directory Accepter (%s): %s", d.Id(), err) } d.Set("method", dir.ShareMethod) @@ -121,27 +115,74 @@ func resourceSharedDirectoryAccepterRead(ctx context.Context, d *schema.Resource func 
resourceSharedDirectoryAccepterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).DSClient(ctx) - conn := meta.(*conns.AWSClient).DSConn(ctx) - - log.Printf("[DEBUG] Deleting Directory Service Directory: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, directoryApplicationDeauthorizedPropagationTimeout, func() (interface{}, error) { - return conn.DeleteDirectoryWithContext(ctx, &directoryservice.DeleteDirectoryInput{ + log.Printf("[DEBUG] Deleting Directory Service Shared Directory Accepter: %s", d.Id()) + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ClientException](ctx, directoryApplicationDeauthorizedPropagationTimeout, func() (interface{}, error) { + return conn.DeleteDirectory(ctx, &directoryservice.DeleteDirectoryInput{ DirectoryId: aws.String(d.Id()), }) - }, directoryservice.ErrCodeClientException, "authorized applications") + }, "authorized applications") - if tfawserr.ErrCodeEquals(err, directoryservice.ErrCodeEntityDoesNotExistException) { + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { return diags } if err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionDeleting, ResNameSharedDirectoryAccepter, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting Directory Service Shared Directory Accepter (%s): %s", d.Id(), err) } if _, err := waitDirectoryDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return create.AppendDiagError(diags, names.DS, create.ErrActionWaitingForDeletion, ResNameSharedDirectoryAccepter, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for Directory Service Shared Directory Accepter (%s) delete: %s", d.Id(), err) } return diags } + +func findSharedDirectoryAccepterByID(ctx context.Context, conn *directoryservice.Client, id string) (*awstypes.DirectoryDescription, error) { // nosemgrep:ci.ds-in-func-name + output, err := 
findDirectoryByID(ctx, conn, id) + + if err != nil { + return nil, err + } + + if output.OwnerDirectoryDescription == nil { + return nil, tfresource.NewEmptyResultError(id) + } + + return output, nil +} + +func statusDirectoryShareStatus(ctx context.Context, conn *directoryservice.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findDirectoryByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.ShareStatus), nil + } +} + +func waitSharedDirectoryAccepted(ctx context.Context, conn *directoryservice.Client, id string, timeout time.Duration) (*awstypes.SharedDirectory, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ShareStatusPendingAcceptance, awstypes.ShareStatusSharing), + Target: enum.Slice(awstypes.ShareStatusShared), + Refresh: statusDirectoryShareStatus(ctx, conn, id), + Timeout: timeout, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.SharedDirectory); ok { + return output, err + } + + return nil, err +} diff --git a/internal/service/ds/shared_directory_accepter_test.go b/internal/service/ds/shared_directory_accepter_test.go index d4abeca1921..1b6c44b17cc 100644 --- a/internal/service/ds/shared_directory_accepter_test.go +++ b/internal/service/ds/shared_directory_accepter_test.go @@ -5,16 +5,15 @@ package ds_test import ( "context" - "errors" + "fmt" "testing" - "github.com/aws/aws-sdk-go/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" 
"github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" tfds "github.com/hashicorp/terraform-provider-aws/internal/service/ds" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -38,7 +37,7 @@ func TestAccDSSharedDirectoryAccepter_basic(t *testing.T) { Config: testAccSharedDirectoryAccepterConfig_basic(rName, domainName), Check: resource.ComposeTestCheckFunc( testAccCheckSharedDirectoryAccepterExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "method", directoryservice.ShareMethodHandshake), + resource.TestCheckResourceAttr(resourceName, "method", string(awstypes.ShareMethodHandshake)), resource.TestCheckResourceAttr(resourceName, "notes", "There were hints and allegations"), resource.TestCheckResourceAttrPair(resourceName, names.AttrOwnerAccountID, "data.aws_caller_identity.current", names.AttrAccountID), resource.TestCheckResourceAttrSet(resourceName, "owner_directory_id"), @@ -61,16 +60,12 @@ func testAccCheckSharedDirectoryAccepterExists(ctx context.Context, n string) re return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { - return create.Error(names.DS, create.ErrActionCheckingExistence, tfds.ResNameSharedDirectoryAccepter, n, errors.New("not found")) + return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return create.Error(names.DS, create.ErrActionCheckingExistence, tfds.ResNameSharedDirectoryAccepter, n, errors.New("no ID is set")) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) - _, err := tfds.FindSharedDirectory(ctx, conn, rs.Primary.Attributes["owner_directory_id"], rs.Primary.Attributes["shared_directory_id"]) + _, err := tfds.FindSharedDirectoryByTwoPartKey(ctx, conn, rs.Primary.Attributes["owner_directory_id"], rs.Primary.Attributes["shared_directory_id"]) return err } diff --git 
a/internal/service/ds/shared_directory_test.go b/internal/service/ds/shared_directory_test.go index 402229397a9..ecd6eb7dfcc 100644 --- a/internal/service/ds/shared_directory_test.go +++ b/internal/service/ds/shared_directory_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccDSSharedDirectory_basic(t *testing.T) { ctx := acctest.Context(t) - var v directoryservice.SharedDirectory + var v awstypes.SharedDirectory resourceName := "aws_directory_service_shared_directory.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) domainName := acctest.RandomDomainName() @@ -53,20 +53,44 @@ func TestAccDSSharedDirectory_basic(t *testing.T) { }) } -func testAccCheckSharedDirectoryExists(ctx context.Context, n string, v *directoryservice.SharedDirectory) resource.TestCheckFunc { +func TestAccDSSharedDirectory_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.SharedDirectory + resourceName := "aws_directory_service_shared_directory.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + domainName := acctest.RandomDomainName() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckSharedDirectoryDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSharedDirectoryConfig_basic(rName, domainName), + Check: resource.ComposeTestCheckFunc( + testAccCheckSharedDirectoryExists(ctx, resourceName, &v), + 
acctest.CheckResourceDisappears(ctx, acctest.Provider, tfds.ResourceSharedDirectory(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckSharedDirectoryExists(ctx context.Context, n string, v *awstypes.SharedDirectory) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Directory Service Shared Directory ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) - output, err := tfds.FindSharedDirectory(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.Attributes["shared_directory_id"]) + output, err := tfds.FindSharedDirectoryByTwoPartKey(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.Attributes["shared_directory_id"]) if err != nil { return err @@ -80,14 +104,14 @@ func testAccCheckSharedDirectoryExists(ctx context.Context, n string, v *directo func testAccCheckSharedDirectoryDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).DSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_directory_service_shared_directory" { continue } - _, err := tfds.FindSharedDirectory(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.Attributes["shared_directory_id"]) + _, err := tfds.FindSharedDirectoryByTwoPartKey(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.Attributes["shared_directory_id"]) if tfresource.NotFound(err) { continue diff --git a/internal/service/ds/status.go b/internal/service/ds/status.go deleted file mode 100644 index b4af681dd01..00000000000 --- a/internal/service/ds/status.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package ds - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func statusDirectoryStage(ctx context.Context, conn *directoryservice.DirectoryService, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDirectoryByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Stage), nil - } -} - -func statusDirectoryShareStatus(ctx context.Context, conn *directoryservice.DirectoryService, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDirectoryByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.ShareStatus), nil - } -} - -func statusDomainController(ctx context.Context, conn *directoryservice.DirectoryService, directoryID, domainControllerID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDomainController(ctx, conn, directoryID, domainControllerID) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - -func statusRadius(ctx context.Context, conn *directoryservice.DirectoryService, directoryID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindDirectoryByID(ctx, conn, directoryID) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.RadiusStatus), nil - } -} - -func statusRegion(ctx context.Context, 
conn *directoryservice.DirectoryService, directoryID, regionName string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindRegion(ctx, conn, directoryID, regionName) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - -func statusSharedDirectory(ctx context.Context, conn *directoryservice.DirectoryService, ownerDirectoryID, sharedDirectoryID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindSharedDirectory(ctx, conn, ownerDirectoryID, sharedDirectoryID) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.ShareStatus), nil - } -} diff --git a/internal/service/ds/sweep.go b/internal/service/ds/sweep.go index 14619ad4624..ece22f17c7c 100644 --- a/internal/service/ds/sweep.go +++ b/internal/service/ds/sweep.go @@ -7,13 +7,12 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/go-multierror" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -45,40 +44,36 @@ func sweepDirectories(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.DSConn(ctx) - + conn := client.DSClient(ctx) + input := &directoryservice.DescribeDirectoriesInput{} sweepResources := 
make([]sweep.Sweepable, 0) - input := &directoryservice.DescribeDirectoriesInput{} - err = describeDirectoriesPages(ctx, conn, input, func(page *directoryservice.DescribeDirectoriesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := directoryservice.NewDescribeDirectoriesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Directory Service Directory sweep for %s: %s", region, err) + return nil } - for _, directory := range page.DirectoryDescriptions { - r := ResourceDirectory() + if err != nil { + return fmt.Errorf("error listing Directory Service Directories (%s): %w", region, err) + } + + for _, v := range page.DirectoryDescriptions { + r := resourceDirectory() d := r.Data(nil) - d.SetId(aws.StringValue(directory.DirectoryId)) + d.SetId(aws.ToString(v.DirectoryId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Directory Service Directory sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("listing Directory Service Directories (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) if err != nil { - return fmt.Errorf("sweeping Directory Service Directories (%s): %w", region, err) + return fmt.Errorf("error sweeping Directory Service Directories (%s): %w", region, err) } return nil @@ -90,71 +85,58 @@ func sweepRegions(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - - conn := client.DSConn(ctx) + conn := client.DSClient(ctx) + input := &directoryservice.DescribeDirectoriesInput{} sweepResources := make([]sweep.Sweepable, 0) - var errs *multierror.Error - input := &directoryservice.DescribeDirectoriesInput{} + pages := directoryservice.NewDescribeDirectoriesPaginator(conn, input) + for pages.HasMorePages() { + page, 
err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Directory Service Region sweep for %s: %s", region, err) + return nil + } - err = describeDirectoriesPages(ctx, conn, input, func(page *directoryservice.DescribeDirectoriesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + if err != nil { + return fmt.Errorf("error listing Directory Service Directories (%s): %w", region, err) } - for _, directory := range page.DirectoryDescriptions { - if directory == nil { + for _, v := range page.DirectoryDescriptions { + if v.RegionsInfo == nil || len(v.RegionsInfo.AdditionalRegions) == 0 { continue } - if directory.RegionsInfo == nil || len(directory.RegionsInfo.AdditionalRegions) == 0 { - continue + input := &directoryservice.DescribeRegionsInput{ + DirectoryId: v.DirectoryId, } - err := describeRegionsPages(ctx, conn, &directoryservice.DescribeRegionsInput{ - DirectoryId: directory.DirectoryId, - }, func(page *directoryservice.DescribeRegionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := directoryservice.NewDescribeRegionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + continue } - for _, region := range page.RegionsDescription { - if region != nil && aws.StringValue(region.RegionType) != directoryservice.RegionTypePrimary { - r := ResourceRegion() + for _, v := range page.RegionsDescription { + if v.RegionType != awstypes.RegionTypePrimary { + r := resourceRegion() d := r.Data(nil) - d.SetId(RegionCreateResourceID(aws.StringValue(region.DirectoryId), aws.StringValue(region.RegionName))) + d.SetId(regionCreateResourceID(aws.ToString(v.DirectoryId), aws.ToString(v.RegionName))) + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } } - - return !lastPage - }) - - if tfawserr.ErrMessageContains(err, directoryservice.ErrCodeUnsupportedOperationException, "Multi-region replication") { - 
log.Printf("[INFO] Skipping Directory Service Regions for %s", aws.StringValue(directory.DirectoryId)) - continue - } - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("describing Directory Service Regions for %s: %w", aws.StringValue(directory.DirectoryId), err)) - continue } } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("listing Directory Service Directories for %s: %w", region, err)) } - if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - errs = multierror.Append(errs, fmt.Errorf("sweeping Directory Service Regions for %s: %w", region, err)) - } + err = sweep.SweepOrchestrator(ctx, sweepResources) - if awsv1.SkipSweepError(errs.ErrorOrNil()) { - log.Printf("[WARN] Skipping Directory Service Regions sweep for %s: %s", region, errs) - return nil + if err != nil { + return fmt.Errorf("error sweeping Directory Service Regions (%s): %w", region, err) } - return errs.ErrorOrNil() + return nil } diff --git a/internal/service/ds/tags_gen.go b/internal/service/ds/tags_gen.go index 6d5db3537d8..a9125322a08 100644 --- a/internal/service/ds/tags_gen.go +++ b/internal/service/ds/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/aws/aws-sdk-go/service/directoryservice/directoryserviceiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/directoryservice" + awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists ds service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn directoryserviceiface.DirectoryServiceAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *directoryservice.Client, identifier string, optFns ...func(*directoryservice.Options)) (tftags.KeyValueTags, error) { input := &directoryservice.ListTagsForResourceInput{ ResourceId: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn directoryserviceiface.DirectoryServiceAP // ListTags lists ds service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).DSConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).DSClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns ds service tags. -func Tags(tags tftags.KeyValueTags) []*directoryservice.Tag { - result := make([]*directoryservice.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &directoryservice.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*directoryservice.Tag { } // KeyValueTags creates tftags.KeyValueTags from directoryservice service tags. 
-func KeyValueTags(ctx context.Context, tags []*directoryservice.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*directoryservice.Tag) tftags.KeyV // getTagsIn returns ds service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*directoryservice.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,25 +91,25 @@ func getTagsIn(ctx context.Context) []*directoryservice.Tag { } // setTagsOut sets ds service tags in Context. -func setTagsOut(ctx context.Context, tags []*directoryservice.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } // createTags creates ds service tags for new resources. -func createTags(ctx context.Context, conn directoryserviceiface.DirectoryServiceAPI, identifier string, tags []*directoryservice.Tag) error { +func createTags(ctx context.Context, conn *directoryservice.Client, identifier string, tags []awstypes.Tag, optFns ...func(*directoryservice.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates ds service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn directoryserviceiface.DirectoryServiceAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *directoryservice.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*directoryservice.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -120,10 +120,10 @@ func updateTags(ctx context.Context, conn directoryserviceiface.DirectoryService if len(removedTags) > 0 { input := &directoryservice.RemoveTagsFromResourceInput{ ResourceId: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.RemoveTagsFromResourceWithContext(ctx, input) + _, err := conn.RemoveTagsFromResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -138,7 +138,7 @@ func updateTags(ctx context.Context, conn directoryserviceiface.DirectoryService Tags: Tags(updatedTags), } - _, err := conn.AddTagsToResourceWithContext(ctx, input) + _, err := conn.AddTagsToResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -151,5 +151,5 @@ func updateTags(ctx context.Context, conn directoryserviceiface.DirectoryService // UpdateTags updates ds service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).DSConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).DSClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/ds/trust.go b/internal/service/ds/trust.go index 091257f34bc..f591b6947aa 100644 --- a/internal/service/ds/trust.go +++ b/internal/service/ds/trust.go @@ -13,6 +13,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/directoryservice" awstypes "github.com/aws/aws-sdk-go-v2/service/directoryservice/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/path" @@ -21,51 +22,43 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/setplanmodifier" - "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" "github.com/hashicorp/terraform-provider-aws/internal/framework" - "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwflex 
"github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" fwvalidators "github.com/hashicorp/terraform-provider-aws/internal/framework/validators" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - trustCreatedTimeout = 10 * time.Minute - trustUpdatedTimeout = 10 * time.Minute - trustDeleteTimeout = 5 * time.Minute -) - -// @FrameworkResource -func newResourceTrust(_ context.Context) (resource.ResourceWithConfigure, error) { - return &resourceTrust{}, nil +// @FrameworkResource(name="Trust") +func newTrustResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &trustResource{}, nil } -const ( - ResNameTrust = "Trust" -) - -type resourceTrust struct { +type trustResource struct { framework.ResourceWithConfigure } -func (r *resourceTrust) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { - resp.TypeName = "aws_directory_service_trust" +func (*trustResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_directory_service_trust" } -func (r *resourceTrust) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { - resp.Schema = schema.Schema{ +func (r *trustResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + trustType := fwtypes.StringEnumType[awstypes.TrustType]() + + response.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "conditional_forwarder_ip_addrs": schema.SetAttribute{ - Optional: true, ElementType: types.StringType, + Optional: true, PlanModifiers: []planmodifier.Set{ setplanmodifier.UseStateForUnknown(), }, @@ -77,7 +70,11 @@ func (r *resourceTrust) Schema(ctx 
context.Context, req resource.SchemaRequest, }, }, "created_date_time": schema.StringAttribute{ - Computed: true, + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, }, "delete_associated_conditional_forwarder": schema.BoolAttribute{ Computed: true, @@ -95,7 +92,8 @@ func (r *resourceTrust) Schema(ctx context.Context, req resource.SchemaRequest, }, names.AttrID: framework.IDAttribute(), "last_updated_date_time": schema.StringAttribute{ - Computed: true, + CustomType: timetypes.RFC3339Type{}, + Computed: true, }, "remote_domain_name": schema.StringAttribute{ Required: true, @@ -108,20 +106,17 @@ func (r *resourceTrust) Schema(ctx context.Context, req resource.SchemaRequest, }, }, "selective_auth": schema.StringAttribute{ - Optional: true, - Computed: true, - Validators: []validator.String{ - enum.FrameworkValidate[awstypes.SelectiveAuth](), - }, + CustomType: fwtypes.StringEnumType[awstypes.SelectiveAuth](), + Optional: true, + Computed: true, }, "state_last_updated_date_time": schema.StringAttribute{ - Computed: true, + CustomType: timetypes.RFC3339Type{}, + Computed: true, }, "trust_direction": schema.StringAttribute{ - Required: true, - Validators: []validator.String{ - enum.FrameworkValidate[awstypes.TrustDirection](), - }, + CustomType: fwtypes.StringEnumType[awstypes.TrustDirection](), + Required: true, PlanModifiers: []planmodifier.String{ stringplanmodifier.RequiresReplace(), }, @@ -143,146 +138,189 @@ func (r *resourceTrust) Schema(ctx context.Context, req resource.SchemaRequest, Computed: true, }, "trust_type": schema.StringAttribute{ - Optional: true, - Computed: true, - Default: stringdefault.StaticString(string(awstypes.TrustTypeForest)), - Validators: []validator.String{ - enum.FrameworkValidate[awstypes.TrustType](), - }, + CustomType: trustType, + Optional: true, + Computed: true, + Default: trustType.AttributeDefault(awstypes.TrustTypeForest), }, }, } } 
-func (r *resourceTrust) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { - conn := r.Meta().DSClient(ctx) - - var plan resourceTrustData - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { +func (r *trustResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data trustResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { return } - directoryID := plan.DirectoryID.ValueString() + conn := r.Meta().DSClient(ctx) - input := plan.createInput(ctx) + directoryID := data.DirectoryID.ValueString() + input := &directoryservice.CreateTrustInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + if response.Diagnostics.HasError() { + return + } output, err := conn.CreateTrust(ctx, input) + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.DS, create.ErrActionCreating, ResNameTrust, directoryID, nil), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("creating Directory Service Trust (%s)", directoryID), err.Error()) + return } - state := plan - state.ID = types.StringValue(aws.ToString(output.TrustId)) + // Set values for unknowns. + trustID := aws.ToString(output.TrustId) + data.ID = types.StringValue(trustID) - // When Trust Direction is `One-Way: Incoming`, the Trust terminates at Created. Otherwise, it terminates at Verified + // When Trust Direction is `One-Way: Incoming`, the Trust terminates at Created. Otherwise, it terminates at Verified. 
+ const ( + timeout = 10 * time.Minute + ) var trust *awstypes.Trust - if plan.TrustDirection.ValueString() == string(awstypes.TrustDirectionOneWayIncoming) { - trust, err = waitTrustCreated(ctx, conn, state.DirectoryID.ValueString(), state.ID.ValueString(), trustCreatedTimeout) + if data.TrustDirection.ValueEnum() == awstypes.TrustDirectionOneWayIncoming { + trust, err = waitTrustCreated(ctx, conn, directoryID, trustID, timeout) } else { - trust, err = waitTrustVerified(ctx, conn, state.DirectoryID.ValueString(), state.ID.ValueString(), trustCreatedTimeout) + trust, err = waitTrustVerified(ctx, conn, directoryID, trustID, timeout) } + if err != nil { - resp.Diagnostics.Append(create.DiagErrorFramework(names.DS, create.ErrActionCreating, ResNameTrust, state.ID.ValueString(), err)) + response.Diagnostics.AddError(fmt.Sprintf("waiting for Directory Service Trust (%s) create", data.ID.ValueString()), err.Error()) + return } - state.update(ctx, trust) + // Set values for unknowns after creation is complete. + response.Diagnostics.Append(fwflex.Flatten(ctx, trust, &data)...) + if response.Diagnostics.HasError() { + return + } - resp.Diagnostics.Append(resp.State.Set(ctx, state)...) + response.Diagnostics.Append(response.State.Set(ctx, data)...) } -func (r *resourceTrust) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { - conn := r.Meta().DSClient(ctx) - - var data resourceTrustData - resp.Diagnostics.Append(req.State.Get(ctx, &data)...) - if resp.Diagnostics.HasError() { +func (r *trustResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data trustResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - trust, err := FindTrustByID(ctx, conn, data.DirectoryID.ValueString(), data.ID.ValueString()) + conn := r.Meta().DSClient(ctx) + + directoryID := data.DirectoryID.ValueString() + trustID := data.ID.ValueString() + trust, err := findTrustByTwoPartKey(ctx, conn, directoryID, trustID) + if tfresource.NotFound(err) { - resp.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) - resp.State.RemoveResource(ctx) + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + return } + if err != nil { - resp.Diagnostics.Append(create.DiagErrorFramework(names.DS, create.ErrActionReading, ResNameTrust, data.ID.ValueString(), err)) + response.Diagnostics.AddError(fmt.Sprintf("reading Directory Service Trust (%s)", trustID), err.Error()) + return } - data.update(ctx, trust) - - forwarder, err := findConditionalForwarder(ctx, conn, data.DirectoryID.ValueString(), data.RemoteDomainName.ValueString()) - if err != nil { - resp.Diagnostics.Append(create.DiagErrorFramework(names.DS, create.ErrActionReading, ResNameTrust, data.ID.ValueString(), fmt.Errorf("reading Conditional Forwarder: %w", err))) + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, trust, &data)...) + if response.Diagnostics.HasError() { return } - data.updateConditionalForwarder(ctx, forwarder) - - resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) -} + // Directory Trust optionally accepts a remote domain name with a trailing period. 
+ domainName := strings.TrimRight(data.RemoteDomainName.ValueString(), ".") + forwarder, err := findConditionalForwarderByTwoPartKey(ctx, conn, directoryID, domainName) -func (r *resourceTrust) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { - var state, config, plan resourceTrustData + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Directory Service Conditional Forwarder (%s)", conditionalForwarderCreateResourceID(directoryID, domainName)), err.Error()) - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { return } - resp.Diagnostics.Append(req.Config.Get(ctx, &config)...) - if resp.Diagnostics.HasError() { + data.ConditionalForwarderIPAddrs = fwflex.FlattenFrameworkStringValueSet(ctx, forwarder.DnsIpAddrs) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *trustResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new trustResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { return } - - resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) - if resp.Diagnostics.HasError() { + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) 
+ if response.Diagnostics.HasError() { return } conn := r.Meta().DSClient(ctx) - if !plan.SelectiveAuth.IsUnknown() && !state.SelectiveAuth.Equal(plan.SelectiveAuth) { - params := plan.updateInput(ctx) + directoryID := new.DirectoryID.ValueString() + trustID := new.ID.ValueString() + + if !new.SelectiveAuth.IsUnknown() && !old.SelectiveAuth.Equal(new.SelectiveAuth) { + input := &directoryservice.UpdateTrustInput{ + SelectiveAuth: new.SelectiveAuth.ValueEnum(), + TrustId: aws.String(trustID), + } + + _, err := conn.UpdateTrust(ctx, input) - _, err := conn.UpdateTrust(ctx, params) if err != nil { - resp.Diagnostics.AddError( - fmt.Sprintf("updating Cognito User Pool Client (%s)", plan.ID.ValueString()), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("updating Directory Service Trust (%s)", trustID), err.Error()) + return } - trust, err := waitTrustUpdated(ctx, conn, state.DirectoryID.ValueString(), state.ID.ValueString(), trustUpdatedTimeout) + const ( + timeout = 10 * time.Minute + ) + trust, err := waitTrustUpdated(ctx, conn, directoryID, trustID, timeout) + if err != nil { - resp.Diagnostics.Append(create.DiagErrorFramework(names.DS, create.ErrActionUpdating, ResNameTrust, state.ID.ValueString(), err)) + response.Diagnostics.AddError(fmt.Sprintf("waiting for Directory Service Trust (%s) update", trustID), err.Error()) + return } - state.update(ctx, trust) - } + response.Diagnostics.Append(fwflex.Flatten(ctx, trust, &new)...) + if response.Diagnostics.HasError() { + return + } + } else { + // Set values for unknowns. 
+ new.LastUpdatedDateTime = old.LastUpdatedDateTime + new.SelectiveAuth = old.SelectiveAuth + new.StateLastUpdatedDateTime = old.StateLastUpdatedDateTime + new.TrustState = old.TrustState + new.TrustStateReason = old.TrustStateReason + } + + if !new.ConditionalForwarderIPAddrs.IsUnknown() && !old.ConditionalForwarderIPAddrs.Equal(new.ConditionalForwarderIPAddrs) { + input := &directoryservice.UpdateConditionalForwarderInput{ + DirectoryId: aws.String(directoryID), + DnsIpAddrs: fwflex.ExpandFrameworkStringValueSet(ctx, new.ConditionalForwarderIPAddrs), + RemoteDomainName: fwflex.StringFromFramework(ctx, new.RemoteDomainName), + } - if !plan.ConditionalForwarderIpAddrs.IsUnknown() && !state.ConditionalForwarderIpAddrs.Equal(plan.ConditionalForwarderIpAddrs) { - params := plan.updateConditionalForwarderInput(ctx) + _, err := conn.UpdateConditionalForwarder(ctx, input) - _, err := conn.UpdateConditionalForwarder(ctx, params) if err != nil { - resp.Diagnostics.AddError( - fmt.Sprintf("updating Cognito User Pool Client (%s) conditional forwarder IPs", plan.ID.ValueString()), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("updating Directory Service Conditional Forwarder (%s)", conditionalForwarderCreateResourceID(new.DirectoryID.ValueString(), new.RemoteDomainName.ValueString())), err.Error()) + return } - forwarder, err := findConditionalForwarder(ctx, conn, plan.DirectoryID.ValueString(), plan.RemoteDomainName.ValueString()) + // Directory Trust optionally accepts a remote domain name with a trailing period. + domainName := strings.TrimRight(new.RemoteDomainName.ValueString(), ".") + forwarder, err := findConditionalForwarderByTwoPartKey(ctx, conn, directoryID, domainName) + if err != nil { // Outputting a NotFoundError does not include the original error. // Retrieve it to give the user an actionalble error message. 
@@ -291,53 +329,56 @@ func (r *resourceTrust) Update(ctx context.Context, req resource.UpdateRequest, err = nfe.LastError } } - resp.Diagnostics.Append(create.DiagErrorFramework(names.DS, create.ErrActionReading, ResNameTrust, plan.ID.ValueString(), fmt.Errorf("reading Conditional Forwarder: %w", err))) + + response.Diagnostics.AddError(fmt.Sprintf("reading Directory Service Conditional Forwarder (%s)", conditionalForwarderCreateResourceID(directoryID, domainName)), err.Error()) + return } - state.updateConditionalForwarder(ctx, forwarder) + new.ConditionalForwarderIPAddrs = fwflex.FlattenFrameworkStringValueSet(ctx, forwarder.DnsIpAddrs) } - resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + response.Diagnostics.Append(response.State.Set(ctx, &new)...) } -func (r *resourceTrust) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { - var state resourceTrustData - resp.Diagnostics.Append(req.State.Get(ctx, &state)...) - if resp.Diagnostics.HasError() { +func (r *trustResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data trustResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { return } - params := state.deleteInput(ctx) - conn := r.Meta().DSClient(ctx) - _, err := conn.DeleteTrust(ctx, params) - if isTrustNotFoundErr(err) { + _, err := conn.DeleteTrust(ctx, &directoryservice.DeleteTrustInput{ + DeleteAssociatedConditionalForwarder: data.DeleteAssociatedConditionalForwarder.ValueBool(), + TrustId: fwflex.StringFromFramework(ctx, data.ID), + }) + + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { return } + if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.DS, create.ErrActionDeleting, ResNameTrust, state.ID.ValueString(), err), - err.Error(), - ) + response.Diagnostics.AddError(fmt.Sprintf("deleting Directory Service Trust (%s)", data.ID.ValueString()), err.Error()) + return } - _, err = waitTrustDeleted(ctx, conn, state.DirectoryID.ValueString(), state.ID.ValueString(), trustDeleteTimeout) - if err != nil { - resp.Diagnostics.AddError( - create.ProblemStandardMessage(names.DS, create.ErrActionDeleting, ResNameTrust, state.ID.ValueString(), fmt.Errorf("waiting for completion: %w", err)), - err.Error(), - ) + const ( + timeout = 5 * time.Minute + ) + if _, err := waitTrustDeleted(ctx, conn, data.DirectoryID.ValueString(), data.ID.ValueString(), timeout); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for Directory Service Trust (%s) delete", data.ID.ValueString()), err.Error()) + return } } -func (r *resourceTrust) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { - parts := strings.Split(req.ID, "/") +func (r *trustResource) ImportState(ctx context.Context, request resource.ImportStateRequest, response *resource.ImportStateResponse) { + parts := strings.Split(request.ID, "/") if len(parts) != 2 { - resp.Diagnostics.AddError("Resource Import Invalid ID", fmt.Sprintf("Wrong format for import ID (%s), use: 'directory-id/remote-directory-domain'", req.ID)) + 
response.Diagnostics.AddError("Resource Import Invalid ID", fmt.Sprintf("Wrong format for import ID (%s), use: 'directory-id/remote-directory-domain'", request.ID)) return } directoryID := parts[0] @@ -345,161 +386,110 @@ func (r *resourceTrust) ImportState(ctx context.Context, req resource.ImportStat trust, err := findTrustByDomain(ctx, r.Meta().DSClient(ctx), directoryID, domain) if err != nil { - resp.Diagnostics.AddError( + response.Diagnostics.AddError( "Importing Resource", err.Error(), ) } - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root(names.AttrID), aws.ToString(trust.TrustId))...) - resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("directory_id"), directoryID)...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root(names.AttrID), aws.ToString(trust.TrustId))...) + response.Diagnostics.Append(response.State.SetAttribute(ctx, path.Root("directory_id"), directoryID)...) } -type resourceTrustData struct { - ConditionalForwarderIpAddrs types.Set `tfsdk:"conditional_forwarder_ip_addrs"` - CreatedDateTime types.String `tfsdk:"created_date_time"` - DeleteAssociatedConditionalForwarder types.Bool `tfsdk:"delete_associated_conditional_forwarder"` - DirectoryID types.String `tfsdk:"directory_id"` - ID types.String `tfsdk:"id"` - LastUpdatedDateTime types.String `tfsdk:"last_updated_date_time"` - RemoteDomainName types.String `tfsdk:"remote_domain_name"` - SelectiveAuth types.String `tfsdk:"selective_auth"` - StateLastUpdatedDateTime types.String `tfsdk:"state_last_updated_date_time"` - TrustDirection types.String `tfsdk:"trust_direction"` - TrustPassword types.String `tfsdk:"trust_password"` - TrustState types.String `tfsdk:"trust_state"` - TrustStateReason types.String `tfsdk:"trust_state_reason"` - TrustType types.String `tfsdk:"trust_type"` +type trustResourceModel struct { + ConditionalForwarderIPAddrs types.Set `tfsdk:"conditional_forwarder_ip_addrs"` + CreatedDateTime timetypes.RFC3339 
`tfsdk:"created_date_time"` + DeleteAssociatedConditionalForwarder types.Bool `tfsdk:"delete_associated_conditional_forwarder"` + DirectoryID types.String `tfsdk:"directory_id"` + ID types.String `tfsdk:"id"` + LastUpdatedDateTime timetypes.RFC3339 `tfsdk:"last_updated_date_time"` + RemoteDomainName types.String `tfsdk:"remote_domain_name"` + SelectiveAuth fwtypes.StringEnum[awstypes.SelectiveAuth] `tfsdk:"selective_auth"` + StateLastUpdatedDateTime timetypes.RFC3339 `tfsdk:"state_last_updated_date_time"` + TrustDirection fwtypes.StringEnum[awstypes.TrustDirection] `tfsdk:"trust_direction"` + TrustPassword types.String `tfsdk:"trust_password"` + TrustState types.String `tfsdk:"trust_state"` + TrustStateReason types.String `tfsdk:"trust_state_reason"` + TrustType fwtypes.StringEnum[awstypes.TrustType] `tfsdk:"trust_type"` } -func (data resourceTrustData) createInput(ctx context.Context) *directoryservice.CreateTrustInput { - return &directoryservice.CreateTrustInput{ - ConditionalForwarderIpAddrs: flex.ExpandFrameworkStringValueSet(ctx, data.ConditionalForwarderIpAddrs), - DirectoryId: flex.StringFromFramework(ctx, data.DirectoryID), - RemoteDomainName: flex.StringFromFramework(ctx, data.RemoteDomainName), - SelectiveAuth: stringlikeValueFromFramework[awstypes.SelectiveAuth](ctx, data.SelectiveAuth), - TrustDirection: stringlikeValueFromFramework[awstypes.TrustDirection](ctx, data.TrustDirection), - TrustPassword: flex.StringFromFramework(ctx, data.TrustPassword), - TrustType: stringlikeValueFromFramework[awstypes.TrustType](ctx, data.TrustType), - } -} +func findTrust(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeTrustsInput, filter tfslices.Predicate[*awstypes.Trust]) (*awstypes.Trust, error) { + output, err := findTrusts(ctx, conn, input, filter) -func (data resourceTrustData) updateInput(ctx context.Context) *directoryservice.UpdateTrustInput { - return &directoryservice.UpdateTrustInput{ - TrustId: 
flex.StringFromFramework(ctx, data.ID), - SelectiveAuth: stringlikeValueFromFramework[awstypes.SelectiveAuth](ctx, data.SelectiveAuth), + if err != nil { + return nil, err } -} -func (data resourceTrustData) updateConditionalForwarderInput(ctx context.Context) *directoryservice.UpdateConditionalForwarderInput { - return &directoryservice.UpdateConditionalForwarderInput{ - DirectoryId: flex.StringFromFramework(ctx, data.DirectoryID), - RemoteDomainName: flex.StringFromFramework(ctx, data.RemoteDomainName), - DnsIpAddrs: flex.ExpandFrameworkStringValueSet(ctx, data.ConditionalForwarderIpAddrs), - } + return tfresource.AssertSingleValueResult(output) } -func (data resourceTrustData) deleteInput(ctx context.Context) *directoryservice.DeleteTrustInput { - return &directoryservice.DeleteTrustInput{ - TrustId: flex.StringFromFramework(ctx, data.ID), - DeleteAssociatedConditionalForwarder: data.DeleteAssociatedConditionalForwarder.ValueBool(), - } -} +func findTrusts(ctx context.Context, conn *directoryservice.Client, input *directoryservice.DescribeTrustsInput, filter tfslices.Predicate[*awstypes.Trust]) ([]awstypes.Trust, error) { + var output []awstypes.Trust -func (data *resourceTrustData) update(ctx context.Context, in *awstypes.Trust) { - data.CreatedDateTime = flex.StringValueToFramework(ctx, in.CreatedDateTime.Format(time.RFC3339)) - data.LastUpdatedDateTime = flex.StringValueToFramework(ctx, in.LastUpdatedDateTime.Format(time.RFC3339)) - data.RemoteDomainName = flex.StringToFramework(ctx, in.RemoteDomainName) - data.SelectiveAuth = flex.StringValueToFramework(ctx, in.SelectiveAuth) - data.StateLastUpdatedDateTime = flex.StringValueToFramework(ctx, in.StateLastUpdatedDateTime.Format(time.RFC3339)) - data.TrustDirection = flex.StringValueToFramework(ctx, in.TrustDirection) - // TrustPassword is not returned - data.TrustState = flex.StringValueToFramework(ctx, in.TrustState) - data.TrustStateReason = flex.StringToFramework(ctx, in.TrustStateReason) - data.TrustType = 
flex.StringValueToFramework(ctx, in.TrustType) -} + pages := directoryservice.NewDescribeTrustsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) -func (data *resourceTrustData) updateConditionalForwarder(ctx context.Context, in *awstypes.ConditionalForwarder) { - data.ConditionalForwarderIpAddrs = flex.FlattenFrameworkStringValueSet(ctx, in.DnsIpAddrs) -} + if errs.IsA[*awstypes.EntityDoesNotExistException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } -func stringlikeValueFromFramework[T ~string](_ context.Context, v types.String) T { - if v.IsNull() || v.IsUnknown() { - return "" + for _, v := range page.Trusts { + if filter(&v) { + output = append(output, v) + } + } } - return T(v.ValueString()) + return output, nil } -func FindTrustByID(ctx context.Context, conn directoryservice.DescribeTrustsAPIClient, directoryID, trustID string) (*awstypes.Trust, error) { +func findTrustByTwoPartKey(ctx context.Context, conn *directoryservice.Client, directoryID, trustID string) (*awstypes.Trust, error) { input := &directoryservice.DescribeTrustsInput{ DirectoryId: aws.String(directoryID), TrustIds: []string{trustID}, } - output, err := conn.DescribeTrusts(ctx, input) - if isTrustNotFoundErr(err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - trust, err := tfresource.AssertSingleValueResult(output.Trusts) - if err != nil { - return nil, err - } - return trust, nil + return findTrust(ctx, conn, input, tfslices.PredicateTrue[*awstypes.Trust]()) } -func findTrustByDomain(ctx context.Context, conn directoryservice.DescribeTrustsAPIClient, directoryID, domain string) (*awstypes.Trust, error) { +func findTrustByDomain(ctx context.Context, conn *directoryservice.Client, directoryID, domain string) (*awstypes.Trust, error) { input := 
&directoryservice.DescribeTrustsInput{ DirectoryId: aws.String(directoryID), } - var results []awstypes.Trust - paginator := directoryservice.NewDescribeTrustsPaginator(conn, input) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) - if err != nil { - return nil, err + return findTrust(ctx, conn, input, func(v *awstypes.Trust) bool { + return aws.ToString(v.RemoteDomainName) == domain + }) +} + +func statusTrust(ctx context.Context, conn *directoryservice.Client, directoryID, trustID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findTrustByTwoPartKey(ctx, conn, directoryID, trustID) + + if tfresource.NotFound(err) { + return nil, "", nil } - for _, trust := range page.Trusts { - if aws.ToString(trust.RemoteDomainName) == domain { - results = append(results, trust) - } + if err != nil { + return nil, "", err } - } - trust, err := tfresource.AssertSingleValueResult(results) - if err != nil { - return nil, err + return output, string(output.TrustState), nil } - return trust, nil -} - -func isTrustNotFoundErr(err error) bool { - return errs.IsA[*awstypes.EntityDoesNotExistException](err) -} - -func isConditionalForwarderNotFoundErr(err error) bool { - return errs.IsA[*awstypes.EntityDoesNotExistException](err) } -// waitTrustCreated waits until a Trust is created. 
-func waitTrustCreated(ctx context.Context, conn directoryservice.DescribeTrustsAPIClient, directoryID, trustID string, timeout time.Duration) (*awstypes.Trust, error) { +func waitTrustCreated(ctx context.Context, conn *directoryservice.Client, directoryID, trustID string, timeout time.Duration) (*awstypes.Trust, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice( - awstypes.TrustStateCreating, - ), - Target: enum.Slice( - awstypes.TrustStateCreated, - ), + Pending: enum.Slice(awstypes.TrustStateCreating), + Target: enum.Slice(awstypes.TrustStateCreated), Refresh: statusTrust(ctx, conn, directoryID, trustID), Timeout: timeout, } @@ -522,15 +512,14 @@ func waitTrustCreated(ctx context.Context, conn directoryservice.DescribeTrustsA return nil, err } -// waitTrustVerified waits until a Trust is created and verified. -// On first side of a Two-Way Trust relationship, `VerifyFailed` is expected. This then gets updated when the second side is created. -func waitTrustVerified(ctx context.Context, conn directoryservice.DescribeTrustsAPIClient, directoryID, trustID string, timeout time.Duration) (*awstypes.Trust, error) { +func waitTrustVerified(ctx context.Context, conn *directoryservice.Client, directoryID, trustID string, timeout time.Duration) (*awstypes.Trust, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice( awstypes.TrustStateCreating, awstypes.TrustStateCreated, awstypes.TrustStateVerifying, ), + // On first side of a Two-Way Trust relationship, `VerifyFailed` is expected. This then gets updated when the second side is created. 
Target: enum.Slice( awstypes.TrustStateVerified, awstypes.TrustStateVerifyFailed, @@ -557,7 +546,7 @@ func waitTrustVerified(ctx context.Context, conn directoryservice.DescribeTrusts return nil, err } -func waitTrustUpdated(ctx context.Context, conn directoryservice.DescribeTrustsAPIClient, directoryID, trustID string, timeout time.Duration) (*awstypes.Trust, error) { +func waitTrustUpdated(ctx context.Context, conn *directoryservice.Client, directoryID, trustID string, timeout time.Duration) (*awstypes.Trust, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice( awstypes.TrustStateUpdating, @@ -590,7 +579,7 @@ func waitTrustUpdated(ctx context.Context, conn directoryservice.DescribeTrustsA return nil, err } -func waitTrustDeleted(ctx context.Context, conn directoryservice.DescribeTrustsAPIClient, directoryID, trustID string, timeout time.Duration) (*awstypes.Trust, error) { +func waitTrustDeleted(ctx context.Context, conn *directoryservice.Client, directoryID, trustID string, timeout time.Duration) (*awstypes.Trust, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice( awstypes.TrustStateCreated, @@ -619,49 +608,3 @@ func waitTrustDeleted(ctx context.Context, conn directoryservice.DescribeTrustsA return nil, err } - -func statusTrust(ctx context.Context, conn directoryservice.DescribeTrustsAPIClient, directoryID, trustID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindTrustByID(ctx, conn, directoryID, trustID) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, string(output.TrustState), nil - } -} - -func findConditionalForwarder(ctx context.Context, conn *directoryservice.Client, directoryID, remoteDomainName string) (*awstypes.ConditionalForwarder, error) { - // Directory Trust optionally accepts a remote domain name with a trailing period. 
- // Conditional Forwarders - remoteDomainName = strings.TrimRight(remoteDomainName, ".") - - input := &directoryservice.DescribeConditionalForwardersInput{ - DirectoryId: aws.String(directoryID), - RemoteDomainNames: []string{remoteDomainName}, - } - - output, err := conn.DescribeConditionalForwarders(ctx, input) - if isConditionalForwarderNotFoundErr(err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - forwarder, err := tfresource.AssertSingleValueResult(output.ConditionalForwarders) - if err != nil { - return nil, err - } - - return forwarder, nil -} diff --git a/internal/service/ds/trust_test.go b/internal/service/ds/trust_test.go index 9d588cd32d4..7201b2d4b37 100644 --- a/internal/service/ds/trust_test.go +++ b/internal/service/ds/trust_test.go @@ -71,6 +71,35 @@ func TestAccDSTrust_basic(t *testing.T) { }) } +func TestAccDSTrust_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v awstypes.Trust + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_directory_service_trust.test" + domainName := acctest.RandomDomainName() + domainNameOther := acctest.RandomDomainName() + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckDirectoryService(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.DSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustConfig_basic(rName, domainName, domainNameOther), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTrustExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfds.ResourceTrust, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccDSTrust_Domain_TrailingPeriod(t *testing.T) { ctx := 
acctest.Context(t) var v awstypes.Trust @@ -490,7 +519,7 @@ func testAccCheckTrustExists(ctx context.Context, n string, v *awstypes.Trust) r conn := acctest.Provider.Meta().(*conns.AWSClient).DSClient(ctx) - output, err := tfds.FindTrustByID(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.ID) + output, err := tfds.FindTrustByTwoPartKey(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.ID) if err != nil { return err @@ -511,7 +540,7 @@ func testAccCheckTrustDestroy(ctx context.Context) resource.TestCheckFunc { continue } - _, err := tfds.FindTrustByID(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.ID) + _, err := tfds.FindTrustByTwoPartKey(ctx, conn, rs.Primary.Attributes["directory_id"], rs.Primary.ID) if tfresource.NotFound(err) { continue diff --git a/internal/service/ds/validate_test.go b/internal/service/ds/validate_test.go index 8208339185e..bf44b10396f 100644 --- a/internal/service/ds/validate_test.go +++ b/internal/service/ds/validate_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -package ds_test +package ds import ( "context" @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/schema/validator" "github.com/hashicorp/terraform-plugin-framework/types" - tfds "github.com/hashicorp/terraform-provider-aws/internal/service/ds" ) func TestDirectoryIDValidator(t *testing.T) { @@ -67,7 +66,7 @@ func TestDirectoryIDValidator(t *testing.T) { ConfigValue: test.val, } response := validator.StringResponse{} - tfds.DirectoryIDValidator.ValidateString(ctx, request, &response) + directoryIDValidator.ValidateString(ctx, request, &response) if diff := cmp.Diff(response.Diagnostics, test.expectedDiagnostics); diff != "" { t.Errorf("unexpected diagnostics difference: %s", diff) @@ -121,7 +120,7 @@ func TestDomainWithTrailingDotValidatorValidator(t *testing.T) { ConfigValue: test.val, } response := validator.StringResponse{} - tfds.DomainWithTrailingDotValidator.ValidateString(ctx, request, &response) + domainWithTrailingDotValidator.ValidateString(ctx, request, &response) if diff := cmp.Diff(response.Diagnostics, test.expectedDiagnostics); diff != "" { t.Errorf("unexpected diagnostics difference: %s", diff) @@ -172,7 +171,7 @@ func TestTrustPasswordValidator(t *testing.T) { ConfigValue: test.val, } response := validator.StringResponse{} - tfds.TrustPasswordValidator.ValidateString(ctx, request, &response) + trustPasswordValidator.ValidateString(ctx, request, &response) if diff := cmp.Diff(response.Diagnostics, test.expectedDiagnostics); diff != "" { t.Errorf("unexpected diagnostics difference: %s", diff) diff --git a/internal/service/ds/wait.go b/internal/service/ds/wait.go deleted file mode 100644 index fbba0196e2e..00000000000 --- a/internal/service/ds/wait.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package ds - -import ( - "context" - "errors" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/directoryservice" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func waitDomainControllerCreated(ctx context.Context, conn *directoryservice.DirectoryService, directoryID, domainControllerID string, timeout time.Duration) (*directoryservice.DomainController, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{directoryservice.DomainControllerStatusCreating}, - Target: []string{directoryservice.DomainControllerStatusActive}, - Refresh: statusDomainController(ctx, conn, directoryID, domainControllerID), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*directoryservice.DomainController); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) - - return output, err - } - - return nil, err -} - -func waitDomainControllerDeleted(ctx context.Context, conn *directoryservice.DirectoryService, directoryID, domainControllerID string, timeout time.Duration) (*directoryservice.DomainController, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{directoryservice.DomainControllerStatusDeleting}, - Target: []string{}, - Refresh: statusDomainController(ctx, conn, directoryID, domainControllerID), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*directoryservice.DomainController); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) - - return output, err - } - - return nil, err -} - -func waitRadiusCompleted(ctx context.Context, conn *directoryservice.DirectoryService, directoryID string, timeout time.Duration) (*directoryservice.DirectoryDescription, error) { //nolint:unparam - stateConf := 
&retry.StateChangeConf{ - Pending: []string{directoryservice.RadiusStatusCreating}, - Target: []string{directoryservice.RadiusStatusCompleted}, - Refresh: statusRadius(ctx, conn, directoryID), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*directoryservice.DirectoryDescription); ok { - return output, err - } - - return nil, err -} - -func waitRegionCreated(ctx context.Context, conn *directoryservice.DirectoryService, directoryID, regionName string, timeout time.Duration) (*directoryservice.RegionDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{directoryservice.DirectoryStageRequested, directoryservice.DirectoryStageCreating, directoryservice.DirectoryStageCreated}, - Target: []string{directoryservice.DirectoryStageActive}, - Refresh: statusRegion(ctx, conn, directoryID, regionName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*directoryservice.RegionDescription); ok { - return output, err - } - - return nil, err -} - -func waitRegionDeleted(ctx context.Context, conn *directoryservice.DirectoryService, directoryID, regionName string, timeout time.Duration) (*directoryservice.RegionDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{directoryservice.DirectoryStageActive, directoryservice.DirectoryStageDeleting}, - Target: []string{}, - Refresh: statusRegion(ctx, conn, directoryID, regionName), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*directoryservice.RegionDescription); ok { - return output, err - } - - return nil, err -} - -func waitSharedDirectoryDeleted(ctx context.Context, conn *directoryservice.DirectoryService, ownerDirectoryID, sharedDirectoryID string, timeout time.Duration) (*directoryservice.SharedDirectory, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ - 
directoryservice.ShareStatusDeleting, - directoryservice.ShareStatusShared, - directoryservice.ShareStatusPendingAcceptance, - directoryservice.ShareStatusRejectFailed, - directoryservice.ShareStatusRejected, - directoryservice.ShareStatusRejecting, - }, - Target: []string{}, - Refresh: statusSharedDirectory(ctx, conn, ownerDirectoryID, sharedDirectoryID), - Timeout: timeout, - MinTimeout: 30 * time.Second, - ContinuousTargetOccurence: 2, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*directoryservice.SharedDirectory); ok { - return output, err - } - - return nil, err -} - -func waitDirectoryShared(ctx context.Context, conn *directoryservice.DirectoryService, id string, timeout time.Duration) (*directoryservice.SharedDirectory, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{directoryservice.ShareStatusPendingAcceptance, directoryservice.ShareStatusSharing}, - Target: []string{directoryservice.ShareStatusShared}, - Refresh: statusDirectoryShareStatus(ctx, conn, id), - Timeout: timeout, - ContinuousTargetOccurence: 2, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*directoryservice.SharedDirectory); ok { - return output, err - } - - return nil, err -} diff --git a/internal/service/dynamodb/kinesis_streaming_destination.go b/internal/service/dynamodb/kinesis_streaming_destination.go index 2668419fd64..c2577e6a438 100644 --- a/internal/service/dynamodb/kinesis_streaming_destination.go +++ b/internal/service/dynamodb/kinesis_streaming_destination.go @@ -128,6 +128,10 @@ func resourceKinesisStreamingDestinationDelete(ctx context.Context, d *schema.Re return diags } + if err != nil { + return sdkdiag.AppendErrorf(diags, "disabling DynamoDB Kinesis Streaming Destination (%s): %s", d.Id(), err) + } + log.Printf("[DEBUG] Deleting DynamoDB Kinesis Streaming Destination: %s", d.Id()) _, err = conn.DisableKinesisStreamingDestination(ctx, 
&dynamodb.DisableKinesisStreamingDestinationInput{ TableName: aws.String(tableName), diff --git a/internal/service/dynamodb/service_endpoint_resolver_gen.go b/internal/service/dynamodb/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..49fe698597c --- /dev/null +++ b/internal/service/dynamodb/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package dynamodb + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + dynamodb_sdkv2 "github.com/aws/aws-sdk-go-v2/service/dynamodb" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ dynamodb_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver dynamodb_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: dynamodb_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params dynamodb_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + 
_, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up dynamodb endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*dynamodb_sdkv2.Options) { + return func(o *dynamodb_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/dynamodb/service_endpoints_gen_test.go b/internal/service/dynamodb/service_endpoints_gen_test.go index 00ed085b88b..62835b5ca52 100644 --- a/internal/service/dynamodb/service_endpoints_gen_test.go +++ b/internal/service/dynamodb/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -90,7 +92,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -330,7 +332,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -351,24 +353,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := 
dynamodb_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), dynamodb_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := dynamodb_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), dynamodb_sdkv2.EndpointParameters{ @@ -376,14 +378,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -453,16 +455,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: 
%s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/dynamodb/service_package.go b/internal/service/dynamodb/service_package.go index 8ac0ae0fcfc..f9589244928 100644 --- a/internal/service/dynamodb/service_package.go +++ b/internal/service/dynamodb/service_package.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/dynamodb" awstypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,24 +19,16 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*dynamodb.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return dynamodb.NewFromConfig(cfg, func(o *dynamodb.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "Subscriber limit exceeded:") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. 
- })) - }), nil + return dynamodb.NewFromConfig(cfg, + dynamodb.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *dynamodb.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.LimitExceededException](err, "Subscriber limit exceeded:") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + })) + }, + ), nil } diff --git a/internal/service/dynamodb/service_package_gen.go b/internal/service/dynamodb/service_package_gen.go index e8054560af4..68ba88bcbb3 100644 --- a/internal/service/dynamodb/service_package_gen.go +++ b/internal/service/dynamodb/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package dynamodb diff --git a/internal/service/dynamodb/table.go b/internal/service/dynamodb/table.go index f43c6044f75..c7e238e5727 100644 --- a/internal/service/dynamodb/table.go +++ b/internal/service/dynamodb/table.go @@ -17,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/dynamodb" awstypes "github.com/aws/aws-sdk-go-v2/service/dynamodb/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -116,6 +117,7 @@ func resourceTable() *schema.Resource { // https://github.com/hashicorp/terraform-provider-aws/issues/25214 return old.(string) != new.(string) && new.(string) != "" }), + validateTTLCustomDiff, verify.SetTagsDiff, ), @@ -450,7 +452,7 @@ func resourceTable() *schema.Resource { Schema: map[string]*schema.Schema{ "attribute_name": { Type: schema.TypeString, - Required: true, + Optional: true, 
}, names.AttrEnabled: { Type: schema.TypeBool, @@ -2389,3 +2391,64 @@ func validateProvisionedThroughputField(diff *schema.ResourceDiff, key string) e } return nil } + +func validateTTLCustomDiff(ctx context.Context, d *schema.ResourceDiff, meta any) error { + var diags diag.Diagnostics + + configRaw := d.GetRawConfig() + if !configRaw.IsKnown() || configRaw.IsNull() { + return nil + } + + ttlPath := cty.GetAttrPath("ttl") + ttl := configRaw.GetAttr("ttl") + if ttl.IsKnown() && !ttl.IsNull() { + if ttl.LengthInt() == 1 { + idx := cty.NumberIntVal(0) + ttl := ttl.Index(idx) + ttlPath := ttlPath.Index(idx) + ttlPlantimeValidate(ttlPath, ttl, &diags) + } + } + + return sdkdiag.DiagnosticsError(diags) +} + +func ttlPlantimeValidate(ttlPath cty.Path, ttl cty.Value, diags *diag.Diagnostics) { + attribute := ttl.GetAttr("attribute_name") + if !attribute.IsKnown() { + return + } + + enabled := ttl.GetAttr(names.AttrEnabled) + if !enabled.IsKnown() { + return + } + if enabled.IsNull() { + return + } + + if enabled.True() { + if attribute.IsNull() { + *diags = append(*diags, errs.NewAttributeRequiredWhenError( + ttlPath.GetAttr("attribute_name"), + ttlPath.GetAttr(names.AttrEnabled), + "true", + )) + } else if attribute.AsString() == "" { + *diags = append(*diags, errs.NewInvalidValueAttributeErrorf( + ttlPath.GetAttr("attribute_name"), + "Attribute %q cannot have an empty value", + errs.PathString(ttlPath.GetAttr("attribute_name")), + )) + } + } else { + if !(attribute.IsNull() || attribute.AsString() == "") { + *diags = append(*diags, errs.NewAttributeConflictsWhenError( + ttlPath.GetAttr("attribute_name"), + ttlPath.GetAttr(names.AttrEnabled), + "false", + )) + } + } +} diff --git a/internal/service/dynamodb/table_test.go b/internal/service/dynamodb/table_test.go index ffa84f497c9..9a73db6451a 100644 --- a/internal/service/dynamodb/table_test.go +++ b/internal/service/dynamodb/table_test.go @@ -19,7 +19,10 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" @@ -372,10 +375,16 @@ func TestAccDynamoDBTable_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "table_class", "STANDARD"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsAllPercent, acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "ttl.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "ttl.0.enabled", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "write_capacity", acctest.Ct1), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("ttl"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "attribute_name": knownvalue.StringExact(""), + names.AttrEnabled: knownvalue.Bool(false), + }), + })), + }, }, { ResourceName: resourceName, @@ -1362,18 +1371,29 @@ func TestAccDynamoDBTable_TTL_enabled(t *testing.T) { CheckDestroy: testAccCheckTableDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTableConfig_timeToLive(rName, true), + Config: testAccTableConfig_timeToLive(rName, rName, true), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckInitialTableExists(ctx, resourceName, &table), - resource.TestCheckResourceAttr(resourceName, "ttl.#", acctest.Ct1), - 
resource.TestCheckResourceAttr(resourceName, "ttl.0.enabled", acctest.CtTrue), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("ttl"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "attribute_name": knownvalue.StringExact(rName), + names.AttrEnabled: knownvalue.Bool(true), + }), + })), + }, }, { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, }, + { + Config: testAccTableConfig_timeToLive_unset(rName), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, }, }) } @@ -1393,12 +1413,60 @@ func TestAccDynamoDBTable_TTL_disabled(t *testing.T) { CheckDestroy: testAccCheckTableDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTableConfig_timeToLive(rName, false), + Config: testAccTableConfig_timeToLive(rName, "", false), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckInitialTableExists(ctx, resourceName, &table), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("ttl"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "attribute_name": knownvalue.StringExact(""), + names.AttrEnabled: knownvalue.Bool(false), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTableConfig_timeToLive_unset(rName), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + }, + }) +} + +// TTL tests must be split since it can only be updated once per hour +// ValidationException: Time to live has been modified multiple times within a fixed interval +func TestAccDynamoDBTable_TTL_update(t *testing.T) { + ctx := acctest.Context(t) + var table awstypes.TableDescription + resourceName := "aws_dynamodb_table.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_timeToLive(rName, "", false), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckInitialTableExists(ctx, resourceName, &table), - resource.TestCheckResourceAttr(resourceName, "ttl.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "ttl.0.enabled", acctest.CtFalse), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("ttl"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "attribute_name": knownvalue.StringExact(""), + names.AttrEnabled: knownvalue.Bool(false), + }), + })), + }, }, { ResourceName: resourceName, @@ -1406,12 +1474,44 @@ func TestAccDynamoDBTable_TTL_disabled(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccTableConfig_timeToLive(rName, true), + Config: testAccTableConfig_timeToLive(rName, rName, true), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckInitialTableExists(ctx, resourceName, &table), - resource.TestCheckResourceAttr(resourceName, "ttl.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "ttl.0.enabled", acctest.CtTrue), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("ttl"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "attribute_name": knownvalue.StringExact(rName), + names.AttrEnabled: knownvalue.Bool(true), + }), + })), + }, + }, + }, + }) +} + +func TestAccDynamoDBTable_TTL_validate(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.DynamoDBServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTableDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTableConfig_timeToLive(rName, "TestTTL", false), + ExpectError: regexache.MustCompile(regexp.QuoteMeta(`Attribute "ttl[0].attribute_name" cannot be specified when "ttl[0].enabled" is "false"`)), + }, + { + Config: testAccTableConfig_timeToLive(rName, "", true), + ExpectError: regexache.MustCompile(regexp.QuoteMeta(`Attribute "ttl[0].attribute_name" cannot have an empty value`)), + }, + { + Config: testAccTableConfig_TTL_missingAttributeName(rName, true), + ExpectError: regexache.MustCompile(regexp.QuoteMeta(`Attribute "ttl[0].attribute_name" cannot have an empty value`)), }, }, }) @@ -3496,7 +3596,44 @@ resource "aws_dynamodb_table" "test" { `, rName) } -func testAccTableConfig_timeToLive(rName string, ttlEnabled bool) string { +func testAccTableConfig_timeToLive(rName, ttlAttribute string, ttlEnabled bool) string { + return fmt.Sprintf(` +resource "aws_dynamodb_table" "test" { + hash_key = "TestTableHashKey" + name = %[1]q + read_capacity = 1 + write_capacity = 1 + + attribute { + name = "TestTableHashKey" + type = "S" + } + + ttl { + attribute_name = %[2]q + enabled = %[3]t + } +} +`, rName, ttlAttribute, ttlEnabled) +} + +func testAccTableConfig_timeToLive_unset(rName string) string { + return fmt.Sprintf(` +resource "aws_dynamodb_table" "test" { + hash_key = "TestTableHashKey" + name = %[1]q + read_capacity = 1 + write_capacity = 1 + + attribute { + name = "TestTableHashKey" + type = "S" + } +} +`, rName) +} + +func testAccTableConfig_TTL_missingAttributeName(rName string, ttlEnabled bool) string { return fmt.Sprintf(` resource "aws_dynamodb_table" "test" { hash_key = "TestTableHashKey" @@ -3510,7 +3647,7 @@ resource "aws_dynamodb_table" "test" { } ttl { - attribute_name = %[2]t ? 
"TestTTL" : "" + attribute_name = "" enabled = %[2]t } } diff --git a/internal/service/ec2/ec2_capacity_block_reservation.go b/internal/service/ec2/ec2_capacity_block_reservation.go index eecf8cf1a99..d7955409e28 100644 --- a/internal/service/ec2/ec2_capacity_block_reservation.go +++ b/internal/service/ec2/ec2_capacity_block_reservation.go @@ -194,7 +194,7 @@ func (r *resourceCapacityBlockReservation) Create(ctx context.Context, request r output, err := conn.PurchaseCapacityBlock(ctx, input) if err != nil { response.Diagnostics.AddError( - create.ProblemStandardMessage(names.EC2, create.ErrActionCreating, ResNameCapacityBlockReservation, plan.CapacityBlockOfferingID.String(), err), + create.ProblemStandardMessage(names.EC2, create.ErrActionCreating, ResNameCapacityBlockReservation, plan.CapacityBlockOfferingID.ValueString(), err), err.Error(), ) return @@ -202,7 +202,7 @@ func (r *resourceCapacityBlockReservation) Create(ctx context.Context, request r if output == nil || output.CapacityReservation == nil { response.Diagnostics.AddError( - create.ProblemStandardMessage(names.EC2, create.ErrActionCreating, ResNameCapacityBlockReservation, plan.CapacityBlockOfferingID.String(), nil), + create.ProblemStandardMessage(names.EC2, create.ErrActionCreating, ResNameCapacityBlockReservation, plan.CapacityBlockOfferingID.ValueString(), nil), errors.New("empty output").Error(), ) return @@ -217,7 +217,7 @@ func (r *resourceCapacityBlockReservation) Create(ctx context.Context, request r if err != nil { response.Diagnostics.AddError( - create.ProblemStandardMessage(names.EC2, create.ErrActionWaitingForCreation, ResNameCapacityBlockReservation, state.ID.String(), err), + create.ProblemStandardMessage(names.EC2, create.ErrActionWaitingForCreation, ResNameCapacityBlockReservation, state.ID.ValueString(), err), err.Error(), ) return @@ -249,6 +249,13 @@ func (r *resourceCapacityBlockReservation) Read(ctx context.Context, request res response.State.RemoveResource(ctx) return } + if 
err != nil { + response.Diagnostics.AddError( + create.ProblemStandardMessage(names.EC2, create.ErrActionReading, ResNameCapacityBlockReservation, data.ID.ValueString(), err), + err.Error(), + ) + return + } response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) diff --git a/internal/service/ec2/ec2_capacity_reservation.go b/internal/service/ec2/ec2_capacity_reservation.go index ccf820e9cb0..b053796cf74 100644 --- a/internal/service/ec2/ec2_capacity_reservation.go +++ b/internal/service/ec2/ec2_capacity_reservation.go @@ -41,6 +41,12 @@ func resourceCapacityReservation() *schema.Resource { CustomizeDiff: verify.SetTagsDiff, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -175,7 +181,7 @@ func resourceCapacityReservationCreate(ctx context.Context, d *schema.ResourceDa d.SetId(aws.ToString(output.CapacityReservation.CapacityReservationId)) - if err := waitCapacityReservationActive(ctx, conn, d.Id()); err != nil { + if _, err := waitCapacityReservationActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EC2 Capacity Reservation (%s) create: %s", d.Id(), err) } @@ -245,7 +251,7 @@ func resourceCapacityReservationUpdate(ctx context.Context, d *schema.ResourceDa return sdkdiag.AppendErrorf(diags, "updating EC2 Capacity Reservation (%s): %s", d.Id(), err) } - if err := waitCapacityReservationActive(ctx, conn, d.Id()); err != nil { + if _, err := waitCapacityReservationActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EC2 Capacity Reservation (%s) update: %s", d.Id(), err) } } @@ -270,7 +276,7 @@ func resourceCapacityReservationDelete(ctx context.Context, d *schema.ResourceDa return 
sdkdiag.AppendErrorf(diags, "deleting EC2 Capacity Reservation (%s): %s", d.Id(), err) } - if _, err := waitCapacityReservationDeleted(ctx, conn, d.Id()); err != nil { + if _, err := waitCapacityReservationDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EC2 Capacity Reservation (%s) delete: %s", d.Id(), err) } diff --git a/internal/service/ec2/ec2_fleet.go b/internal/service/ec2/ec2_fleet.go index bd8b98b3db4..ea039bc5ceb 100644 --- a/internal/service/ec2/ec2_fleet.go +++ b/internal/service/ec2/ec2_fleet.go @@ -299,6 +299,11 @@ func resourceFleet() *schema.Resource { ValidateDiagFunc: enum.Validate[awstypes.LocalStorageType](), }, }, + "max_spot_price_as_percentage_of_optimal_on_demand_price": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + }, "memory_gib_per_vcpu": { Type: schema.TypeList, Optional: true, diff --git a/internal/service/ec2/ec2_fleet_test.go b/internal/service/ec2/ec2_fleet_test.go index 3d0a864f11d..9fb61122a79 100644 --- a/internal/service/ec2/ec2_fleet_test.go +++ b/internal/service/ec2/ec2_fleet_test.go @@ -1561,6 +1561,45 @@ func TestAccEC2Fleet_LaunchTemplateOverride_instanceRequirements_localStorageTyp }) } +func TestAccEC2Fleet_LaunchTemplateOverride_instanceRequirements_maxSpotPriceAsPercentageOfOptimalOnDemandPrice(t *testing.T) { + ctx := acctest.Context(t) + var fleet awstypes.FleetData + resourceName := "aws_ec2_fleet.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckFleet(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFleetDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFleetConfig_launchTemplateOverrideInstanceRequirements(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), + `max_spot_price_as_percentage_of_optimal_on_demand_price 
= 75 + memory_mib { + min = 500 + } + vcpu_count { + min = 1 + }`), + Check: resource.ComposeTestCheckFunc( + testAccCheckFleetExists(ctx, resourceName, &fleet), + resource.TestCheckResourceAttr(resourceName, "launch_template_config.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "launch_template_config.0.override.#", acctest.Ct1), + + resource.TestCheckResourceAttr(resourceName, "launch_template_config.0.override.0.instance_requirements.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "launch_template_config.0.override.0.instance_requirements.0.max_spot_price_as_percentage_of_optimal_on_demand_price", "75"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"terminate_instances"}, + }, + }, + }) +} + func TestAccEC2Fleet_LaunchTemplateOverride_instanceRequirements_memoryGiBPerVCPU(t *testing.T) { ctx := acctest.Context(t) var fleet awstypes.FleetData diff --git a/internal/service/ec2/ec2_launch_template.go b/internal/service/ec2/ec2_launch_template.go index 469ef9c2554..ca71c429f6a 100644 --- a/internal/service/ec2/ec2_launch_template.go +++ b/internal/service/ec2/ec2_launch_template.go @@ -7,7 +7,6 @@ import ( "context" "fmt" "log" - "strconv" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -486,6 +485,12 @@ func resourceLaunchTemplate() *schema.Resource { ValidateDiagFunc: enum.Validate[awstypes.LocalStorageType](), }, }, + "max_spot_price_as_percentage_of_optimal_on_demand_price": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + ConflictsWith: []string{"instance_requirements.0.spot_max_price_percentage_over_lowest_price"}, + }, "memory_gib_per_vcpu": { Type: schema.TypeList, Optional: true, @@ -572,9 +577,10 @@ func resourceLaunchTemplate() *schema.Resource { Optional: true, }, "spot_max_price_percentage_over_lowest_price": { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(1), + Type: 
schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(1), + ConflictsWith: []string{"instance_requirements.0.max_spot_price_as_percentage_of_optimal_on_demand_price"}, }, "total_local_storage_gb": { Type: schema.TypeList, @@ -823,6 +829,12 @@ func resourceLaunchTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "primary_ipv6": { + Type: nullable.TypeNullableBool, + Optional: true, + DiffSuppressFunc: nullable.DiffSuppressNullableBool, + ValidateFunc: nullable.ValidateTypeStringNullableBool, + }, "private_ip_address": { Type: schema.TypeString, Optional: true, @@ -1027,7 +1039,7 @@ func resourceLaunchTemplateRead(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "reading EC2 Launch Template (%s): %s", d.Id(), err) } - version := strconv.FormatInt(aws.ToInt64(lt.LatestVersionNumber), 10) + version := flex.Int64ToStringValue(lt.LatestVersionNumber) ltv, err := findLaunchTemplateVersionByTwoPartKey(ctx, conn, d.Id(), version) if err != nil { @@ -1127,9 +1139,9 @@ func resourceLaunchTemplateUpdate(ctx context.Context, d *schema.ResourceData, m } if d.Get("update_default_version").(bool) { - input.DefaultVersion = aws.String(strconv.FormatInt(latestVersion, 10)) + input.DefaultVersion = flex.Int64ValueToString(latestVersion) } else if d.HasChange("default_version") { - input.DefaultVersion = aws.String(strconv.Itoa(d.Get("default_version").(int))) + input.DefaultVersion = flex.IntValueToString(d.Get("default_version").(int)) } _, err := conn.ModifyLaunchTemplate(ctx, input) @@ -1600,6 +1612,10 @@ func expandInstanceRequirementsRequest(tfMap map[string]interface{}) *awstypes.I apiObject.LocalStorageTypes = flex.ExpandStringyValueSet[awstypes.LocalStorageType](v) } + if v, ok := tfMap["max_spot_price_as_percentage_of_optimal_on_demand_price"].(int); ok && v != 0 { + apiObject.MaxSpotPriceAsPercentageOfOptimalOnDemandPrice = aws.Int32(int32(v)) + } + if v, ok := 
tfMap["memory_gib_per_vcpu"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.MemoryGiBPerVCpu = expandMemoryGiBPerVCPURequest(v[0].(map[string]interface{})) } @@ -2008,6 +2024,10 @@ func expandLaunchTemplateInstanceNetworkInterfaceSpecificationRequest(tfMap map[ apiObject.NetworkInterfaceId = aws.String(v) } + if v, null, _ := nullable.Bool(tfMap["primary_ipv6"].(string)).ValueBool(); !null { + apiObject.PrimaryIpv6 = aws.Bool(v) + } + if v, ok := tfMap[names.AttrSecurityGroups].(*schema.Set); ok && v.Len() > 0 { for _, v := range v.List() { apiObject.Groups = append(apiObject.Groups, v.(string)) @@ -2172,7 +2192,7 @@ func flattenResponseLaunchTemplateData(ctx context.Context, conn *ec2.Client, d d.Set("disable_api_stop", apiObject.DisableApiStop) d.Set("disable_api_termination", apiObject.DisableApiTermination) if apiObject.EbsOptimized != nil { - d.Set("ebs_optimized", strconv.FormatBool(aws.ToBool(apiObject.EbsOptimized))) + d.Set("ebs_optimized", flex.BoolToStringValue(apiObject.EbsOptimized)) } else { d.Set("ebs_optimized", "") } @@ -2330,11 +2350,11 @@ func flattenLaunchTemplateEBSBlockDevice(apiObject *awstypes.LaunchTemplateEbsBl tfMap := map[string]interface{}{} if v := apiObject.DeleteOnTermination; v != nil { - tfMap[names.AttrDeleteOnTermination] = strconv.FormatBool(aws.ToBool(v)) + tfMap[names.AttrDeleteOnTermination] = flex.BoolToStringValue(v) } if v := apiObject.Encrypted; v != nil { - tfMap[names.AttrEncrypted] = strconv.FormatBool(aws.ToBool(v)) + tfMap[names.AttrEncrypted] = flex.BoolToStringValue(v) } if v := apiObject.Iops; v != nil { @@ -2565,6 +2585,10 @@ func flattenInstanceRequirements(apiObject *awstypes.InstanceRequirements) map[s tfMap["local_storage_types"] = flex.FlattenStringyValueSet[awstypes.LocalStorageType](v) } + if v := apiObject.MaxSpotPriceAsPercentageOfOptimalOnDemandPrice; v != nil { + tfMap["max_spot_price_as_percentage_of_optimal_on_demand_price"] = aws.ToInt32(v) + } + if v := apiObject.MemoryGiBPerVCpu; v 
!= nil { tfMap["memory_gib_per_vcpu"] = []interface{}{flattenMemoryGiBPerVCPU(v)} } @@ -2864,15 +2888,15 @@ func flattenLaunchTemplateInstanceNetworkInterfaceSpecification(apiObject awstyp tfMap := map[string]interface{}{} if v := apiObject.AssociateCarrierIpAddress; v != nil { - tfMap["associate_carrier_ip_address"] = strconv.FormatBool(aws.ToBool(v)) + tfMap["associate_carrier_ip_address"] = flex.BoolToStringValue(v) } if v := apiObject.AssociatePublicIpAddress; v != nil { - tfMap["associate_public_ip_address"] = strconv.FormatBool(aws.ToBool(v)) + tfMap["associate_public_ip_address"] = flex.BoolToStringValue(v) } if v := apiObject.DeleteOnTermination; v != nil { - tfMap[names.AttrDeleteOnTermination] = strconv.FormatBool(aws.ToBool(v)) + tfMap[names.AttrDeleteOnTermination] = flex.BoolToStringValue(v) } if v := apiObject.Description; v != nil { @@ -2951,6 +2975,10 @@ func flattenLaunchTemplateInstanceNetworkInterfaceSpecification(apiObject awstyp tfMap[names.AttrNetworkInterfaceID] = aws.ToString(v) } + if v := apiObject.PrimaryIpv6; v != nil { + tfMap["primary_ipv6"] = flex.BoolToStringValue(v) + } + if v := apiObject.PrivateIpAddress; v != nil { tfMap["private_ip_address"] = aws.ToString(v) } diff --git a/internal/service/ec2/ec2_launch_template_data_source.go b/internal/service/ec2/ec2_launch_template_data_source.go index 18bae7aaf08..ab12b488072 100644 --- a/internal/service/ec2/ec2_launch_template_data_source.go +++ b/internal/service/ec2/ec2_launch_template_data_source.go @@ -6,7 +6,6 @@ package ec2 import ( "context" "fmt" - "strconv" "time" "github.com/aws/aws-sdk-go-v2/aws" @@ -16,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -400,6 +400,10 @@ func dataSourceLaunchTemplate() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "max_spot_price_as_percentage_of_optimal_on_demand_price": { + Type: schema.TypeInt, + Computed: true, + }, "memory_gib_per_vcpu": { Type: schema.TypeList, Computed: true, @@ -669,6 +673,10 @@ func dataSourceLaunchTemplate() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "primary_ipv6": { + Type: schema.TypeString, + Computed: true, + }, "private_ip_address": { Type: schema.TypeString, Computed: true, @@ -815,7 +823,7 @@ func dataSourceLaunchTemplateRead(ctx context.Context, d *schema.ResourceData, m d.SetId(aws.ToString(lt.LaunchTemplateId)) - version := strconv.FormatInt(aws.ToInt64(lt.LatestVersionNumber), 10) + version := flex.Int64ToStringValue(lt.LatestVersionNumber) ltv, err := findLaunchTemplateVersionByTwoPartKey(ctx, conn, d.Id(), version) if err != nil { diff --git a/internal/service/ec2/ec2_launch_template_test.go b/internal/service/ec2/ec2_launch_template_test.go index a7082f3dbf7..d503e0f403c 100644 --- a/internal/service/ec2/ec2_launch_template_test.go +++ b/internal/service/ec2/ec2_launch_template_test.go @@ -875,6 +875,7 @@ func TestAccEC2LaunchTemplate_networkInterface(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.ipv6_prefixes.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.network_card_index", acctest.Ct0), resource.TestCheckResourceAttrSet(resourceName, "network_interfaces.0.network_interface_id"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.primary_ipv6", ""), resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.private_ip_address", ""), resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.security_groups.#", acctest.Ct0), 
resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.subnet_id", ""), @@ -1391,6 +1392,54 @@ func TestAccEC2LaunchTemplate_instanceMarketOptions(t *testing.T) { }) } +func TestAccEC2LaunchTemplate_primaryIPv6(t *testing.T) { + ctx := acctest.Context(t) + var template awstypes.LaunchTemplate + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_launch_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLaunchTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLaunchTemplateConfig_primaryIPv6(rName, acctest.CtTrue), + Check: resource.ComposeTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", acctest.Ct1), + resource.TestCheckResourceAttrSet(resourceName, "network_interfaces.0.network_interface_id"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.primary_ipv6", acctest.CtTrue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccLaunchTemplateConfig_primaryIPv6(rName, acctest.CtFalse), + Check: resource.ComposeTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.#", acctest.Ct1), + resource.TestCheckResourceAttrSet(resourceName, "network_interfaces.0.network_interface_id"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.primary_ipv6", acctest.CtFalse), + ), + }, + { + Config: testAccLaunchTemplateConfig_primaryIPv6(rName, "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + resource.TestCheckResourceAttr(resourceName, 
"network_interfaces.#", acctest.Ct1), + resource.TestCheckResourceAttrSet(resourceName, "network_interfaces.0.network_interface_id"), + resource.TestCheckResourceAttr(resourceName, "network_interfaces.0.primary_ipv6", ""), + ), + }, + }, + }) +} + func TestAccEC2LaunchTemplate_instanceRequirements_memoryMiBAndVCPUCount(t *testing.T) { ctx := acctest.Context(t) var template awstypes.LaunchTemplate @@ -2412,6 +2461,41 @@ func TestAccEC2LaunchTemplate_instanceRequirements_localStorageTypes(t *testing. }) } +func TestAccEC2LaunchTemplate_instanceRequirements_maxSpotPriceAsPercentageOfOptimalOnDemandPrice(t *testing.T) { + ctx := acctest.Context(t) + var template awstypes.LaunchTemplate + resourceName := "aws_launch_template.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckLaunchTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLaunchTemplateConfig_instanceRequirements(sdkacctest.RandomWithPrefix(acctest.ResourcePrefix), + `max_spot_price_as_percentage_of_optimal_on_demand_price = 75 + memory_mib { + min = 500 + } + vcpu_count { + min = 1 + }`), + Check: resource.ComposeTestCheckFunc( + testAccCheckLaunchTemplateExists(ctx, resourceName, &template), + resource.TestCheckResourceAttr(resourceName, "instance_requirements.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "instance_requirements.0.max_spot_price_as_percentage_of_optimal_on_demand_price", "75"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccEC2LaunchTemplate_instanceRequirements_memoryGiBPerVCPU(t *testing.T) { ctx := acctest.Context(t) var template awstypes.LaunchTemplate @@ -3799,6 +3883,44 @@ resource "aws_launch_template" "test" { `, rName, associatePublicIPAddress) } +func 
testAccLaunchTemplateConfig_primaryIPv6(rName, primaryIPv6 string) string { + return fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "10.1.0.0/24" + + tags = { + Name = %[1]q + } +} + +resource "aws_network_interface" "test" { + subnet_id = aws_subnet.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_launch_template" "test" { + name = %[1]q + + network_interfaces { + network_interface_id = aws_network_interface.test.id + primary_ipv6 = %[2]s + } +} +`, rName, primaryIPv6) +} + func testAccLaunchTemplateConfig_associateCarrierIPAddress(rName, associateCarrierIPAddress string) string { return fmt.Sprintf(` resource "aws_vpc" "test" { diff --git a/internal/service/ec2/errors.go b/internal/service/ec2/errors.go index 1e980cf21be..dd813f60c1d 100644 --- a/internal/service/ec2/errors.go +++ b/internal/service/ec2/errors.go @@ -66,6 +66,8 @@ const ( errCodeInvalidLaunchTemplateIdNotFound = "InvalidLaunchTemplateId.NotFound" errCodeInvalidLaunchTemplateIdVersionNotFound = "InvalidLaunchTemplateId.VersionNotFound" errCodeInvalidLaunchTemplateNameNotFoundException = "InvalidLaunchTemplateName.NotFoundException" + errCodeInvalidLocalGatewayRouteTableIDNotFound = "InvalidLocalGatewayRouteTableID.NotFound" + errCodeInvalidLocalGatewayRouteTableVPCAssociationIDNotFound = "InvalidLocalGatewayRouteTableVpcAssociationID.NotFound" errCodeInvalidNetworkACLEntryNotFound = "InvalidNetworkAclEntry.NotFound" errCodeInvalidNetworkACLIDNotFound = "InvalidNetworkAclID.NotFound" errCodeInvalidNetworkInsightsAnalysisIdNotFound = "InvalidNetworkInsightsAnalysisId.NotFound" diff --git a/internal/service/ec2/exports.go b/internal/service/ec2/exports.go index 78067a8dabe..681dc9a9918 100644 --- a/internal/service/ec2/exports.go +++ b/internal/service/ec2/exports.go @@ -12,13 +12,16 @@ var ( FindInstanceByID = findInstanceByID 
FindNetworkInterfacesByAttachmentInstanceOwnerIDAndDescription = findNetworkInterfacesByAttachmentInstanceOwnerIDAndDescription FindNetworkInterfacesV2 = findNetworkInterfaces + FindSecurityGroupByDescriptionAndVPCID = findSecurityGroupByDescriptionAndVPCID FindSecurityGroupByNameAndVPCID = findSecurityGroupByNameAndVPCID + FindSecurityGroupByNameAndVPCIDAndOwnerID = findSecurityGroupByNameAndVPCIDAndOwnerID FindVPCByIDV2 = findVPCByID FindVPCEndpointByID = findVPCEndpointByID NewCustomFilterListFrameworkV2 = newCustomFilterListFrameworkV2 NewFilter = newFilter NewFilterV2 = newFilterV2 ResourceAMI = resourceAMI + ResourceSecurityGroup = resourceSecurityGroup ResourceTransitGateway = resourceTransitGateway ResourceTransitGatewayConnectPeer = resourceTransitGatewayConnectPeer VPCEndpointCreationTimeout = vpcEndpointCreationTimeout diff --git a/internal/service/ec2/exports_test.go b/internal/service/ec2/exports_test.go index 2139b2e7579..7ddb9f53a2f 100644 --- a/internal/service/ec2/exports_test.go +++ b/internal/service/ec2/exports_test.go @@ -46,9 +46,13 @@ var ( ResourceInstanceState = resourceInstanceState ResourceKeyPair = resourceKeyPair ResourceLaunchTemplate = resourceLaunchTemplate + ResourceLocalGatewayRoute = resourceLocalGatewayRoute + ResourceLocalGatewayRouteTableVPCAssociation = resourceLocalGatewayRouteTableVPCAssociation ResourceMainRouteTableAssociation = resourceMainRouteTableAssociation ResourceNetworkACL = resourceNetworkACL ResourceNetworkACLRule = resourceNetworkACLRule + ResourceNetworkInsightsAnalysis = resourceNetworkInsightsAnalysis + ResourceNetworkInsightsPath = resourceNetworkInsightsPath ResourceNetworkInterface = resourceNetworkInterface ResourcePlacementGroup = resourcePlacementGroup ResourceRoute = resourceRoute @@ -121,8 +125,12 @@ var ( FindInstanceStateByID = findInstanceStateByID FindKeyPairByName = findKeyPairByName FindLaunchTemplateByID = findLaunchTemplateByID + FindLocalGatewayRouteByTwoPartKey = 
findLocalGatewayRouteByTwoPartKey + FindLocalGatewayRouteTableVPCAssociationByID = findLocalGatewayRouteTableVPCAssociationByID FindMainRouteTableAssociationByID = findMainRouteTableAssociationByID FindNetworkACLByIDV2 = findNetworkACLByID + FindNetworkInsightsAnalysisByID = findNetworkInsightsAnalysisByID + FindNetworkInsightsPathByID = findNetworkInsightsPathByID FindNetworkInterfaceByIDV2 = findNetworkInterfaceByID FindNetworkPerformanceMetricSubscriptionByFourPartKey = findNetworkPerformanceMetricSubscriptionByFourPartKey FindPlacementGroupByName = findPlacementGroupByName diff --git a/internal/service/ec2/find.go b/internal/service/ec2/find.go index 9216faaa95e..af8ed4a134d 100644 --- a/internal/service/ec2/find.go +++ b/internal/service/ec2/find.go @@ -19,149 +19,6 @@ import ( // Move functions to findv2.go as they are migrated to AWS SDK for Go v2. // -func FindCOIPPools(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeCoipPoolsInput) ([]*ec2.CoipPool, error) { - var output []*ec2.CoipPool - - err := conn.DescribeCoipPoolsPagesWithContext(ctx, input, func(page *ec2.DescribeCoipPoolsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.CoipPools { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, errCodeInvalidPoolIDNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - return output, nil -} - -func FindCOIPPool(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeCoipPoolsInput) (*ec2.CoipPool, error) { - output, err := FindCOIPPools(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - -func FindLocalGatewayRouteTables(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeLocalGatewayRouteTablesInput) ([]*ec2.LocalGatewayRouteTable, error) { - var output []*ec2.LocalGatewayRouteTable - - err := 
conn.DescribeLocalGatewayRouteTablesPagesWithContext(ctx, input, func(page *ec2.DescribeLocalGatewayRouteTablesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.LocalGatewayRouteTables { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if err != nil { - return nil, err - } - - return output, nil -} - -func FindLocalGatewayRouteTable(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeLocalGatewayRouteTablesInput) (*ec2.LocalGatewayRouteTable, error) { - output, err := FindLocalGatewayRouteTables(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - -func FindLocalGatewayVirtualInterfaceGroups(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput) ([]*ec2.LocalGatewayVirtualInterfaceGroup, error) { - var output []*ec2.LocalGatewayVirtualInterfaceGroup - - err := conn.DescribeLocalGatewayVirtualInterfaceGroupsPagesWithContext(ctx, input, func(page *ec2.DescribeLocalGatewayVirtualInterfaceGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.LocalGatewayVirtualInterfaceGroups { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if err != nil { - return nil, err - } - - return output, nil -} - -func FindLocalGatewayVirtualInterfaceGroup(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput) (*ec2.LocalGatewayVirtualInterfaceGroup, error) { - output, err := FindLocalGatewayVirtualInterfaceGroups(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - -func FindLocalGateways(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeLocalGatewaysInput) ([]*ec2.LocalGateway, error) { - var output []*ec2.LocalGateway - - err := conn.DescribeLocalGatewaysPagesWithContext(ctx, input, func(page 
*ec2.DescribeLocalGatewaysOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.LocalGateways { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if err != nil { - return nil, err - } - - return output, nil -} - -func FindLocalGateway(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeLocalGatewaysInput) (*ec2.LocalGateway, error) { - output, err := FindLocalGateways(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - func FindNetworkACL(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeNetworkAclsInput) (*ec2.NetworkAcl, error) { output, err := FindNetworkACLs(ctx, conn, input) @@ -414,130 +271,6 @@ func FindNetworkInterfaceSecurityGroup(ctx context.Context, conn *ec2.EC2, netwo } } -func FindNetworkInsightsAnalysis(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeNetworkInsightsAnalysesInput) (*ec2.NetworkInsightsAnalysis, error) { - output, err := FindNetworkInsightsAnalyses(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - -func FindNetworkInsightsAnalyses(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeNetworkInsightsAnalysesInput) ([]*ec2.NetworkInsightsAnalysis, error) { - var output []*ec2.NetworkInsightsAnalysis - - err := conn.DescribeNetworkInsightsAnalysesPagesWithContext(ctx, input, func(page *ec2.DescribeNetworkInsightsAnalysesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.NetworkInsightsAnalyses { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, errCodeInvalidNetworkInsightsAnalysisIdNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - return output, nil -} - -func FindNetworkInsightsAnalysisByID(ctx context.Context, 
conn *ec2.EC2, id string) (*ec2.NetworkInsightsAnalysis, error) { - input := &ec2.DescribeNetworkInsightsAnalysesInput{ - NetworkInsightsAnalysisIds: aws.StringSlice([]string{id}), - } - - output, err := FindNetworkInsightsAnalysis(ctx, conn, input) - - if err != nil { - return nil, err - } - - // Eventual consistency check. - if aws.StringValue(output.NetworkInsightsAnalysisId) != id { - return nil, &retry.NotFoundError{ - LastRequest: input, - } - } - - return output, nil -} - -func FindNetworkInsightsPath(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeNetworkInsightsPathsInput) (*ec2.NetworkInsightsPath, error) { - output, err := FindNetworkInsightsPaths(ctx, conn, input) - - if err != nil { - return nil, err - } - - return tfresource.AssertSinglePtrResult(output) -} - -func FindNetworkInsightsPaths(ctx context.Context, conn *ec2.EC2, input *ec2.DescribeNetworkInsightsPathsInput) ([]*ec2.NetworkInsightsPath, error) { - var output []*ec2.NetworkInsightsPath - - err := conn.DescribeNetworkInsightsPathsPagesWithContext(ctx, input, func(page *ec2.DescribeNetworkInsightsPathsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.NetworkInsightsPaths { - if v != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, errCodeInvalidNetworkInsightsPathIdNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - return output, nil -} - -func FindNetworkInsightsPathByID(ctx context.Context, conn *ec2.EC2, id string) (*ec2.NetworkInsightsPath, error) { - input := &ec2.DescribeNetworkInsightsPathsInput{ - NetworkInsightsPathIds: aws.StringSlice([]string{id}), - } - - output, err := FindNetworkInsightsPath(ctx, conn, input) - - if err != nil { - return nil, err - } - - // Eventual consistency check. 
- if aws.StringValue(output.NetworkInsightsPathId) != id { - return nil, &retry.NotFoundError{ - LastRequest: input, - } - } - - return output, nil -} - func FindSecurityGroupByID(ctx context.Context, conn *ec2.EC2, id string) (*ec2.SecurityGroup, error) { input := &ec2.DescribeSecurityGroupsInput{ GroupIds: aws.StringSlice([]string{id}), @@ -559,8 +292,19 @@ func FindSecurityGroupByID(ctx context.Context, conn *ec2.EC2, id string) (*ec2. return output, nil } -// FindSecurityGroupByNameAndVPCIDAndOwnerID looks up a security group by name, VPC ID and owner ID. Returns a retry.NotFoundError if not found. -func FindSecurityGroupByNameAndVPCIDAndOwnerID(ctx context.Context, conn *ec2.EC2, name, vpcID, ownerID string) (*ec2.SecurityGroup, error) { +func findSecurityGroupByDescriptionAndVPCID(ctx context.Context, conn *ec2.EC2, description, vpcID string) (*ec2.SecurityGroup, error) { + input := &ec2.DescribeSecurityGroupsInput{ + Filters: newAttributeFilterList( + map[string]string{ + "description": description, // nosemgrep:ci.literal-description-string-constant + "vpc-id": vpcID, + }, + ), + } + return FindSecurityGroup(ctx, conn, input) +} + +func findSecurityGroupByNameAndVPCIDAndOwnerID(ctx context.Context, conn *ec2.EC2, name, vpcID, ownerID string) (*ec2.SecurityGroup, error) { input := &ec2.DescribeSecurityGroupsInput{ Filters: newAttributeFilterList( map[string]string{ diff --git a/internal/service/ec2/findv2.go b/internal/service/ec2/findv2.go index 59d893b5c91..7588c8c2186 100644 --- a/internal/service/ec2/findv2.go +++ b/internal/service/ec2/findv2.go @@ -13,6 +13,7 @@ import ( awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" 
"github.com/hashicorp/terraform-provider-aws/internal/types" @@ -137,6 +138,40 @@ func findCapacityReservationByID(ctx context.Context, conn *ec2.Client, id strin return output, nil } +func findCOIPPool(ctx context.Context, conn *ec2.Client, input *ec2.DescribeCoipPoolsInput) (*awstypes.CoipPool, error) { + output, err := findCOIPPools(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findCOIPPools(ctx context.Context, conn *ec2.Client, input *ec2.DescribeCoipPoolsInput) ([]awstypes.CoipPool, error) { + var output []awstypes.CoipPool + + pages := ec2.NewDescribeCoipPoolsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if tfawserr.ErrCodeEquals(err, errCodeInvalidPoolIDNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.CoipPools...) + } + + return output, nil +} + func findFleet(ctx context.Context, conn *ec2.Client, input *ec2.DescribeFleetsInput) (*awstypes.FleetData, error) { output, err := findFleets(ctx, conn, input) @@ -618,6 +653,230 @@ func findLaunchTemplateVersionByTwoPartKey(ctx context.Context, conn *ec2.Client return output, nil } +func findLocalGatewayRouteTable(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayRouteTablesInput) (*awstypes.LocalGatewayRouteTable, error) { + output, err := findLocalGatewayRouteTables(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findLocalGatewayRouteTables(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayRouteTablesInput) ([]awstypes.LocalGatewayRouteTable, error) { + var output []awstypes.LocalGatewayRouteTable + + pages := ec2.NewDescribeLocalGatewayRouteTablesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + 
return nil, err + } + + output = append(output, page.LocalGatewayRouteTables...) + } + + return output, nil +} + +func findLocalGatewayRoutes(ctx context.Context, conn *ec2.Client, input *ec2.SearchLocalGatewayRoutesInput) ([]awstypes.LocalGatewayRoute, error) { + var output []awstypes.LocalGatewayRoute + + pages := ec2.NewSearchLocalGatewayRoutesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if tfawserr.ErrCodeEquals(err, errCodeInvalidLocalGatewayRouteTableIDNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.Routes...) + } + + return output, nil +} + +func findLocalGatewayRouteByTwoPartKey(ctx context.Context, conn *ec2.Client, localGatewayRouteTableID, destinationCIDRBlock string) (*awstypes.LocalGatewayRoute, error) { + input := &ec2.SearchLocalGatewayRoutesInput{ + Filters: []awstypes.Filter{ + { + Name: aws.String(names.AttrType), + Values: enum.Slice(awstypes.LocalGatewayRouteTypeStatic), + }, + }, + LocalGatewayRouteTableId: aws.String(localGatewayRouteTableID), + } + + localGatewayRoutes, err := findLocalGatewayRoutes(ctx, conn, input) + + if err != nil { + return nil, err + } + + localGatewayRoutes = tfslices.Filter(localGatewayRoutes, func(v awstypes.LocalGatewayRoute) bool { + return aws.ToString(v.DestinationCidrBlock) == destinationCIDRBlock + }) + + output, err := tfresource.AssertSingleValueResult(localGatewayRoutes) + + if err != nil { + return nil, err + } + + if state := output.State; state == awstypes.LocalGatewayRouteStateDeleted { + return nil, &retry.NotFoundError{ + Message: string(state), + LastRequest: input, + } + } + + return output, nil +} + +func findLocalGatewayRouteTableVPCAssociation(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput) (*awstypes.LocalGatewayRouteTableVpcAssociation, error) { + output, err := 
findLocalGatewayRouteTableVPCAssociations(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findLocalGatewayRouteTableVPCAssociations(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput) ([]awstypes.LocalGatewayRouteTableVpcAssociation, error) { + var output []awstypes.LocalGatewayRouteTableVpcAssociation + + pages := ec2.NewDescribeLocalGatewayRouteTableVpcAssociationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.LocalGatewayRouteTableVpcAssociations...) + } + + return output, nil +} + +func findLocalGatewayRouteTableVPCAssociationByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.LocalGatewayRouteTableVpcAssociation, error) { + input := &ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput{ + LocalGatewayRouteTableVpcAssociationIds: []string{id}, + } + + output, err := findLocalGatewayRouteTableVPCAssociation(ctx, conn, input) + + if err != nil { + return nil, err + } + + if state := aws.ToString(output.State); state == string(awstypes.RouteTableAssociationStateCodeDisassociated) { + return nil, &retry.NotFoundError{ + Message: state, + LastRequest: input, + } + } + + // Eventual consistency check. 
+ if aws.ToString(output.LocalGatewayRouteTableVpcAssociationId) != id { + return nil, &retry.NotFoundError{ + LastRequest: input, + } + } + + return output, nil +} + +func findLocalGatewayVirtualInterface(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayVirtualInterfacesInput) (*awstypes.LocalGatewayVirtualInterface, error) { + output, err := findLocalGatewayVirtualInterfaces(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findLocalGatewayVirtualInterfaces(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayVirtualInterfacesInput) ([]awstypes.LocalGatewayVirtualInterface, error) { + var output []awstypes.LocalGatewayVirtualInterface + + pages := ec2.NewDescribeLocalGatewayVirtualInterfacesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.LocalGatewayVirtualInterfaces...) + } + + return output, nil +} + +func findLocalGatewayVirtualInterfaceGroup(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput) (*awstypes.LocalGatewayVirtualInterfaceGroup, error) { + output, err := findLocalGatewayVirtualInterfaceGroups(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findLocalGatewayVirtualInterfaceGroups(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput) ([]awstypes.LocalGatewayVirtualInterfaceGroup, error) { + var output []awstypes.LocalGatewayVirtualInterfaceGroup + + pages := ec2.NewDescribeLocalGatewayVirtualInterfaceGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.LocalGatewayVirtualInterfaceGroups...) 
+ } + + return output, nil +} + +func findLocalGateway(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewaysInput) (*awstypes.LocalGateway, error) { + output, err := findLocalGateways(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findLocalGateways(ctx context.Context, conn *ec2.Client, input *ec2.DescribeLocalGatewaysInput) ([]awstypes.LocalGateway, error) { + var output []awstypes.LocalGateway + + pages := ec2.NewDescribeLocalGatewaysPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + output = append(output, page.LocalGateways...) + } + + return output, nil +} + func findPlacementGroup(ctx context.Context, conn *ec2.Client, input *ec2.DescribePlacementGroupsInput) (*awstypes.PlacementGroup, error) { output, err := findPlacementGroups(ctx, conn, input) @@ -3209,7 +3468,7 @@ func findTransitGateways(ctx context.Context, conn *ec2.Client, input *ec2.Descr var output []awstypes.TransitGateway pages := ec2.NewDescribeTransitGatewaysPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayIDNotFound) { @@ -3271,7 +3530,7 @@ func findTransitGatewayAttachments(ctx context.Context, conn *ec2.Client, input var output []awstypes.TransitGatewayAttachment pages := ec2.NewDescribeTransitGatewayAttachmentsPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayAttachmentIDNotFound) { @@ -3326,7 +3585,7 @@ func findTransitGatewayConnects(ctx context.Context, conn *ec2.Client, input *ec var output []awstypes.TransitGatewayConnect pages := ec2.NewDescribeTransitGatewayConnectsPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := 
pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayAttachmentIDNotFound) { @@ -3393,7 +3652,7 @@ func findTransitGatewayConnectPeers(ctx context.Context, conn *ec2.Client, input var output []awstypes.TransitGatewayConnectPeer pages := ec2.NewDescribeTransitGatewayConnectPeersPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayConnectPeerIDNotFound) { @@ -3455,7 +3714,7 @@ func findTransitGatewayMulticastDomains(ctx context.Context, conn *ec2.Client, i var output []awstypes.TransitGatewayMulticastDomain pages := ec2.NewDescribeTransitGatewayMulticastDomainsPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayMulticastDomainIdNotFound) { @@ -3517,7 +3776,7 @@ func findTransitGatewayMulticastDomainAssociations(ctx context.Context, conn *ec var output []awstypes.TransitGatewayMulticastDomainAssociation pages := ec2.NewGetTransitGatewayMulticastDomainAssociationsPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayMulticastDomainIdNotFound) { @@ -3573,7 +3832,7 @@ func findTransitGatewayMulticastGroups(ctx context.Context, conn *ec2.Client, in var output []awstypes.TransitGatewayMulticastGroup pages := ec2.NewSearchTransitGatewayMulticastGroupsPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayMulticastDomainIdNotFound) { @@ -3682,7 +3941,7 @@ func findTransitGatewayPeeringAttachments(ctx context.Context, conn *ec2.Client, var output []awstypes.TransitGatewayPeeringAttachment pages := ec2.NewDescribeTransitGatewayPeeringAttachmentsPaginator(conn, input) - if 
pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayAttachmentIDNotFound) { @@ -3748,7 +4007,7 @@ func findTransitGatewayPrefixListReferences(ctx context.Context, conn *ec2.Clien var output []awstypes.TransitGatewayPrefixListReference pages := ec2.NewGetTransitGatewayPrefixListReferencesPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidRouteTableIDNotFound) { @@ -3870,7 +4129,7 @@ func findTransitGatewayPolicyTables(ctx context.Context, conn *ec2.Client, input var output []awstypes.TransitGatewayPolicyTable pages := ec2.NewDescribeTransitGatewayPolicyTablesPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayPolicyTableIdNotFound) { @@ -3894,7 +4153,7 @@ func findTransitGatewayRouteTables(ctx context.Context, conn *ec2.Client, input var output []awstypes.TransitGatewayRouteTable pages := ec2.NewDescribeTransitGatewayRouteTablesPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidRouteTableIDNotFound) { @@ -4039,7 +4298,7 @@ func findTransitGatewayPolicyTableAssociations(ctx context.Context, conn *ec2.Cl var output []awstypes.TransitGatewayPolicyTableAssociation pages := ec2.NewGetTransitGatewayPolicyTableAssociationsPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayPolicyTableIdNotFound) { @@ -4073,7 +4332,7 @@ func findTransitGatewayRouteTableAssociations(ctx context.Context, conn *ec2.Cli var output []awstypes.TransitGatewayRouteTableAssociation pages := ec2.NewGetTransitGatewayRouteTableAssociationsPaginator(conn, input) - if 
pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidRouteTableIDNotFound) { @@ -4138,7 +4397,7 @@ func findTransitGatewayRouteTablePropagations(ctx context.Context, conn *ec2.Cli var output []awstypes.TransitGatewayRouteTablePropagation pages := ec2.NewGetTransitGatewayRouteTablePropagationsPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidRouteTableIDNotFound) { @@ -4172,7 +4431,7 @@ func findTransitGatewayVPCAttachments(ctx context.Context, conn *ec2.Client, inp var output []awstypes.TransitGatewayVpcAttachment pages := ec2.NewDescribeTransitGatewayVpcAttachmentsPaginator(conn, input) - if pages.HasMorePages() { + for pages.HasMorePages() { page, err := pages.NextPage(ctx) if tfawserr.ErrCodeEquals(err, errCodeInvalidTransitGatewayAttachmentIDNotFound) { @@ -5265,3 +5524,113 @@ func findTrafficMirrorTargetByID(ctx context.Context, conn *ec2.Client, id strin return output, nil } + +func findNetworkInsightsPath(ctx context.Context, conn *ec2.Client, input *ec2.DescribeNetworkInsightsPathsInput) (*awstypes.NetworkInsightsPath, error) { + output, err := findNetworkInsightsPaths(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findNetworkInsightsAnalysis(ctx context.Context, conn *ec2.Client, input *ec2.DescribeNetworkInsightsAnalysesInput) (*awstypes.NetworkInsightsAnalysis, error) { + output, err := findNetworkInsightsAnalyses(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findNetworkInsightsAnalyses(ctx context.Context, conn *ec2.Client, input *ec2.DescribeNetworkInsightsAnalysesInput) ([]awstypes.NetworkInsightsAnalysis, error) { + var output []awstypes.NetworkInsightsAnalysis + + pages := 
ec2.NewDescribeNetworkInsightsAnalysesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if tfawserr.ErrCodeEquals(err, errCodeInvalidNetworkInsightsAnalysisIdNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.NetworkInsightsAnalyses...) + } + + return output, nil +} + +func findNetworkInsightsAnalysisByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.NetworkInsightsAnalysis, error) { + input := &ec2.DescribeNetworkInsightsAnalysesInput{ + NetworkInsightsAnalysisIds: []string{id}, + } + + output, err := findNetworkInsightsAnalysis(ctx, conn, input) + + if err != nil { + return nil, err + } + + // Eventual consistency check. + if aws.ToString(output.NetworkInsightsAnalysisId) != id { + return nil, &retry.NotFoundError{ + LastRequest: input, + } + } + + return output, nil +} + +func findNetworkInsightsPaths(ctx context.Context, conn *ec2.Client, input *ec2.DescribeNetworkInsightsPathsInput) ([]awstypes.NetworkInsightsPath, error) { + var output []awstypes.NetworkInsightsPath + + pages := ec2.NewDescribeNetworkInsightsPathsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if tfawserr.ErrCodeEquals(err, errCodeInvalidNetworkInsightsPathIdNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.NetworkInsightsPaths...) + } + + return output, nil +} + +func findNetworkInsightsPathByID(ctx context.Context, conn *ec2.Client, id string) (*awstypes.NetworkInsightsPath, error) { + input := &ec2.DescribeNetworkInsightsPathsInput{ + NetworkInsightsPathIds: []string{id}, + } + + output, err := findNetworkInsightsPath(ctx, conn, input) + + if err != nil { + return nil, err + } + + // Eventual consistency check. 
+ if aws.ToString(output.NetworkInsightsPathId) != id { + return nil, &retry.NotFoundError{ + LastRequest: input, + } + } + + return output, nil +} diff --git a/internal/service/ec2/outposts_coip_pool_data_source.go b/internal/service/ec2/outposts_coip_pool_data_source.go index 851db7b179c..25bbaf16149 100644 --- a/internal/service/ec2/outposts_coip_pool_data_source.go +++ b/internal/service/ec2/outposts_coip_pool_data_source.go @@ -5,21 +5,23 @@ package ec2 import ( "context" - "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_coip_pool") -func DataSourceCoIPPool() *schema.Resource { +// @SDKDataSource("aws_ec2_coip_pool", name="COIP Pool") +// @Tags +// @Testing(tagsTest=false) +func dataSourceCoIPPool() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceCoIPPoolRead, @@ -28,98 +30,75 @@ func DataSourceCoIPPool() *schema.Resource { }, Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrFilter: customFiltersSchema(), "local_gateway_route_table_id": { Type: schema.TypeString, Optional: true, Computed: true, }, - "pool_cidrs": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, Computed: true, - Set: schema.HashString, }, - "pool_id": { Type: schema.TypeString, Optional: true, Computed: true, }, - - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrTags: 
tftags.TagsSchemaComputed(), - - names.AttrFilter: customFiltersSchema(), }, } } func dataSourceCoIPPoolRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := meta.(*conns.AWSClient).EC2Client(ctx) - req := &ec2.DescribeCoipPoolsInput{} + input := &ec2.DescribeCoipPoolsInput{} if v, ok := d.GetOk("pool_id"); ok { - req.PoolIds = []*string{aws.String(v.(string))} + input.PoolIds = []string{v.(string)} } - filters := map[string]string{} - if v, ok := d.GetOk("local_gateway_route_table_id"); ok { - filters["coip-pool.local-gateway-route-table-id"] = v.(string) + input.Filters = append(input.Filters, newAttributeFilterListV2(map[string]string{ + "coip-pool.local-gateway-route-table-id": v.(string), + })...) } - req.Filters = newAttributeFilterList(filters) - if tags, tagsOk := d.GetOk(names.AttrTags); tagsOk { - req.Filters = append(req.Filters, newTagFilterList( - Tags(tftags.New(ctx, tags.(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, tags.(map[string]interface{}))), )...) } - req.Filters = append(req.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) - if len(req.Filters) == 0 { + + if len(input.Filters) == 0 { // Don't send an empty filters list; the EC2 API won't accept it. 
- req.Filters = nil + input.Filters = nil } - log.Printf("[DEBUG] Reading AWS COIP Pool: %s", req) - resp, err := conn.DescribeCoipPoolsWithContext(ctx, req) + coip, err := findCOIPPool(ctx, conn, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "describing EC2 COIP Pools: %s", err) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EC2 COIP Pool", err)) } - if resp == nil || len(resp.CoipPools) == 0 { - return sdkdiag.AppendErrorf(diags, "no matching COIP Pool found") - } - if len(resp.CoipPools) > 1 { - return sdkdiag.AppendErrorf(diags, "multiple Coip Pools matched; use additional constraints to reduce matches to a single COIP Pool") - } - - coip := resp.CoipPools[0] - d.SetId(aws.StringValue(coip.PoolId)) - - d.Set("local_gateway_route_table_id", coip.LocalGatewayRouteTableId) + d.SetId(aws.ToString(coip.PoolId)) d.Set(names.AttrARN, coip.PoolArn) - - if err := d.Set("pool_cidrs", aws.StringValueSlice(coip.PoolCidrs)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting pool_cidrs: %s", err) - } - + d.Set("local_gateway_route_table_id", coip.LocalGatewayRouteTableId) + d.Set("pool_cidrs", coip.PoolCidrs) d.Set("pool_id", coip.PoolId) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, coip.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOutV2(ctx, coip.Tags) return diags } diff --git a/internal/service/ec2/outposts_coip_pools_data_source.go b/internal/service/ec2/outposts_coip_pools_data_source.go index 31d61825e35..8d5b871091a 100644 --- a/internal/service/ec2/outposts_coip_pools_data_source.go +++ b/internal/service/ec2/outposts_coip_pools_data_source.go @@ -7,18 +7,20 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_coip_pools") -func DataSourceCoIPPools() *schema.Resource { +// @SDKDataSource("aws_ec2_coip_pools", name="COIP Pools") +func dataSourceCoIPPools() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceCoIPPoolsRead, @@ -33,22 +35,22 @@ func DataSourceCoIPPools() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - names.AttrTags: tftags.TagsSchemaComputed(), + names.AttrTags: tftags.TagsSchema(), }, } } func dataSourceCoIPPoolsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.DescribeCoipPoolsInput{} - input.Filters = append(input.Filters, newTagFilterList( - Tags(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), )...) - input.Filters = append(input.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) 
@@ -56,20 +58,16 @@ func dataSourceCoIPPoolsRead(ctx context.Context, d *schema.ResourceData, meta i input.Filters = nil } - output, err := FindCOIPPools(ctx, conn, input) + output, err := findCOIPPools(ctx, conn, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading EC2 COIP Pools: %s", err) } - var poolIDs []string - - for _, v := range output { - poolIDs = append(poolIDs, aws.StringValue(v.PoolId)) - } - d.SetId(meta.(*conns.AWSClient).Region) - d.Set("pool_ids", poolIDs) + d.Set("pool_ids", tfslices.ApplyToAll(output, func(v awstypes.CoipPool) string { + return aws.ToString(v.PoolId) + })) return diags } diff --git a/internal/service/ec2/outposts_local_gateway_data_source.go b/internal/service/ec2/outposts_local_gateway_data_source.go index ec107565b70..6cb705c2501 100644 --- a/internal/service/ec2/outposts_local_gateway_data_source.go +++ b/internal/service/ec2/outposts_local_gateway_data_source.go @@ -5,21 +5,23 @@ package ec2 import ( "context" - "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_local_gateway") -func DataSourceLocalGateway() *schema.Resource { +// @SDKDataSource("aws_ec2_local_gateway", name="Local Gateway") +// @Tags +// @Testing(tagsTest=false) +func dataSourceLocalGateway() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLocalGatewayRead, @@ -33,83 +35,68 @@ func DataSourceLocalGateway() *schema.Resource { Optional: true, Computed: 
true, }, - + names.AttrFilter: customFiltersSchema(), "outpost_arn": { Type: schema.TypeString, Computed: true, }, - - names.AttrFilter: customFiltersSchema(), - - names.AttrState: { + names.AttrOwnerID: { Type: schema.TypeString, - Optional: true, Computed: true, }, - - names.AttrTags: tftags.TagsSchemaComputed(), - - names.AttrOwnerID: { + names.AttrState: { Type: schema.TypeString, + Optional: true, Computed: true, }, + names.AttrTags: tftags.TagsSchemaComputed(), }, } } func dataSourceLocalGatewayRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := meta.(*conns.AWSClient).EC2Client(ctx) - req := &ec2.DescribeLocalGatewaysInput{} + input := &ec2.DescribeLocalGatewaysInput{} if v, ok := d.GetOk(names.AttrID); ok { - req.LocalGatewayIds = []*string{aws.String(v.(string))} + input.LocalGatewayIds = []string{v.(string)} } - req.Filters = newAttributeFilterList( + input.Filters = newAttributeFilterListV2( map[string]string{ names.AttrState: d.Get(names.AttrState).(string), }, ) if tags, tagsOk := d.GetOk(names.AttrTags); tagsOk { - req.Filters = append(req.Filters, newTagFilterList( - Tags(tftags.New(ctx, tags.(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, tags.(map[string]interface{}))), )...) } - req.Filters = append(req.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) - if len(req.Filters) == 0 { + + if len(input.Filters) == 0 { // Don't send an empty filters list; the EC2 API won't accept it. 
- req.Filters = nil + input.Filters = nil } - log.Printf("[DEBUG] Reading AWS LOCAL GATEWAY: %s", req) - resp, err := conn.DescribeLocalGatewaysWithContext(ctx, req) + localGateway, err := findLocalGateway(ctx, conn, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "describing EC2 Local Gateways: %s", err) - } - if resp == nil || len(resp.LocalGateways) == 0 { - return sdkdiag.AppendErrorf(diags, "no matching Local Gateway found") - } - if len(resp.LocalGateways) > 1 { - return sdkdiag.AppendErrorf(diags, "multiple Local Gateways matched; use additional constraints to reduce matches to a single Local Gateway") + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EC2 Local Gateway", err)) } - localGateway := resp.LocalGateways[0] - - d.SetId(aws.StringValue(localGateway.LocalGatewayId)) + d.SetId(aws.ToString(localGateway.LocalGatewayId)) d.Set("outpost_arn", localGateway.OutpostArn) d.Set(names.AttrOwnerID, localGateway.OwnerId) d.Set(names.AttrState, localGateway.State) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, localGateway.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOutV2(ctx, localGateway.Tags) return diags } diff --git a/internal/service/ec2/outposts_local_gateway_route.go b/internal/service/ec2/outposts_local_gateway_route.go index fd7537c200b..b8804e9afaf 100644 --- a/internal/service/ec2/outposts_local_gateway_route.go +++ b/internal/service/ec2/outposts_local_gateway_route.go @@ -10,29 +10,25 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" - "github.com/hashicorp/terraform-provider-aws/names" -) - -const ( - localGatewayRouteEventualConsistencyTimeout = 1 * time.Minute ) // @SDKResource("aws_ec2_local_gateway_route") -func ResourceLocalGatewayRoute() *schema.Resource { +func resourceLocalGatewayRoute() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLocalGatewayRouteCreate, ReadWithoutTimeout: resourceLocalGatewayRouteRead, DeleteWithoutTimeout: resourceLocalGatewayRouteDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -60,62 +56,43 @@ func ResourceLocalGatewayRoute() *schema.Resource { func resourceLocalGatewayRouteCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) - destination := d.Get("destination_cidr_block").(string) + destinationCIDRBlock := d.Get("destination_cidr_block").(string) localGatewayRouteTableID := d.Get("local_gateway_route_table_id").(string) - + id := localGatewayRouteCreateResourceID(localGatewayRouteTableID, destinationCIDRBlock) input := &ec2.CreateLocalGatewayRouteInput{ - DestinationCidrBlock: aws.String(destination), + DestinationCidrBlock: aws.String(destinationCIDRBlock), LocalGatewayRouteTableId: aws.String(localGatewayRouteTableID), LocalGatewayVirtualInterfaceGroupId: aws.String(d.Get("local_gateway_virtual_interface_group_id").(string)), } - _, err := conn.CreateLocalGatewayRouteWithContext(ctx, input) + _, err := conn.CreateLocalGatewayRoute(ctx, input) if err 
!= nil { - return sdkdiag.AppendErrorf(diags, "creating EC2 Local Gateway Route: %s", err) + return sdkdiag.AppendErrorf(diags, "creating EC2 Local Gateway Route (%s): %s", id, err) } - d.SetId(fmt.Sprintf("%s_%s", localGatewayRouteTableID, destination)) + d.SetId(id) return append(diags, resourceLocalGatewayRouteRead(ctx, d, meta)...) } func resourceLocalGatewayRouteRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) - localGatewayRouteTableID, destination, err := DecodeLocalGatewayRouteID(d.Id()) + localGatewayRouteTableID, destination, err := localGatewayRouteParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Local Gateway Route (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - var localGatewayRoute *ec2.LocalGatewayRoute - err = retry.RetryContext(ctx, localGatewayRouteEventualConsistencyTimeout, func() *retry.RetryError { - var err error - localGatewayRoute, err = GetLocalGatewayRoute(ctx, conn, localGatewayRouteTableID, destination) - - if err != nil { - return retry.NonRetryableError(err) - } - - if d.IsNewResource() && localGatewayRoute == nil { - return retry.RetryableError(&retry.NotFoundError{}) - } - - return nil - }) - - if tfresource.TimedOut(err) { - localGatewayRoute, err = GetLocalGatewayRoute(ctx, conn, localGatewayRouteTableID, destination) - } - - if tfawserr.ErrCodeEquals(err, "InvalidRouteTableID.NotFound") { - log.Printf("[WARN] EC2 Local Gateway Route Table (%s) not found, removing from state", localGatewayRouteTableID) - d.SetId("") - return diags - } + const ( + timeout = 1 * time.Minute + ) + outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, timeout, func() (interface{}, error) { + return findLocalGatewayRouteByTwoPartKey(ctx, conn, localGatewayRouteTableID, destination) + }, d.IsNewResource()) if 
!d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EC2 Local Gateway Route (%s) not found, removing from state", d.Id()) @@ -127,18 +104,7 @@ func resourceLocalGatewayRouteRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading EC2 Local Gateway Route (%s): %s", d.Id(), err) } - if localGatewayRoute == nil { - log.Printf("[WARN] EC2 Local Gateway Route (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } - - state := aws.StringValue(localGatewayRoute.State) - if state == ec2.LocalGatewayRouteStateDeleted || state == ec2.LocalGatewayRouteStateDeleting { - log.Printf("[WARN] EC2 Local Gateway Route (%s) deleted, removing from state", d.Id()) - d.SetId("") - return diags - } + localGatewayRoute := outputRaw.(*awstypes.LocalGatewayRoute) d.Set("destination_cidr_block", localGatewayRoute.DestinationCidrBlock) d.Set("local_gateway_virtual_interface_group_id", localGatewayRoute.LocalGatewayVirtualInterfaceGroupId) @@ -149,22 +115,20 @@ func resourceLocalGatewayRouteRead(ctx context.Context, d *schema.ResourceData, func resourceLocalGatewayRouteDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) - localGatewayRouteTableID, destination, err := DecodeLocalGatewayRouteID(d.Id()) + localGatewayRouteTableID, destination, err := localGatewayRouteParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting EC2 Local Gateway Route (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - input := &ec2.DeleteLocalGatewayRouteInput{ + log.Printf("[DEBUG] Deleting EC2 Local Gateway Route: %s", d.Id()) + _, err = conn.DeleteLocalGatewayRoute(ctx, &ec2.DeleteLocalGatewayRouteInput{ DestinationCidrBlock: aws.String(destination), LocalGatewayRouteTableId: aws.String(localGatewayRouteTableID), - } - - 
log.Printf("[DEBUG] Deleting EC2 Local Gateway Route (%s): %s", d.Id(), input) - _, err = conn.DeleteLocalGatewayRouteWithContext(ctx, input) + }) - if tfawserr.ErrCodeEquals(err, "InvalidRoute.NotFound") || tfawserr.ErrCodeEquals(err, "InvalidRouteTableID.NotFound") { + if tfawserr.ErrCodeEquals(err, errCodeInvalidRouteNotFound, errCodeInvalidLocalGatewayRouteTableIDNotFound) { return diags } @@ -172,49 +136,28 @@ func resourceLocalGatewayRouteDelete(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "deleting EC2 Local Gateway Route (%s): %s", d.Id(), err) } - return diags -} - -func DecodeLocalGatewayRouteID(id string) (string, string, error) { - parts := strings.Split(id, "_") - - if len(parts) != 2 { - return "", "", fmt.Errorf("Unexpected format of ID (%q), expected tgw-rtb-ID_DESTINATION", id) + if _, err := waitLocalGatewayRouteDeleted(ctx, conn, localGatewayRouteTableID, destination); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for EC2 Local Gateway Route (%s) delete: %s", d.Id(), err) } - return parts[0], parts[1], nil + return diags } -func GetLocalGatewayRoute(ctx context.Context, conn *ec2.EC2, localGatewayRouteTableID, destination string) (*ec2.LocalGatewayRoute, error) { - input := &ec2.SearchLocalGatewayRoutesInput{ - Filters: []*ec2.Filter{ - { - Name: aws.String(names.AttrType), - Values: aws.StringSlice([]string{"static"}), - }, - }, - LocalGatewayRouteTableId: aws.String(localGatewayRouteTableID), - } - - output, err := conn.SearchLocalGatewayRoutesWithContext(ctx, input) +const localGatewayRouteResourceIDSeparator = "_" - if err != nil { - return nil, err - } +func localGatewayRouteCreateResourceID(localGatewayRouteTableID, destinationCIDRBlock string) string { + parts := []string{localGatewayRouteTableID, destinationCIDRBlock} + id := strings.Join(parts, localGatewayRouteResourceIDSeparator) - if output == nil || len(output.Routes) == 0 { - return nil, nil - } + return id +} - for _, route := range 
output.Routes { - if route == nil { - continue - } +func localGatewayRouteParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, localGatewayRouteResourceIDSeparator) - if aws.StringValue(route.DestinationCidrBlock) == destination { - return route, nil - } + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil } - return nil, nil + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected LOCAL-GATEWAY-ROUTE-TABLE-ID%[2]sDESTINATION", id, localGatewayRouteResourceIDSeparator) } diff --git a/internal/service/ec2/outposts_local_gateway_route_table_data_source.go b/internal/service/ec2/outposts_local_gateway_route_table_data_source.go index 95c1e3da3f4..3ce74a8c09c 100644 --- a/internal/service/ec2/outposts_local_gateway_route_table_data_source.go +++ b/internal/service/ec2/outposts_local_gateway_route_table_data_source.go @@ -5,21 +5,23 @@ package ec2 import ( "context" - "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_local_gateway_route_table") -func DataSourceLocalGatewayRouteTable() *schema.Resource { +// @SDKDataSource("aws_ec2_local_gateway_route_table", name="Local Gateway Route Table") +// @Tags +// @Testing(tagsTest=false) +func dataSourceLocalGatewayRouteTable() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLocalGatewayRouteTableRead, @@ -28,49 +30,43 @@ func 
DataSourceLocalGatewayRouteTable() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "local_gateway_route_table_id": { + names.AttrFilter: customFiltersSchema(), + "local_gateway_id": { Type: schema.TypeString, Optional: true, Computed: true, }, - - "local_gateway_id": { + "local_gateway_route_table_id": { Type: schema.TypeString, Optional: true, Computed: true, }, - "outpost_arn": { Type: schema.TypeString, Optional: true, Computed: true, }, - names.AttrState: { Type: schema.TypeString, Optional: true, Computed: true, }, - names.AttrTags: tftags.TagsSchemaComputed(), - - names.AttrFilter: customFiltersSchema(), }, } } func dataSourceLocalGatewayRouteTableRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := meta.(*conns.AWSClient).EC2Client(ctx) - req := &ec2.DescribeLocalGatewayRouteTablesInput{} + input := &ec2.DescribeLocalGatewayRouteTablesInput{} if v, ok := d.GetOk("local_gateway_route_table_id"); ok { - req.LocalGatewayRouteTableIds = []*string{aws.String(v.(string))} + input.LocalGatewayRouteTableIds = []string{v.(string)} } - req.Filters = newAttributeFilterList( + input.Filters = newAttributeFilterListV2( map[string]string{ "local-gateway-id": d.Get("local_gateway_id").(string), "outpost-arn": d.Get("outpost_arn").(string), @@ -78,41 +74,32 @@ func dataSourceLocalGatewayRouteTableRead(ctx context.Context, d *schema.Resourc }, ) - req.Filters = append(req.Filters, newTagFilterList( - Tags(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), )...) - req.Filters = append(req.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) 
- if len(req.Filters) == 0 { + + if len(input.Filters) == 0 { // Don't send an empty filters list; the EC2 API won't accept it. - req.Filters = nil + input.Filters = nil } - log.Printf("[DEBUG] Reading AWS Local Gateway Route Table: %s", req) - resp, err := conn.DescribeLocalGatewayRouteTablesWithContext(ctx, req) + localGatewayRouteTable, err := findLocalGatewayRouteTable(ctx, conn, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "describing EC2 Local Gateway Route Tables: %s", err) - } - if resp == nil || len(resp.LocalGatewayRouteTables) == 0 { - return sdkdiag.AppendErrorf(diags, "no matching Local Gateway Route Table found") - } - if len(resp.LocalGatewayRouteTables) > 1 { - return sdkdiag.AppendErrorf(diags, "multiple Local Gateway Route Tables matched; use additional constraints to reduce matches to a single Local Gateway Route Table") + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EC2 Local Gateway Route Table", err)) } - localgatewayroutetable := resp.LocalGatewayRouteTables[0] + d.SetId(aws.ToString(localGatewayRouteTable.LocalGatewayRouteTableId)) + d.Set("local_gateway_id", localGatewayRouteTable.LocalGatewayId) + d.Set("local_gateway_route_table_id", localGatewayRouteTable.LocalGatewayRouteTableId) + d.Set("outpost_arn", localGatewayRouteTable.OutpostArn) + d.Set(names.AttrState, localGatewayRouteTable.State) - d.SetId(aws.StringValue(localgatewayroutetable.LocalGatewayRouteTableId)) - d.Set("local_gateway_id", localgatewayroutetable.LocalGatewayId) - d.Set("local_gateway_route_table_id", localgatewayroutetable.LocalGatewayRouteTableId) - d.Set("outpost_arn", localgatewayroutetable.OutpostArn) - d.Set(names.AttrState, localgatewayroutetable.State) - - if err := d.Set(names.AttrTags, KeyValueTags(ctx, localgatewayroutetable.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOutV2(ctx, localGatewayRouteTable.Tags) 
return diags } diff --git a/internal/service/ec2/outposts_local_gateway_route_table_vpc_association.go b/internal/service/ec2/outposts_local_gateway_route_table_vpc_association.go index 7ccc70fdc44..13b59d5e8f8 100644 --- a/internal/service/ec2/outposts_local_gateway_route_table_vpc_association.go +++ b/internal/service/ec2/outposts_local_gateway_route_table_vpc_association.go @@ -5,17 +5,18 @@ package ec2 import ( "context" - "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -23,12 +24,13 @@ import ( // @SDKResource("aws_ec2_local_gateway_route_table_vpc_association", name="Local Gateway Route Table VPC Association") // @Tags(identifierAttribute="id") // @Testing(tagsTest=false) -func ResourceLocalGatewayRouteTableVPCAssociation() *schema.Resource { +func resourceLocalGatewayRouteTableVPCAssociation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLocalGatewayRouteTableVPCAssociationCreate, ReadWithoutTimeout: resourceLocalGatewayRouteTableVPCAssociationRead, UpdateWithoutTimeout: resourceLocalGatewayRouteTableVPCAssociationUpdate, DeleteWithoutTimeout: resourceLocalGatewayRouteTableVPCAssociationDelete, + Importer: &schema.ResourceImporter{ StateContext: 
schema.ImportStatePassthroughContext, }, @@ -58,24 +60,24 @@ func ResourceLocalGatewayRouteTableVPCAssociation() *schema.Resource { func resourceLocalGatewayRouteTableVPCAssociationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) - req := &ec2.CreateLocalGatewayRouteTableVpcAssociationInput{ + input := &ec2.CreateLocalGatewayRouteTableVpcAssociationInput{ LocalGatewayRouteTableId: aws.String(d.Get("local_gateway_route_table_id").(string)), - TagSpecifications: getTagSpecificationsIn(ctx, ec2.ResourceTypeLocalGatewayRouteTableVpcAssociation), + TagSpecifications: getTagSpecificationsInV2(ctx, awstypes.ResourceTypeLocalGatewayRouteTableVpcAssociation), VpcId: aws.String(d.Get(names.AttrVPCID).(string)), } - output, err := conn.CreateLocalGatewayRouteTableVpcAssociationWithContext(ctx, req) + output, err := conn.CreateLocalGatewayRouteTableVpcAssociation(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating EC2 Local Gateway Route Table VPC Association: %s", err) } - d.SetId(aws.StringValue(output.LocalGatewayRouteTableVpcAssociation.LocalGatewayRouteTableVpcAssociationId)) + d.SetId(aws.ToString(output.LocalGatewayRouteTableVpcAssociation.LocalGatewayRouteTableVpcAssociationId)) - if _, err := WaitLocalGatewayRouteTableVPCAssociationAssociated(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EC2 Local Gateway Route Table VPC Association (%s) to associate: %s", d.Id(), err) + if _, err := waitLocalGatewayRouteTableVPCAssociationAssociated(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for EC2 Local Gateway Route Table VPC Association (%s) create: %s", d.Id(), err) } return append(diags, resourceLocalGatewayRouteTableVPCAssociationRead(ctx, d, meta)...) 
@@ -83,33 +85,26 @@ func resourceLocalGatewayRouteTableVPCAssociationCreate(ctx context.Context, d * func resourceLocalGatewayRouteTableVPCAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) - association, err := GetLocalGatewayRouteTableVPCAssociation(ctx, conn, d.Id()) + association, err := findLocalGatewayRouteTableVPCAssociationByID(ctx, conn, d.Id()) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EC2 Local Gateway Route Table VPC Association (%s): %s", d.Id(), err) - } - - if association == nil { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EC2 Local Gateway Route Table VPC Association (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if aws.StringValue(association.State) != ec2.RouteTableAssociationStateCodeAssociated { - log.Printf("[WARN] EC2 Local Gateway Route Table VPC Association (%s) status (%s), removing from state", d.Id(), aws.StringValue(association.State)) - d.SetId("") - return diags + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Local Gateway Route Table VPC Association (%s): %s", d.Id(), err) } d.Set("local_gateway_id", association.LocalGatewayId) d.Set("local_gateway_route_table_id", association.LocalGatewayRouteTableId) - - setTagsOut(ctx, association.Tags) - d.Set(names.AttrVPCID, association.VpcId) + setTagsOutV2(ctx, association.Tags) + return diags } @@ -123,15 +118,15 @@ func resourceLocalGatewayRouteTableVPCAssociationUpdate(ctx context.Context, d * func resourceLocalGatewayRouteTableVPCAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.DeleteLocalGatewayRouteTableVpcAssociationInput{ 
LocalGatewayRouteTableVpcAssociationId: aws.String(d.Id()), } - _, err := conn.DeleteLocalGatewayRouteTableVpcAssociationWithContext(ctx, input) + _, err := conn.DeleteLocalGatewayRouteTableVpcAssociation(ctx, input) - if tfawserr.ErrCodeEquals(err, "InvalidLocalGatewayRouteTableVpcAssociationID.NotFound") { + if tfawserr.ErrCodeEquals(err, errCodeInvalidLocalGatewayRouteTableVPCAssociationIDNotFound) { return diags } @@ -139,40 +134,9 @@ func resourceLocalGatewayRouteTableVPCAssociationDelete(ctx context.Context, d * return sdkdiag.AppendErrorf(diags, "deleting EC2 Local Gateway Route Table VPC Association (%s): %s", d.Id(), err) } - if _, err := WaitLocalGatewayRouteTableVPCAssociationDisassociated(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EC2 Local Gateway Route Table VPC Association (%s) to disassociate: %s", d.Id(), err) + if _, err := waitLocalGatewayRouteTableVPCAssociationDisassociated(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for EC2 Local Gateway Route Table VPC Association (%s) delete: %s", d.Id(), err) } return diags } - -func GetLocalGatewayRouteTableVPCAssociation(ctx context.Context, conn *ec2.EC2, localGatewayRouteTableVpcAssociationID string) (*ec2.LocalGatewayRouteTableVpcAssociation, error) { - input := &ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput{ - LocalGatewayRouteTableVpcAssociationIds: aws.StringSlice([]string{localGatewayRouteTableVpcAssociationID}), - } - - output, err := conn.DescribeLocalGatewayRouteTableVpcAssociationsWithContext(ctx, input) - - if err != nil { - return nil, err - } - - if output == nil { - return nil, fmt.Errorf("empty response") - } - - var association *ec2.LocalGatewayRouteTableVpcAssociation - - for _, outputAssociation := range output.LocalGatewayRouteTableVpcAssociations { - if outputAssociation == nil { - continue - } - - if aws.StringValue(outputAssociation.LocalGatewayRouteTableVpcAssociationId) == 
localGatewayRouteTableVpcAssociationID { - association = outputAssociation - break - } - } - - return association, nil -} diff --git a/internal/service/ec2/outposts_local_gateway_route_table_vpc_association_test.go b/internal/service/ec2/outposts_local_gateway_route_table_vpc_association_test.go index 93d212dcd65..5c152e08d92 100644 --- a/internal/service/ec2/outposts_local_gateway_route_table_vpc_association_test.go +++ b/internal/service/ec2/outposts_local_gateway_route_table_vpc_association_test.go @@ -8,14 +8,13 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -119,55 +118,41 @@ func TestAccEC2OutpostsLocalGatewayRouteTableVPCAssociation_tags(t *testing.T) { }) } -func testAccCheckLocalGatewayRouteTableVPCAssociationExists(ctx context.Context, resourceName string) resource.TestCheckFunc { +func testAccCheckLocalGatewayRouteTableVPCAssociationExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("%s: missing resource ID", resourceName) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) - association, err := 
tfec2.GetLocalGatewayRouteTableVPCAssociation(ctx, conn, rs.Primary.ID) - - if err != nil { - return err - } + _, err := tfec2.FindLocalGatewayRouteTableVPCAssociationByID(ctx, conn, rs.Primary.ID) - if association == nil { - return fmt.Errorf("EC2 Local Gateway Route Table VPC Association (%s) not found", rs.Primary.ID) - } - - if aws.StringValue(association.State) != ec2.RouteTableAssociationStateCodeAssociated { - return fmt.Errorf("EC2 Local Gateway Route Table VPC Association (%s) not in associated state: %s", rs.Primary.ID, aws.StringValue(association.State)) - } - - return nil + return err } } func testAccCheckLocalGatewayRouteTableVPCAssociationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_ec2_local_gateway_route_table_vpc_association" { continue } - association, err := tfec2.GetLocalGatewayRouteTableVPCAssociation(ctx, conn, rs.Primary.ID) + _, err := tfec2.FindLocalGatewayRouteTableVPCAssociationByID(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } if err != nil { return err } - if association != nil && aws.StringValue(association.State) != ec2.RouteTableAssociationStateCodeDisassociated { - return fmt.Errorf("EC2 Local Gateway Route Table VPC Association (%s) still exists in state: %s", rs.Primary.ID, aws.StringValue(association.State)) - } + return fmt.Errorf("EC2 Local Gateway Route Table VPC Association still exists: %s", rs.Primary.ID) } return nil diff --git a/internal/service/ec2/outposts_local_gateway_route_tables_data_source.go b/internal/service/ec2/outposts_local_gateway_route_tables_data_source.go index e5527a4e4e6..2857fab2869 100644 --- a/internal/service/ec2/outposts_local_gateway_route_tables_data_source.go +++ 
b/internal/service/ec2/outposts_local_gateway_route_tables_data_source.go @@ -7,18 +7,20 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_local_gateway_route_tables") -func DataSourceLocalGatewayRouteTables() *schema.Resource { +// @SDKDataSource("aws_ec2_local_gateway_route_tables", name="Local Gateway Route Table") +func dataSourceLocalGatewayRouteTables() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLocalGatewayRouteTablesRead, @@ -40,15 +42,15 @@ func DataSourceLocalGatewayRouteTables() *schema.Resource { func dataSourceLocalGatewayRouteTablesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.DescribeLocalGatewayRouteTablesInput{} - input.Filters = append(input.Filters, newTagFilterList( - Tags(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), )...) - input.Filters = append(input.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) 
@@ -56,20 +58,16 @@ func dataSourceLocalGatewayRouteTablesRead(ctx context.Context, d *schema.Resour input.Filters = nil } - output, err := FindLocalGatewayRouteTables(ctx, conn, input) + output, err := findLocalGatewayRouteTables(ctx, conn, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading EC2 Local Gateway Route Tables: %s", err) } - var routeTableIDs []string - - for _, v := range output { - routeTableIDs = append(routeTableIDs, aws.StringValue(v.LocalGatewayRouteTableId)) - } - d.SetId(meta.(*conns.AWSClient).Region) - d.Set(names.AttrIDs, routeTableIDs) + d.Set(names.AttrIDs, tfslices.ApplyToAll(output, func(v awstypes.LocalGatewayRouteTable) string { + return aws.ToString(v.LocalGatewayRouteTableId) + })) return diags } diff --git a/internal/service/ec2/outposts_local_gateway_route_test.go b/internal/service/ec2/outposts_local_gateway_route_test.go index 19926c4611a..016b5d70543 100644 --- a/internal/service/ec2/outposts_local_gateway_route_test.go +++ b/internal/service/ec2/outposts_local_gateway_route_test.go @@ -8,13 +8,13 @@ import ( "fmt" "testing" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -74,57 +74,33 @@ func TestAccEC2OutpostsLocalGatewayRoute_disappears(t *testing.T) { }) } -func testAccCheckLocalGatewayRouteExists(ctx context.Context, resourceName string) resource.TestCheckFunc { +func testAccCheckLocalGatewayRouteExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s 
*terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No EC2 Local Gateway Route ID is set") - } - - localGatewayRouteTableID, destination, err := tfec2.DecodeLocalGatewayRouteID(rs.Primary.ID) - - if err != nil { - return err - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) - route, err := tfec2.GetLocalGatewayRoute(ctx, conn, localGatewayRouteTableID, destination) - - if err != nil { - return err - } - - if route == nil { - return fmt.Errorf("EC2 Local Gateway Route (%s) not found", rs.Primary.ID) - } + _, err := tfec2.FindLocalGatewayRouteByTwoPartKey(ctx, conn, rs.Primary.Attributes["local_gateway_route_table_id"], rs.Primary.Attributes["destination_cidr_block"]) - return nil + return err } } func testAccCheckLocalGatewayRouteDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_ec2_local_gateway_route" { continue } - localGatewayRouteTableID, destination, err := tfec2.DecodeLocalGatewayRouteID(rs.Primary.ID) - - if err != nil { - return err - } + _, err := tfec2.FindLocalGatewayRouteByTwoPartKey(ctx, conn, rs.Primary.Attributes["local_gateway_route_table_id"], rs.Primary.Attributes["destination_cidr_block"]) - route, err := tfec2.GetLocalGatewayRoute(ctx, conn, localGatewayRouteTableID, destination) - - if tfawserr.ErrCodeEquals(err, "InvalidRouteTableID.NotFound") { + if tfresource.NotFound(err) { continue } @@ -132,11 +108,7 @@ func testAccCheckLocalGatewayRouteDestroy(ctx context.Context) resource.TestChec return err } - if 
route == nil { - continue - } - - return fmt.Errorf("EC2 Local Gateway Route (%s) still exists", rs.Primary.ID) + return fmt.Errorf("EC2 Local Gateway Route still exists: %s", rs.Primary.ID) } return nil diff --git a/internal/service/ec2/outposts_local_gateway_virtual_interface_data_source.go b/internal/service/ec2/outposts_local_gateway_virtual_interface_data_source.go index f0339b7f755..2c35a38d32b 100644 --- a/internal/service/ec2/outposts_local_gateway_virtual_interface_data_source.go +++ b/internal/service/ec2/outposts_local_gateway_virtual_interface_data_source.go @@ -7,18 +7,21 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_local_gateway_virtual_interface") -func DataSourceLocalGatewayVirtualInterface() *schema.Resource { +// @SDKDataSource("aws_ec2_local_gateway_virtual_interface", name="Local Gateway Virtual Interface") +// @Tags +// @Testing(tagsTest=false) +func dataSourceLocalGatewayVirtualInterface() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLocalGatewayVirtualInterfaceRead, @@ -69,20 +72,19 @@ func DataSourceLocalGatewayVirtualInterface() *schema.Resource { func dataSourceLocalGatewayVirtualInterfaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := 
meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.DescribeLocalGatewayVirtualInterfacesInput{} if v, ok := d.GetOk(names.AttrID); ok { - input.LocalGatewayVirtualInterfaceIds = []*string{aws.String(v.(string))} + input.LocalGatewayVirtualInterfaceIds = []string{v.(string)} } - input.Filters = append(input.Filters, newTagFilterList( - Tags(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), )...) - input.Filters = append(input.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) @@ -91,34 +93,21 @@ func dataSourceLocalGatewayVirtualInterfaceRead(ctx context.Context, d *schema.R input.Filters = nil } - output, err := conn.DescribeLocalGatewayVirtualInterfacesWithContext(ctx, input) + localGatewayVirtualInterface, err := findLocalGatewayVirtualInterface(ctx, conn, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "describing EC2 Local Gateway Virtual Interfaces: %s", err) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EC2 Local Gateway Virtual Interface", err)) } - if output == nil || len(output.LocalGatewayVirtualInterfaces) == 0 { - return sdkdiag.AppendErrorf(diags, "no matching EC2 Local Gateway Virtual Interface found") - } - - if len(output.LocalGatewayVirtualInterfaces) > 1 { - return sdkdiag.AppendErrorf(diags, "multiple EC2 Local Gateway Virtual Interfaces matched; use additional constraints to reduce matches to a single EC2 Local Gateway Virtual Interface") - } - - localGatewayVirtualInterface := output.LocalGatewayVirtualInterfaces[0] - - d.SetId(aws.StringValue(localGatewayVirtualInterface.LocalGatewayVirtualInterfaceId)) + d.SetId(aws.ToString(localGatewayVirtualInterface.LocalGatewayVirtualInterfaceId)) d.Set("local_address", localGatewayVirtualInterface.LocalAddress) 
d.Set("local_bgp_asn", localGatewayVirtualInterface.LocalBgpAsn) d.Set("local_gateway_id", localGatewayVirtualInterface.LocalGatewayId) d.Set("peer_address", localGatewayVirtualInterface.PeerAddress) d.Set("peer_bgp_asn", localGatewayVirtualInterface.PeerBgpAsn) - - if err := d.Set(names.AttrTags, KeyValueTags(ctx, localGatewayVirtualInterface.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } - d.Set("vlan", localGatewayVirtualInterface.Vlan) + setTagsOutV2(ctx, localGatewayVirtualInterface.Tags) + return diags } diff --git a/internal/service/ec2/outposts_local_gateway_virtual_interface_group_data_source.go b/internal/service/ec2/outposts_local_gateway_virtual_interface_group_data_source.go index 81c0117bbd5..92cf4f43df0 100644 --- a/internal/service/ec2/outposts_local_gateway_virtual_interface_group_data_source.go +++ b/internal/service/ec2/outposts_local_gateway_virtual_interface_group_data_source.go @@ -7,18 +7,21 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_local_gateway_virtual_interface_group") -func DataSourceLocalGatewayVirtualInterfaceGroup() *schema.Resource { +// @SDKDataSource("aws_ec2_local_gateway_virtual_interface_group", name="Local Gateway Virtual Interface Group") +// @Tags +// @Testing(tagsTest=false) +func dataSourceLocalGatewayVirtualInterfaceGroup() *schema.Resource { 
return &schema.Resource{ ReadWithoutTimeout: dataSourceLocalGatewayVirtualInterfaceGroupRead, @@ -50,26 +53,25 @@ func DataSourceLocalGatewayVirtualInterfaceGroup() *schema.Resource { func dataSourceLocalGatewayVirtualInterfaceGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput{} if v, ok := d.GetOk(names.AttrID); ok { - input.LocalGatewayVirtualInterfaceGroupIds = []*string{aws.String(v.(string))} + input.LocalGatewayVirtualInterfaceGroupIds = []string{v.(string)} } - input.Filters = newAttributeFilterList( + input.Filters = newAttributeFilterListV2( map[string]string{ "local-gateway-id": d.Get("local_gateway_id").(string), }, ) - input.Filters = append(input.Filters, newTagFilterList( - Tags(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), )...) - input.Filters = append(input.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) 
@@ -78,33 +80,18 @@ func dataSourceLocalGatewayVirtualInterfaceGroupRead(ctx context.Context, d *sch input.Filters = nil } - output, err := conn.DescribeLocalGatewayVirtualInterfaceGroupsWithContext(ctx, input) + localGatewayVirtualInterfaceGroup, err := findLocalGatewayVirtualInterfaceGroup(ctx, conn, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "describing EC2 Local Gateway Virtual Interface Groups: %s", err) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EC2 Local Gateway Virtual Interface Group", err)) } - if output == nil || len(output.LocalGatewayVirtualInterfaceGroups) == 0 { - return sdkdiag.AppendErrorf(diags, "no matching EC2 Local Gateway Virtual Interface Group found") - } - - if len(output.LocalGatewayVirtualInterfaceGroups) > 1 { - return sdkdiag.AppendErrorf(diags, "multiple EC2 Local Gateway Virtual Interface Groups matched; use additional constraints to reduce matches to a single EC2 Local Gateway Virtual Interface Group") - } - - localGatewayVirtualInterfaceGroup := output.LocalGatewayVirtualInterfaceGroups[0] - - d.SetId(aws.StringValue(localGatewayVirtualInterfaceGroup.LocalGatewayVirtualInterfaceGroupId)) + d.SetId(aws.ToString(localGatewayVirtualInterfaceGroup.LocalGatewayVirtualInterfaceGroupId)) d.Set("local_gateway_id", localGatewayVirtualInterfaceGroup.LocalGatewayId) d.Set("local_gateway_virtual_interface_group_id", localGatewayVirtualInterfaceGroup.LocalGatewayVirtualInterfaceGroupId) + d.Set("local_gateway_virtual_interface_ids", localGatewayVirtualInterfaceGroup.LocalGatewayVirtualInterfaceIds) - if err := d.Set("local_gateway_virtual_interface_ids", aws.StringValueSlice(localGatewayVirtualInterfaceGroup.LocalGatewayVirtualInterfaceIds)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting local_gateway_virtual_interface_ids: %s", err) - } - - if err := d.Set(names.AttrTags, KeyValueTags(ctx, localGatewayVirtualInterfaceGroup.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); 
err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOutV2(ctx, localGatewayVirtualInterfaceGroup.Tags) return diags } diff --git a/internal/service/ec2/outposts_local_gateway_virtual_interface_groups_data_source.go b/internal/service/ec2/outposts_local_gateway_virtual_interface_groups_data_source.go index 7e436aadda5..562fb6ad5ab 100644 --- a/internal/service/ec2/outposts_local_gateway_virtual_interface_groups_data_source.go +++ b/internal/service/ec2/outposts_local_gateway_virtual_interface_groups_data_source.go @@ -7,8 +7,8 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -17,8 +17,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_local_gateway_virtual_interface_groups") -func DataSourceLocalGatewayVirtualInterfaceGroups() *schema.Resource { +// @SDKDataSource("aws_ec2_local_gateway_virtual_interface_groups", name="Local Gateway Virtual Interface Groups") +func dataSourceLocalGatewayVirtualInterfaceGroups() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLocalGatewayVirtualInterfaceGroupsRead, @@ -38,22 +38,22 @@ func DataSourceLocalGatewayVirtualInterfaceGroups() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - names.AttrTags: tftags.TagsSchemaComputed(), + names.AttrTags: tftags.TagsSchema(), }, } } func dataSourceLocalGatewayVirtualInterfaceGroupsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := 
&ec2.DescribeLocalGatewayVirtualInterfaceGroupsInput{} - input.Filters = append(input.Filters, newTagFilterList( - Tags(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), )...) - input.Filters = append(input.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) @@ -61,7 +61,7 @@ func dataSourceLocalGatewayVirtualInterfaceGroupsRead(ctx context.Context, d *sc input.Filters = nil } - output, err := FindLocalGatewayVirtualInterfaceGroups(ctx, conn, input) + output, err := findLocalGatewayVirtualInterfaceGroups(ctx, conn, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading EC2 Local Gateway Virtual Interface Groups: %s", err) @@ -70,8 +70,8 @@ func dataSourceLocalGatewayVirtualInterfaceGroupsRead(ctx context.Context, d *sc var groupIDs, interfaceIDs []string for _, v := range output { - groupIDs = append(groupIDs, aws.StringValue(v.LocalGatewayVirtualInterfaceGroupId)) - interfaceIDs = append(interfaceIDs, aws.StringValueSlice(v.LocalGatewayVirtualInterfaceIds)...) + groupIDs = append(groupIDs, aws.ToString(v.LocalGatewayVirtualInterfaceGroupId)) + interfaceIDs = append(interfaceIDs, v.LocalGatewayVirtualInterfaceIds...) 
} d.SetId(meta.(*conns.AWSClient).Region) diff --git a/internal/service/ec2/outposts_local_gateways_data_source.go b/internal/service/ec2/outposts_local_gateways_data_source.go index 6d03dcb99b7..bd05d2b533d 100644 --- a/internal/service/ec2/outposts_local_gateways_data_source.go +++ b/internal/service/ec2/outposts_local_gateways_data_source.go @@ -7,18 +7,20 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_local_gateways") -func DataSourceLocalGateways() *schema.Resource { +// @SDKDataSource("aws_ec2_local_gateways", name="Local Gateways") +func dataSourceLocalGateways() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLocalGatewaysRead, @@ -40,15 +42,15 @@ func DataSourceLocalGateways() *schema.Resource { func dataSourceLocalGatewaysRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.DescribeLocalGatewaysInput{} - input.Filters = append(input.Filters, newTagFilterList( - Tags(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), + input.Filters = append(input.Filters, newTagFilterListV2( + TagsV2(tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{}))), )...) 
- input.Filters = append(input.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) @@ -56,20 +58,16 @@ func dataSourceLocalGatewaysRead(ctx context.Context, d *schema.ResourceData, me input.Filters = nil } - output, err := FindLocalGateways(ctx, conn, input) + output, err := findLocalGateways(ctx, conn, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading EC2 Local Gateways: %s", err) } - var gatewayIDs []string - - for _, v := range output { - gatewayIDs = append(gatewayIDs, aws.StringValue(v.LocalGatewayId)) - } - d.SetId(meta.(*conns.AWSClient).Region) - d.Set(names.AttrIDs, gatewayIDs) + d.Set(names.AttrIDs, tfslices.ApplyToAll(output, func(v awstypes.LocalGateway) string { + return aws.ToString(v.LocalGatewayId) + })) return diags } diff --git a/internal/service/ec2/service_endpoint_resolver_gen.go b/internal/service/ec2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..0b1b24d201c --- /dev/null +++ b/internal/service/ec2/service_endpoint_resolver_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package ec2 + +import ( + "context" + "fmt" + "net" + "net/url" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ec2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ec2" + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} + +var _ ec2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver ec2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: ec2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params ec2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + 
"tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up ec2 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*ec2_sdkv2.Options) { + return func(o *ec2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ec2/service_endpoints_gen_test.go b/internal/service/ec2/service_endpoints_gen_test.go index b68337d9be6..6eab3c485f4 100644 --- a/internal/service/ec2/service_endpoints_gen_test.go +++ b/internal/service/ec2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -255,24 +257,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }) } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) 
(url.URL, error) { r := ec2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ec2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := ec2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ec2_sdkv2.EndpointParameters{ @@ -280,14 +282,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -364,16 +366,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint 
%q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ec2/service_package.go b/internal/service/ec2/service_package.go index 4dc23ca9249..2fc47c981f2 100644 --- a/internal/service/ec2/service_package.go +++ b/internal/service/ec2/service_package.go @@ -10,11 +10,8 @@ import ( retry_sdkv2 "github.com/aws/aws-sdk-go-v2/aws/retry" ec2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ec2" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - request_sdkv1 "github.com/aws/aws-sdk-go/aws/request" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" ec2_sdkv1 "github.com/aws/aws-sdk-go/service/ec2" - tfawserr_sdkv1 "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" tfawserr_sdkv2 "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -32,70 +29,48 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*e "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return ec2_sdkv1.New(sess.Copy(&cfg)), nil } -// CustomizeConn customizes a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) CustomizeConn(ctx context.Context, conn *ec2_sdkv1.EC2) (*ec2_sdkv1.EC2, error) { - conn.Handlers.Retry.PushBack(func(r *request_sdkv1.Request) { - switch err := r.Error; r.Operation.Name { - case "RunInstances": - // `InsufficientInstanceCapacity` error has status code 500 and AWS SDK try retry this error by default. 
- if tfawserr_sdkv1.ErrCodeEquals(err, errCodeInsufficientInstanceCapacity) { - r.Retryable = aws_sdkv1.Bool(false) - } - } - }) - - return conn, nil -} - // NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*ec2_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return ec2_sdkv2.NewFromConfig(cfg, func(o *ec2_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws_sdkv2.RetryerV2), retry_sdkv2.IsErrorRetryableFunc(func(err error) aws_sdkv2.Ternary { - if tfawserr_sdkv2.ErrMessageContains(err, errCodeInvalidParameterValue, "This call cannot be completed because there are pending VPNs or Virtual Interfaces") { // AttachVpnGateway, DetachVpnGateway - return aws_sdkv2.TrueTernary - } - - if tfawserr_sdkv2.ErrMessageContains(err, errCodeOperationNotPermitted, "Endpoint cannot be created while another endpoint is being created") { // CreateClientVpnEndpoint - return aws_sdkv2.TrueTernary - } - - if tfawserr_sdkv2.ErrMessageContains(err, errCodeConcurrentMutationLimitExceeded, "Cannot initiate another change for this endpoint at this time") { // CreateClientVpnRoute, DeleteClientVpnRoute - return aws_sdkv2.TrueTernary - } - - if tfawserr_sdkv2.ErrMessageContains(err, errCodeVPNConnectionLimitExceeded, "maximum number of mutating objects has been reached") { // CreateVpnConnection - return aws_sdkv2.TrueTernary - } - - if tfawserr_sdkv2.ErrMessageContains(err, 
errCodeVPNGatewayLimitExceeded, "maximum number of mutating objects has been reached") { // CreateVpnGateway - return aws_sdkv2.TrueTernary - } - - return aws_sdkv2.UnknownTernary // Delegate to configured Retryer. - })) - }), nil + return ec2_sdkv2.NewFromConfig(cfg, + ec2_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *ec2_sdkv2.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws_sdkv2.RetryerV2), retry_sdkv2.IsErrorRetryableFunc(func(err error) aws_sdkv2.Ternary { + if tfawserr_sdkv2.ErrMessageContains(err, errCodeInvalidParameterValue, "This call cannot be completed because there are pending VPNs or Virtual Interfaces") { // AttachVpnGateway, DetachVpnGateway + return aws_sdkv2.TrueTernary + } + + if tfawserr_sdkv2.ErrCodeEquals(err, errCodeInsufficientInstanceCapacity) { // CreateCapacityReservation, RunInstances + return aws_sdkv2.TrueTernary + } + + if tfawserr_sdkv2.ErrMessageContains(err, errCodeOperationNotPermitted, "Endpoint cannot be created while another endpoint is being created") { // CreateClientVpnEndpoint + return aws_sdkv2.TrueTernary + } + + if tfawserr_sdkv2.ErrMessageContains(err, errCodeConcurrentMutationLimitExceeded, "Cannot initiate another change for this endpoint at this time") { // CreateClientVpnRoute, DeleteClientVpnRoute + return aws_sdkv2.TrueTernary + } + + if tfawserr_sdkv2.ErrMessageContains(err, errCodeVPNConnectionLimitExceeded, "maximum number of mutating objects has been reached") { // CreateVpnConnection + return aws_sdkv2.TrueTernary + } + + if tfawserr_sdkv2.ErrMessageContains(err, errCodeVPNGatewayLimitExceeded, "maximum number of mutating objects has been reached") { // CreateVpnGateway + return aws_sdkv2.TrueTernary + } + + return aws_sdkv2.UnknownTernary // Delegate to configured Retryer. 
+ })) + }, + ), nil } diff --git a/internal/service/ec2/service_package_gen.go b/internal/service/ec2/service_package_gen.go index 54d1fe64244..3bc46b33c5d 100644 --- a/internal/service/ec2/service_package_gen.go +++ b/internal/service/ec2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ec2 @@ -150,12 +150,15 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceCoIPPool, + Factory: dataSourceCoIPPool, TypeName: "aws_ec2_coip_pool", + Name: "COIP Pool", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceCoIPPools, + Factory: dataSourceCoIPPools, TypeName: "aws_ec2_coip_pools", + Name: "COIP Pools", }, { Factory: dataSourceHost, @@ -184,32 +187,43 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac Name: "Instance Types", }, { - Factory: DataSourceLocalGateway, + Factory: dataSourceLocalGateway, TypeName: "aws_ec2_local_gateway", + Name: "Local Gateway", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceLocalGatewayRouteTable, + Factory: dataSourceLocalGatewayRouteTable, TypeName: "aws_ec2_local_gateway_route_table", + Name: "Local Gateway Route Table", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceLocalGatewayRouteTables, + Factory: dataSourceLocalGatewayRouteTables, TypeName: "aws_ec2_local_gateway_route_tables", + Name: "Local Gateway Route Table", }, { - Factory: DataSourceLocalGatewayVirtualInterface, + Factory: dataSourceLocalGatewayVirtualInterface, TypeName: "aws_ec2_local_gateway_virtual_interface", + Name: "Local Gateway Virtual Interface", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceLocalGatewayVirtualInterfaceGroup, + Factory: dataSourceLocalGatewayVirtualInterfaceGroup, TypeName: 
"aws_ec2_local_gateway_virtual_interface_group", + Name: "Local Gateway Virtual Interface Group", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceLocalGatewayVirtualInterfaceGroups, + Factory: dataSourceLocalGatewayVirtualInterfaceGroups, TypeName: "aws_ec2_local_gateway_virtual_interface_groups", + Name: "Local Gateway Virtual Interface Groups", }, { - Factory: DataSourceLocalGateways, + Factory: dataSourceLocalGateways, TypeName: "aws_ec2_local_gateways", + Name: "Local Gateways", }, { Factory: DataSourceManagedPrefixList, @@ -220,12 +234,16 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_ec2_managed_prefix_lists", }, { - Factory: DataSourceNetworkInsightsAnalysis, + Factory: dataSourceNetworkInsightsAnalysis, TypeName: "aws_ec2_network_insights_analysis", + Name: "Network Insights Analysis", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceNetworkInsightsPath, + Factory: dataSourceNetworkInsightsPath, TypeName: "aws_ec2_network_insights_path", + Name: "Network Insights Path", + Tags: &types.ServicePackageResourceTags{}, }, { Factory: dataSourcePublicIPv4Pool, @@ -294,6 +312,11 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac Name: "Transit Gateway Peering Attachment", Tags: &types.ServicePackageResourceTags{}, }, + { + Factory: dataSourceTransitGatewayPeeringAttachments, + TypeName: "aws_ec2_transit_gateway_peering_attachments", + Name: "Transit Gateway Peering Attachments", + }, { Factory: dataSourceTransitGatewayRouteTable, TypeName: "aws_ec2_transit_gateway_route_table", @@ -690,11 +713,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka Name: "Instance State", }, { - Factory: ResourceLocalGatewayRoute, + Factory: resourceLocalGatewayRoute, TypeName: "aws_ec2_local_gateway_route", }, { - Factory: ResourceLocalGatewayRouteTableVPCAssociation, + Factory: 
resourceLocalGatewayRouteTableVPCAssociation, TypeName: "aws_ec2_local_gateway_route_table_vpc_association", Name: "Local Gateway Route Table VPC Association", Tags: &types.ServicePackageResourceTags{ @@ -714,7 +737,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka TypeName: "aws_ec2_managed_prefix_list_entry", }, { - Factory: ResourceNetworkInsightsAnalysis, + Factory: resourceNetworkInsightsAnalysis, TypeName: "aws_ec2_network_insights_analysis", Name: "Network Insights Analysis", Tags: &types.ServicePackageResourceTags{ @@ -722,7 +745,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceNetworkInsightsPath, + Factory: resourceNetworkInsightsPath, TypeName: "aws_ec2_network_insights_path", Name: "Network Insights Path", Tags: &types.ServicePackageResourceTags{ @@ -1029,7 +1052,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka TypeName: "aws_route_table_association", }, { - Factory: ResourceSecurityGroup, + Factory: resourceSecurityGroup, TypeName: "aws_security_group", Name: "Security Group", Tags: &types.ServicePackageResourceTags{ diff --git a/internal/service/ec2/status.go b/internal/service/ec2/status.go index 8099827ce96..ae16810ca6c 100644 --- a/internal/service/ec2/status.go +++ b/internal/service/ec2/status.go @@ -17,40 +17,6 @@ import ( // Move functions to statusv2.go as they are migrated to AWS SDK for Go v2. 
// -// StatusLocalGatewayRouteTableVPCAssociationState fetches the LocalGatewayRouteTableVpcAssociation and its State -func StatusLocalGatewayRouteTableVPCAssociationState(ctx context.Context, conn *ec2.EC2, localGatewayRouteTableVpcAssociationID string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - input := &ec2.DescribeLocalGatewayRouteTableVpcAssociationsInput{ - LocalGatewayRouteTableVpcAssociationIds: aws.StringSlice([]string{localGatewayRouteTableVpcAssociationID}), - } - - output, err := conn.DescribeLocalGatewayRouteTableVpcAssociationsWithContext(ctx, input) - - if err != nil { - return nil, "", err - } - - var association *ec2.LocalGatewayRouteTableVpcAssociation - - for _, outputAssociation := range output.LocalGatewayRouteTableVpcAssociations { - if outputAssociation == nil { - continue - } - - if aws.StringValue(outputAssociation.LocalGatewayRouteTableVpcAssociationId) == localGatewayRouteTableVpcAssociationID { - association = outputAssociation - break - } - } - - if association == nil { - return association, ec2.RouteTableAssociationStateCodeDisassociated, nil - } - - return association, aws.StringValue(association.State), nil - } -} - func StatusNATGatewayState(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := FindNATGatewayByID(ctx, conn, id) @@ -378,22 +344,6 @@ func StatusManagedPrefixListState(ctx context.Context, conn *ec2.EC2, id string) } } -func StatusNetworkInsightsAnalysis(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindNetworkInsightsAnalysisByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - func StatusNetworkInterfaceStatus(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { return 
func() (interface{}, string, error) { output, err := FindNetworkInterfaceByID(ctx, conn, id) diff --git a/internal/service/ec2/statusv2.go b/internal/service/ec2/statusv2.go index 93ef7bd3e25..a9131d8892c 100644 --- a/internal/service/ec2/statusv2.go +++ b/internal/service/ec2/statusv2.go @@ -220,6 +220,38 @@ func statusInstanceRootBlockDeviceDeleteOnTermination(ctx context.Context, conn } } +func statusLocalGatewayRoute(ctx context.Context, conn *ec2.Client, localGatewayRouteTableID, destinationCIDRBlock string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findLocalGatewayRouteByTwoPartKey(ctx, conn, localGatewayRouteTableID, destinationCIDRBlock) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil + } +} + +func statusLocalGatewayRouteTableVPCAssociation(ctx context.Context, conn *ec2.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findLocalGatewayRouteTableVPCAssociationByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.ToString(output.State), nil + } +} + func statusPlacementGroup(ctx context.Context, conn *ec2.Client, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findPlacementGroupByName(ctx, conn, name) @@ -1213,3 +1245,19 @@ func statusEBSSnapshotImport(ctx context.Context, conn *ec2.Client, id string) r return output.SnapshotTaskDetail, aws.ToString(output.SnapshotTaskDetail.Status), nil } } + +func statusNetworkInsightsAnalysis(ctx context.Context, conn *ec2.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findNetworkInsightsAnalysisByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err 
+ } + + return output, string(output.Status), nil + } +} diff --git a/internal/service/ec2/sweep.go b/internal/service/ec2/sweep.go index 0061f66bd95..56172128e7e 100644 --- a/internal/service/ec2/sweep.go +++ b/internal/service/ec2/sweep.go @@ -174,6 +174,16 @@ func RegisterSweepers() { }, }) + resource.AddTestSweepers("aws_ec2_managed_prefix_list", &resource.Sweeper{ + Name: "aws_ec2_managed_prefix_list", + F: sweepManagedPrefixLists, + Dependencies: []string{ + "aws_route_table", + "aws_security_group", + "aws_networkfirewall_rule_group", + }, + }) + resource.AddTestSweepers("aws_ec2_network_insights_path", &resource.Sweeper{ Name: "aws_ec2_network_insights_path", F: sweepNetworkInsightsPaths, @@ -1355,6 +1365,51 @@ func sweepNetworkInterfaces(region string) error { return nil } +func sweepManagedPrefixLists(region string) error { + ctx := sweep.Context(region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) + if err != nil { + return fmt.Errorf("error getting client: %s", err) + } + conn := client.EC2Client(ctx) + sweepResources := make([]sweep.Sweepable, 0) + + pages := ec2.NewDescribeManagedPrefixListsPaginator(conn, &ec2.DescribeManagedPrefixListsInput{}) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EC2 Managed Prefix List sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing EC2 Managed Prefix Lists (%s): %w", region, err) + } + + for _, v := range page.PrefixLists { + if aws.ToString(v.OwnerId) == "AWS" { + log.Printf("[DEBUG] Skipping AWS-managed prefix list: %s", aws.ToString(v.PrefixListName)) + continue + } + + r := ResourceManagedPrefixList() + d := r.Data(nil) + d.SetId(aws.ToString(v.PrefixListId)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + + if err != nil { + return fmt.Errorf("error sweeping EC2 
Managed Prefix Lists (%s): %w", region, err) + } + + return nil +} + func sweepNetworkInsightsPaths(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) @@ -1378,13 +1433,11 @@ func sweepNetworkInsightsPaths(region string) error { errs = multierror.Append(errs, fmt.Errorf("error listing Network Insights Paths for %s: %w", region, err)) } - for _, nip := range page.NetworkInsightsPaths { - id := aws.ToString(nip.NetworkInsightsPathId) - - r := ResourceNetworkInsightsPath() + for _, v := range page.NetworkInsightsPaths { + r := resourceNetworkInsightsPath() d := r.Data(nil) + d.SetId(aws.ToString(v.NetworkInsightsPathId)) - d.SetId(id) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } } diff --git a/internal/service/ec2/transitgateway_.go b/internal/service/ec2/transitgateway_.go index 6f033936cee..e68649de615 100644 --- a/internal/service/ec2/transitgateway_.go +++ b/internal/service/ec2/transitgateway_.go @@ -290,7 +290,10 @@ func resourceTransitGatewayDelete(ctx context.Context, d *schema.ResourceData, m conn := meta.(*conns.AWSClient).EC2Client(ctx) log.Printf("[DEBUG] Deleting EC2 Transit Gateway: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, TransitGatewayIncorrectStateTimeout, func() (interface{}, error) { + const ( + timeout = 5 * time.Minute + ) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { return conn.DeleteTransitGateway(ctx, &ec2.DeleteTransitGatewayInput{ TransitGatewayId: aws.String(d.Id()), }) diff --git a/internal/service/ec2/transitgateway_data_source_test.go b/internal/service/ec2/transitgateway_data_source_test.go index aa9abb203b9..a59231d3ed9 100644 --- a/internal/service/ec2/transitgateway_data_source_test.go +++ b/internal/service/ec2/transitgateway_data_source_test.go @@ -53,6 +53,9 @@ func TestAccTransitGatewayDataSource_serial(t *testing.T) { "IDDifferentAccount": 
testAccTransitGatewayPeeringAttachmentDataSource_ID_differentAccount, "Tags": testAccTransitGatewayPeeringAttachmentDataSource_Tags, }, + "PeeringAttachments": { + "Filter": testAccTransitGatewayPeeringAttachmentsDataSource_Filter, + }, "RouteTable": { "Filter": testAccTransitGatewayRouteTableDataSource_Filter, "ID": testAccTransitGatewayRouteTableDataSource_ID, diff --git a/internal/service/ec2/transitgateway_peering_attachment.go b/internal/service/ec2/transitgateway_peering_attachment.go index 900ead85ecd..760de81e61b 100644 --- a/internal/service/ec2/transitgateway_peering_attachment.go +++ b/internal/service/ec2/transitgateway_peering_attachment.go @@ -14,6 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -66,6 +67,22 @@ func resourceTransitGatewayPeeringAttachment() *schema.Resource { Required: true, ForceNew: true, }, + "options": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dynamic_routing": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.DynamicRoutingValue](), + }, + }, + }, + }, }, } } @@ -86,7 +103,10 @@ func resourceTransitGatewayPeeringAttachmentCreate(ctx context.Context, d *schem TransitGatewayId: aws.String(d.Get(names.AttrTransitGatewayID).(string)), } - log.Printf("[DEBUG] Creating EC2 Transit Gateway Peering Attachment: %+v", input) + if v, ok := d.GetOk("options"); ok { + input.Options = expandCreateTransitGatewayPeeringAttachmentRequestOptions(v.([]interface{})) + } + output, err := 
conn.CreateTransitGatewayPeeringAttachment(ctx, input) if err != nil { @@ -124,6 +144,10 @@ func resourceTransitGatewayPeeringAttachmentRead(ctx context.Context, d *schema. d.Set(names.AttrState, transitGatewayPeeringAttachment.State) d.Set(names.AttrTransitGatewayID, transitGatewayPeeringAttachment.RequesterTgwInfo.TransitGatewayId) + if err := d.Set("options", flattenTransitGatewayPeeringAttachmentOptions(transitGatewayPeeringAttachment.Options)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting options: %s", err) + } + setTagsOutV2(ctx, transitGatewayPeeringAttachment.Tags) return diags @@ -160,3 +184,29 @@ func resourceTransitGatewayPeeringAttachmentDelete(ctx context.Context, d *schem return diags } + +func expandCreateTransitGatewayPeeringAttachmentRequestOptions(tfMap []interface{}) *awstypes.CreateTransitGatewayPeeringAttachmentRequestOptions { + if len(tfMap) == 0 || tfMap[0] == nil { + return nil + } + + apiObject := &awstypes.CreateTransitGatewayPeeringAttachmentRequestOptions{} + + m := tfMap[0].(map[string]interface{}) + + if v, ok := m["dynamic_routing"].(string); ok { + apiObject.DynamicRouting = awstypes.DynamicRoutingValue(v) + } + + return apiObject +} + +func flattenTransitGatewayPeeringAttachmentOptions(apiObject *awstypes.TransitGatewayPeeringAttachmentOptions) []interface{} { + if apiObject == nil { + return nil + } + + return []interface{}{map[string]interface{}{ + "dynamic_routing": apiObject.DynamicRouting, + }} +} diff --git a/internal/service/ec2/transitgateway_peering_attachment_test.go b/internal/service/ec2/transitgateway_peering_attachment_test.go index 27ef1107a7e..b611795de13 100644 --- a/internal/service/ec2/transitgateway_peering_attachment_test.go +++ b/internal/service/ec2/transitgateway_peering_attachment_test.go @@ -43,6 +43,7 @@ func testAccTransitGatewayPeeringAttachment_basic(t *testing.T, semaphore tfsync Config: testAccTransitGatewayPeeringAttachmentConfig_sameAccount(rName), Check: 
resource.ComposeAggregateTestCheckFunc( testAccCheckTransitGatewayPeeringAttachmentExists(ctx, resourceName, &transitGatewayPeeringAttachment), + resource.TestCheckResourceAttr(resourceName, "options.#", acctest.Ct0), acctest.CheckResourceAttrAccountID(resourceName, "peer_account_id"), resource.TestCheckResourceAttr(resourceName, "peer_region", acctest.AlternateRegion()), resource.TestCheckResourceAttrPair(resourceName, "peer_transit_gateway_id", transitGatewayResourceNamePeer, names.AttrID), @@ -61,6 +62,43 @@ func testAccTransitGatewayPeeringAttachment_basic(t *testing.T, semaphore tfsync }) } +func testAccTransitGatewayPeeringAttachment_options(t *testing.T, semaphore tfsync.Semaphore) { + acctest.Skip(t, "IncorrectState: You cannot create a dynamic peering attachment") + + ctx := acctest.Context(t) + var transitGatewayPeeringAttachment awstypes.TransitGatewayPeeringAttachment + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ec2_transit_gateway_peering_attachment.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckTransitGatewaySynchronize(t, semaphore) + acctest.PreCheck(ctx, t) + testAccPreCheckTransitGateway(ctx, t) + acctest.PreCheckMultipleRegion(t, 2) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckTransitGatewayPeeringAttachmentDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTransitGatewayPeeringAttachmentConfig_options_sameAccount(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTransitGatewayPeeringAttachmentExists(ctx, resourceName, &transitGatewayPeeringAttachment), + resource.TestCheckResourceAttr(resourceName, "options.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "options.dynamic_routing", "enable"), + ), + }, + { + Config: testAccTransitGatewayPeeringAttachmentConfig_options_sameAccount(rName), + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccTransitGatewayPeeringAttachment_disappears(t *testing.T, semaphore tfsync.Semaphore) { ctx := acctest.Context(t) var transitGatewayPeeringAttachment awstypes.TransitGatewayPeeringAttachment @@ -284,6 +322,20 @@ resource "aws_ec2_transit_gateway_peering_attachment" "test" { `, acctest.AlternateRegion())) } +func testAccTransitGatewayPeeringAttachmentConfig_options_sameAccount(rName string) string { + return acctest.ConfigCompose(testAccTransitGatewayPeeringAttachmentConfig_sameAccount_base(rName), fmt.Sprintf(` +resource "aws_ec2_transit_gateway_peering_attachment" "test" { + peer_region = %[1]q + peer_transit_gateway_id = aws_ec2_transit_gateway.peer.id + transit_gateway_id = aws_ec2_transit_gateway.test.id + + options { + dynamic_routing = "enable" + } +} +`, acctest.AlternateRegion())) +} + func testAccTransitGatewayPeeringAttachmentConfig_differentAccount(rName string) string { return acctest.ConfigCompose(testAccTransitGatewayPeeringAttachmentConfig_differentAccount_base(rName), fmt.Sprintf(` resource "aws_ec2_transit_gateway_peering_attachment" "test" { diff --git a/internal/service/ec2/transitgateway_peering_attachments_data_source.go b/internal/service/ec2/transitgateway_peering_attachments_data_source.go new file mode 100644 index 00000000000..4dc81275258 --- /dev/null +++ b/internal/service/ec2/transitgateway_peering_attachments_data_source.go @@ -0,0 +1,68 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ec2 + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKDataSource("aws_ec2_transit_gateway_peering_attachments", name="Transit Gateway Peering Attachments") +func dataSourceTransitGatewayPeeringAttachments() *schema.Resource { + return &schema.Resource{ + ReadWithoutTimeout: dataSourceTransitGatewayPeeringAttachmentsRead, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + names.AttrFilter: customFiltersSchema(), + names.AttrIDs: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceTransitGatewayPeeringAttachmentsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).EC2Client(ctx) + + input := &ec2.DescribeTransitGatewayPeeringAttachmentsInput{} + + input.Filters = append(input.Filters, newCustomFilterListV2( + d.Get(names.AttrFilter).(*schema.Set), + )...) + + if len(input.Filters) == 0 { + // Don't send an empty filters list; the EC2 API won't accept it. 
+ input.Filters = nil + } + + output, err := findTransitGatewayPeeringAttachments(ctx, conn, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EC2 Transit Gateway Peering Attachments: %s", err) + } + + d.SetId(meta.(*conns.AWSClient).Region) + d.Set(names.AttrIDs, tfslices.ApplyToAll(output, func(v awstypes.TransitGatewayPeeringAttachment) string { + return aws.ToString(v.TransitGatewayAttachmentId) + })) + + return diags +} diff --git a/internal/service/ec2/transitgateway_peering_attachments_data_source_test.go b/internal/service/ec2/transitgateway_peering_attachments_data_source_test.go new file mode 100644 index 00000000000..4a8010762ab --- /dev/null +++ b/internal/service/ec2/transitgateway_peering_attachments_data_source_test.go @@ -0,0 +1,54 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ec2_test + +import ( + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfsync "github.com/hashicorp/terraform-provider-aws/internal/experimental/sync" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func testAccTransitGatewayPeeringAttachmentsDataSource_Filter(t *testing.T, semaphore tfsync.Semaphore) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheckTransitGatewaySynchronize(t, semaphore) + acctest.PreCheck(ctx, t) + testAccPreCheckTransitGateway(ctx, t) + acctest.PreCheckMultipleRegion(t, 2) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccTransitGatewayPeeringAttachmentsDataSourceConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + 
acctest.CheckResourceAttrGreaterThanOrEqualValue("data.aws_ec2_transit_gateway_peering_attachments.all", "ids.#", 1), + resource.TestCheckResourceAttr("data.aws_ec2_transit_gateway_peering_attachments.by_attachment_id", "ids.#", acctest.Ct1), + ), + }, + }, + }) +} + +func testAccTransitGatewayPeeringAttachmentsDataSourceConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccTransitGatewayPeeringAttachmentConfig_sameAccount(rName), ` +data "aws_ec2_transit_gateway_peering_attachments" "all" { + depends_on = [aws_ec2_transit_gateway_peering_attachment.test] +} + +data "aws_ec2_transit_gateway_peering_attachments" "by_attachment_id" { + filter { + name = "transit-gateway-attachment-id" + values = [aws_ec2_transit_gateway_peering_attachment.test.id] + } +} +`) +} diff --git a/internal/service/ec2/transitgateway_test.go b/internal/service/ec2/transitgateway_test.go index beda26d785e..ee450eef24a 100644 --- a/internal/service/ec2/transitgateway_test.go +++ b/internal/service/ec2/transitgateway_test.go @@ -88,6 +88,7 @@ func TestAccTransitGateway_serial(t *testing.T) { acctest.CtDisappears: testAccTransitGatewayPeeringAttachment_disappears, "tags": testAccTransitGatewayPeeringAttachment_tags, "DifferentAccount": testAccTransitGatewayPeeringAttachment_differentAccount, + "options": testAccTransitGatewayPeeringAttachment_options, }, "PeeringAttachmentAccepter": { acctest.CtBasic: testAccTransitGatewayPeeringAttachmentAccepter_basic, diff --git a/internal/service/ec2/vpc_default_route_table_test.go b/internal/service/ec2/vpc_default_route_table_test.go index e4a4378d231..4c10ca7c3e7 100644 --- a/internal/service/ec2/vpc_default_route_table_test.go +++ b/internal/service/ec2/vpc_default_route_table_test.go @@ -11,7 +11,7 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" - "github.com/aws/aws-sdk-go/service/elbv2" + 
"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -1237,11 +1237,11 @@ resource "aws_default_route_table" "test" { } func testAccPreCheckELBv2GatewayLoadBalancer(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) - input := &elbv2.DescribeAccountLimitsInput{} + input := &elasticloadbalancingv2.DescribeAccountLimitsInput{} - output, err := conn.DescribeAccountLimitsWithContext(ctx, input) + output, err := conn.DescribeAccountLimits(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) @@ -1256,10 +1256,6 @@ func testAccPreCheckELBv2GatewayLoadBalancer(ctx context.Context, t *testing.T) } for _, limit := range output.Limits { - if limit == nil { - continue - } - if aws.ToString(limit.Name) == "gateway-load-balancers" { return } diff --git a/internal/service/ec2/vpc_network_insights_analysis.go b/internal/service/ec2/vpc_network_insights_analysis.go index d262b061990..7078ab583a0 100644 --- a/internal/service/ec2/vpc_network_insights_analysis.go +++ b/internal/service/ec2/vpc_network_insights_analysis.go @@ -8,9 +8,10 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -25,7 +26,7 @@ import ( // 
@SDKResource("aws_ec2_network_insights_analysis", name="Network Insights Analysis") // @Tags(identifierAttribute="id") // @Testing(tagsTest=false) -func ResourceNetworkInsightsAnalysis() *schema.Resource { +func resourceNetworkInsightsAnalysis() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceNetworkInsightsAnalysisCreate, ReadWithoutTimeout: resourceNetworkInsightsAnalysisRead, @@ -36,1400 +37,1404 @@ func ResourceNetworkInsightsAnalysis() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, - Schema: map[string]*schema.Schema{ - "alternate_path_hints": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "component_arn": { - Type: schema.TypeString, - Computed: true, - }, - "component_id": { - Type: schema.TypeString, - Computed: true, + SchemaFunc: func() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "alternate_path_hints": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_arn": { + Type: schema.TypeString, + Computed: true, + }, + "component_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - "explanations": networkInsightsAnalysisExplanationsSchema, - "filter_in_arns": { - Type: schema.TypeSet, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: verify.ValidARN, + names.AttrARN: { + Type: schema.TypeString, + Computed: true, }, - }, - "forward_path_components": networkInsightsAnalysisPathComponentsSchema, - "network_insights_path_id": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "path_found": { - Type: schema.TypeBool, - Computed: true, - }, - "return_path_components": networkInsightsAnalysisPathComponentsSchema, - "start_date": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrStatus: { - 
Type: schema.TypeString, - Computed: true, - }, - names.AttrStatusMessage: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), - "wait_for_completion": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "warning_message": { - Type: schema.TypeString, - Computed: true, - }, + "explanations": networkInsightsAnalysisExplanationsSchema(), + "filter_in_arns": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidARN, + }, + }, + "forward_path_components": networkInsightsAnalysisPathComponentsSchema(), + "network_insights_path_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "path_found": { + Type: schema.TypeBool, + Computed: true, + }, + "return_path_components": networkInsightsAnalysisPathComponentsSchema(), + "start_date": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrStatus: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrStatusMessage: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "wait_for_completion": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "warning_message": { + Type: schema.TypeString, + Computed: true, + }, + } }, CustomizeDiff: verify.SetTagsDiff, } } -var networkInsightsAnalysisPathComponentsSchema = &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "acl_rule": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr": { - Type: schema.TypeString, - Computed: true, - }, - "egress": { - Type: schema.TypeBool, - Computed: true, - }, - "port_range": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ 
- "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, +func networkInsightsAnalysisPathComponentsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "acl_rule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": { + Type: schema.TypeString, + Computed: true, + }, + "egress": { + Type: schema.TypeBool, + Computed: true, + }, + "port_range": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - names.AttrProtocol: { - Type: schema.TypeString, - Computed: true, - }, - "rule_action": { - Type: schema.TypeString, - Computed: true, - }, - "rule_number": { - Type: schema.TypeInt, - Computed: true, + names.AttrProtocol: { + Type: schema.TypeString, + Computed: true, + }, + "rule_action": { + Type: schema.TypeString, + Computed: true, + }, + "rule_number": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "additional_details": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "additional_detail_type": { - Type: schema.TypeString, - Computed: true, - }, - "component": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "additional_details": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "additional_detail_type": { + Type: schema.TypeString, + Computed: true, + }, + 
"component": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, }, }, }, - }, - "attached_to": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "attached_to": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "component": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "component": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "destination_vpc": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: 
schema.TypeString, - Computed: true, + "destination_vpc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "inbound_header": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "destination_addresses": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "destination_port_ranges": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, + "inbound_header": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "destination_port_ranges": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - names.AttrProtocol: { - Type: schema.TypeString, - Computed: true, - }, - "source_addresses": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "source_port_ranges": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, + names.AttrProtocol: { + Type: schema.TypeString, + Computed: true, + }, + "source_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: 
&schema.Schema{Type: schema.TypeString}, + }, + "source_port_ranges": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, }, }, }, - }, - "outbound_header": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "destination_addresses": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "destination_port_ranges": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, + "outbound_header": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "destination_port_ranges": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - names.AttrProtocol: { - Type: schema.TypeString, - Computed: true, - }, - "source_addresses": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "source_port_ranges": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, + names.AttrProtocol: { + Type: schema.TypeString, + Computed: true, + }, + "source_addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "source_port_ranges": 
{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, }, }, }, - }, - "route_table_route": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "destination_cidr": { - Type: schema.TypeString, - Computed: true, - }, - "destination_prefix_list_id": { - Type: schema.TypeString, - Computed: true, - }, - "egress_only_internet_gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - "gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrInstanceID: { - Type: schema.TypeString, - Computed: true, - }, - "nat_gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrNetworkInterfaceID: { - Type: schema.TypeString, - Computed: true, - }, - "origin": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrTransitGatewayID: { - Type: schema.TypeString, - Computed: true, - }, - "vpc_peering_connection_id": { - Type: schema.TypeString, - Computed: true, + "route_table_route": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_cidr": { + Type: schema.TypeString, + Computed: true, + }, + "destination_prefix_list_id": { + Type: schema.TypeString, + Computed: true, + }, + "egress_only_internet_gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + "gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrInstanceID: { + Type: schema.TypeString, + Computed: true, + }, + "nat_gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrNetworkInterfaceID: { + Type: schema.TypeString, + Computed: true, + }, + "origin": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTransitGatewayID: { + Type: schema.TypeString, + Computed: true, + }, + 
"vpc_peering_connection_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "security_group_rule": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr": { - Type: schema.TypeString, - Computed: true, - }, - "direction": { - Type: schema.TypeString, - Computed: true, - }, - "port_range": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, + "security_group_rule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": { + Type: schema.TypeString, + Computed: true, + }, + "direction": { + Type: schema.TypeString, + Computed: true, + }, + "port_range": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "prefix_list_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrProtocol: { - Type: schema.TypeString, - Computed: true, - }, - "security_group_id": { - Type: schema.TypeString, - Computed: true, + "prefix_list_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrProtocol: { + Type: schema.TypeString, + Computed: true, + }, + "security_group_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "sequence_number": { - Type: schema.TypeInt, - Computed: true, - }, - "source_vpc": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "sequence_number": { + Type: 
schema.TypeInt, + Computed: true, + }, + "source_vpc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "subnet": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "subnet": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "transit_gateway": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "transit_gateway": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "transit_gateway_route_table_route": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attachment_id": { - Type: schema.TypeString, - Computed: true, - }, - "destination_cidr": { - Type: schema.TypeString, - 
Computed: true, - }, - "prefix_list_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrResourceID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrResourceType: { - Type: schema.TypeString, - Computed: true, - }, - "route_origin": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrState: { - Type: schema.TypeString, - Computed: true, + "transit_gateway_route_table_route": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attachment_id": { + Type: schema.TypeString, + Computed: true, + }, + "destination_cidr": { + Type: schema.TypeString, + Computed: true, + }, + "prefix_list_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrResourceID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrResourceType: { + Type: schema.TypeString, + Computed: true, + }, + "route_origin": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrState: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "vpc": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "vpc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, }, }, - }, + } } -var networkInsightsAnalysisExplanationsSchema = &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "acl": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, +func networkInsightsAnalysisExplanationsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "acl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "acl_rule": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr": { - Type: schema.TypeString, - Computed: true, - }, - "egress": { - Type: schema.TypeBool, - Computed: true, - }, - "port_range": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, + "acl_rule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": { + Type: schema.TypeString, + Computed: true, + }, + "egress": { + Type: schema.TypeBool, + Computed: true, + }, + "port_range": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - names.AttrProtocol: { - Type: schema.TypeString, - Computed: true, - }, - "rule_action": { - Type: schema.TypeString, - Computed: true, - }, - "rule_number": { - Type: schema.TypeInt, - Computed: true, + names.AttrProtocol: { + Type: 
schema.TypeString, + Computed: true, + }, + "rule_action": { + Type: schema.TypeString, + Computed: true, + }, + "rule_number": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - names.AttrAddress: { - Type: schema.TypeString, - Computed: true, - }, - "addresses": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "attached_to": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + names.AttrAddress: { + Type: schema.TypeString, + Computed: true, + }, + "addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "attached_to": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrAvailabilityZones: { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "cidrs": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "classic_load_balancer_listener": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_port": { - Type: schema.TypeInt, - Computed: true, - }, - "load_balancer_port": { - Type: schema.TypeInt, - Computed: true, + names.AttrAvailabilityZones: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "cidrs": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + 
"classic_load_balancer_listener": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_port": { + Type: schema.TypeInt, + Computed: true, + }, + "load_balancer_port": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "component": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "component": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "customer_gateway": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "customer_gateway": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrDestination: { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + 
names.AttrDestination: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "destination_vpc": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "destination_vpc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "direction": { - Type: schema.TypeString, - Computed: true, - }, - "elastic_load_balancer_listener": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "direction": { + Type: schema.TypeString, + Computed: true, + }, + "elastic_load_balancer_listener": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "explanation_code": { - Type: schema.TypeString, - Computed: true, - }, - "ingress_route_table": { - Type: 
schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "explanation_code": { + Type: schema.TypeString, + Computed: true, + }, + "ingress_route_table": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "internet_gateway": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "internet_gateway": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "load_balancer_arn": { - Type: schema.TypeString, - Computed: true, - }, - "load_balancer_listener_port": { - Type: schema.TypeInt, - Computed: true, - }, - "load_balancer_target_group": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "load_balancer_arn": { + Type: schema.TypeString, + Computed: true, + }, + 
"load_balancer_listener_port": { + Type: schema.TypeInt, + Computed: true, + }, + "load_balancer_target_group": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "load_balancer_target_groups": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "load_balancer_target_groups": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "load_balancer_target_port": { - Type: schema.TypeInt, - Computed: true, - }, - "missing_component": { - Type: schema.TypeString, - Computed: true, - }, - "nat_gateway": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "load_balancer_target_port": { + Type: schema.TypeInt, + Computed: true, + }, + "missing_component": { + Type: schema.TypeString, + Computed: true, + }, + "nat_gateway": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + 
Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "network_interface": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "network_interface": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "packet_field": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrPort: { - Type: schema.TypeInt, - Computed: true, - }, - "port_ranges": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, + "packet_field": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrPort: { + Type: schema.TypeInt, + Computed: true, + }, + "port_ranges": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "prefix_list": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "prefix_list": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + 
Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "protocols": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "route_table": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "protocols": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "route_table": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "route_table_route": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "destination_cidr": { - Type: schema.TypeString, - Computed: true, - }, - "destination_prefix_list_id": { - Type: schema.TypeString, - Computed: true, - }, - "egress_only_internet_gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - "gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrInstanceID: { - Type: schema.TypeString, - Computed: true, - }, - "nat_gateway_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrNetworkInterfaceID: { - Type: schema.TypeString, - Computed: true, - }, - "origin": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrTransitGatewayID: { - Type: schema.TypeString, - Computed: true, - }, - 
"vpc_peering_connection_id": { - Type: schema.TypeString, - Computed: true, + "route_table_route": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_cidr": { + Type: schema.TypeString, + Computed: true, + }, + "destination_prefix_list_id": { + Type: schema.TypeString, + Computed: true, + }, + "egress_only_internet_gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + "gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrInstanceID: { + Type: schema.TypeString, + Computed: true, + }, + "nat_gateway_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrNetworkInterfaceID: { + Type: schema.TypeString, + Computed: true, + }, + "origin": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTransitGatewayID: { + Type: schema.TypeString, + Computed: true, + }, + "vpc_peering_connection_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "security_group": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "security_group": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "security_group_rule": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cidr": { - Type: schema.TypeString, - Computed: true, - }, - "direction": { - Type: schema.TypeString, - Computed: true, - }, - "port_range": { - Type: schema.TypeList, - Computed: true, 
- Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from": { - Type: schema.TypeInt, - Computed: true, - }, - "to": { - Type: schema.TypeInt, - Computed: true, + "security_group_rule": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr": { + Type: schema.TypeString, + Computed: true, + }, + "direction": { + Type: schema.TypeString, + Computed: true, + }, + "port_range": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from": { + Type: schema.TypeInt, + Computed: true, + }, + "to": { + Type: schema.TypeInt, + Computed: true, + }, }, }, }, - }, - "prefix_list_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrProtocol: { - Type: schema.TypeString, - Computed: true, - }, - "security_group_id": { - Type: schema.TypeString, - Computed: true, + "prefix_list_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrProtocol: { + Type: schema.TypeString, + Computed: true, + }, + "security_group_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrSecurityGroups: { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + names.AttrSecurityGroups: { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "source_vpc": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: 
schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "source_vpc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrState: { - Type: schema.TypeString, - Computed: true, - }, - "subnet": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + names.AttrState: { + Type: schema.TypeString, + Computed: true, + }, + "subnet": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "subnet_route_table": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "subnet_route_table": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, 
}, }, }, - }, - "transit_gateway": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "transit_gateway": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "transit_gateway_attachment": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "transit_gateway_attachment": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "transit_gateway_route_table": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "transit_gateway_route_table": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + 
Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "transit_gateway_route_table_route": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "attachment_id": { - Type: schema.TypeString, - Computed: true, - }, - "destination_cidr": { - Type: schema.TypeString, - Computed: true, - }, - "prefix_list_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrResourceID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrResourceType: { - Type: schema.TypeString, - Computed: true, - }, - "route_origin": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrState: { - Type: schema.TypeString, - Computed: true, + "transit_gateway_route_table_route": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attachment_id": { + Type: schema.TypeString, + Computed: true, + }, + "destination_cidr": { + Type: schema.TypeString, + Computed: true, + }, + "prefix_list_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrResourceID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrResourceType: { + Type: schema.TypeString, + Computed: true, + }, + "route_origin": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrState: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "vpc": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "vpc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, 
+ }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "vpc_endpoint": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "vpc_endpoint": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "vpc_peering_connection": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "vpc_peering_connection": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "vpn_connection": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "vpn_connection": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + 
names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "vpn_gateway": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrID: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrName: { - Type: schema.TypeString, - Computed: true, + "vpn_gateway": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrID: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrName: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, }, }, - }, + } } func resourceNetworkInsightsAnalysisCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.StartNetworkInsightsAnalysisInput{ NetworkInsightsPathId: aws.String(d.Get("network_insights_path_id").(string)), - TagSpecifications: getTagSpecificationsIn(ctx, ec2.ResourceTypeNetworkInsightsAnalysis), + TagSpecifications: getTagSpecificationsInV2(ctx, awstypes.ResourceTypeNetworkInsightsAnalysis), } if v, ok := d.GetOk("filter_in_arns"); ok && v.(*schema.Set).Len() > 0 { - input.FilterInArns = flex.ExpandStringSet(v.(*schema.Set)) + input.FilterInArns = flex.ExpandStringValueSet(v.(*schema.Set)) } - log.Printf("[DEBUG] Creating EC2 Network Insights Analysis: %s", input) - output, err := conn.StartNetworkInsightsAnalysisWithContext(ctx, input) + output, err := conn.StartNetworkInsightsAnalysis(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating EC2 Network Insights Analysis: %s", err) } - 
d.SetId(aws.StringValue(output.NetworkInsightsAnalysis.NetworkInsightsAnalysisId)) + d.SetId(aws.ToString(output.NetworkInsightsAnalysis.NetworkInsightsAnalysisId)) if d.Get("wait_for_completion").(bool) { - if _, err := WaitNetworkInsightsAnalysisCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err := waitNetworkInsightsAnalysisCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EC2 Network Insights Analysis (%s) create: %s", d.Id(), err) } } @@ -1439,10 +1444,9 @@ func resourceNetworkInsightsAnalysisCreate(ctx context.Context, d *schema.Resour func resourceNetworkInsightsAnalysisRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).EC2Client(ctx) - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - - output, err := FindNetworkInsightsAnalysisByID(ctx, conn, d.Id()) + output, err := findNetworkInsightsAnalysisByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EC2 Network Insights Analysis (%s) not found, removing from state", d.Id()) @@ -1461,7 +1465,7 @@ func resourceNetworkInsightsAnalysisRead(ctx context.Context, d *schema.Resource if err := d.Set("explanations", flattenExplanations(output.Explanations)); err != nil { return sdkdiag.AppendErrorf(diags, "setting explanations: %s", err) } - d.Set("filter_in_arns", aws.StringValueSlice(output.FilterInArns)) + d.Set("filter_in_arns", output.FilterInArns) if err := d.Set("forward_path_components", flattenPathComponents(output.ForwardPathComponents)); err != nil { return sdkdiag.AppendErrorf(diags, "setting forward_path_components: %s", err) } @@ -1475,7 +1479,7 @@ func resourceNetworkInsightsAnalysisRead(ctx context.Context, d *schema.Resource d.Set(names.AttrStatusMessage, output.StatusMessage) d.Set("warning_message", output.WarningMessage) - setTagsOut(ctx, output.Tags) + 
setTagsOutV2(ctx, output.Tags) return diags } @@ -1487,11 +1491,10 @@ func resourceNetworkInsightsAnalysisUpdate(ctx context.Context, d *schema.Resour func resourceNetworkInsightsAnalysisDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) log.Printf("[DEBUG] Deleting EC2 Network Insights Analysis: %s", d.Id()) - _, err := conn.DeleteNetworkInsightsAnalysisWithContext(ctx, &ec2.DeleteNetworkInsightsAnalysisInput{ + _, err := conn.DeleteNetworkInsightsAnalysis(ctx, &ec2.DeleteNetworkInsightsAnalysisInput{ NetworkInsightsAnalysisId: aws.String(d.Id()), }) @@ -1506,7 +1509,7 @@ func resourceNetworkInsightsAnalysisDelete(ctx context.Context, d *schema.Resour return diags } -func flattenAdditionalDetail(apiObject *ec2.AdditionalDetail) map[string]interface{} { +func flattenAdditionalDetail(apiObject *awstypes.AdditionalDetail) map[string]interface{} { if apiObject == nil { return nil } @@ -1514,7 +1517,7 @@ func flattenAdditionalDetail(apiObject *ec2.AdditionalDetail) map[string]interfa tfMap := map[string]interface{}{} if v := apiObject.AdditionalDetailType; v != nil { - tfMap["additional_detail_type"] = aws.StringValue(v) + tfMap["additional_detail_type"] = aws.ToString(v) } if v := apiObject.Component; v != nil { @@ -1524,7 +1527,7 @@ func flattenAdditionalDetail(apiObject *ec2.AdditionalDetail) map[string]interfa return tfMap } -func flattenAdditionalDetails(apiObjects []*ec2.AdditionalDetail) []interface{} { +func flattenAdditionalDetails(apiObjects []awstypes.AdditionalDetail) []interface{} { if len(apiObjects) == 0 { return nil } @@ -1532,17 +1535,13 @@ func flattenAdditionalDetails(apiObjects []*ec2.AdditionalDetail) []interface{} var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - tfList = append(tfList, flattenAdditionalDetail(apiObject)) + tfList 
= append(tfList, flattenAdditionalDetail(&apiObject)) } return tfList } -func flattenAlternatePathHint(apiObject *ec2.AlternatePathHint) map[string]interface{} { +func flattenAlternatePathHint(apiObject *awstypes.AlternatePathHint) map[string]interface{} { if apiObject == nil { return nil } @@ -1550,17 +1549,17 @@ func flattenAlternatePathHint(apiObject *ec2.AlternatePathHint) map[string]inter tfMap := map[string]interface{}{} if v := apiObject.ComponentArn; v != nil { - tfMap["component_arn"] = aws.StringValue(v) + tfMap["component_arn"] = aws.ToString(v) } if v := apiObject.ComponentId; v != nil { - tfMap["component_id"] = aws.StringValue(v) + tfMap["component_id"] = aws.ToString(v) } return tfMap } -func flattenAlternatePathHints(apiObjects []*ec2.AlternatePathHint) []interface{} { +func flattenAlternatePathHints(apiObjects []awstypes.AlternatePathHint) []interface{} { if len(apiObjects) == 0 { return nil } @@ -1568,17 +1567,13 @@ func flattenAlternatePathHints(apiObjects []*ec2.AlternatePathHint) []interface{ var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - tfList = append(tfList, flattenAlternatePathHint(apiObject)) + tfList = append(tfList, flattenAlternatePathHint(&apiObject)) } return tfList } -func flattenAnalysisAclRule(apiObject *ec2.AnalysisAclRule) map[string]interface{} { // nosemgrep:ci.caps2-in-func-name +func flattenAnalysisAclRule(apiObject *awstypes.AnalysisAclRule) map[string]interface{} { // nosemgrep:ci.caps2-in-func-name if apiObject == nil { return nil } @@ -1586,11 +1581,11 @@ func flattenAnalysisAclRule(apiObject *ec2.AnalysisAclRule) map[string]interface tfMap := map[string]interface{}{} if v := apiObject.Cidr; v != nil { - tfMap["cidr"] = aws.StringValue(v) + tfMap["cidr"] = aws.ToString(v) } if v := apiObject.Egress; v != nil { - tfMap["egress"] = aws.BoolValue(v) + tfMap["egress"] = aws.ToBool(v) } if v := apiObject.PortRange; v != nil { @@ -1598,21 +1593,21 @@ func 
flattenAnalysisAclRule(apiObject *ec2.AnalysisAclRule) map[string]interface } if v := apiObject.Protocol; v != nil { - tfMap[names.AttrProtocol] = aws.StringValue(v) + tfMap[names.AttrProtocol] = aws.ToString(v) } if v := apiObject.RuleAction; v != nil { - tfMap["rule_action"] = aws.StringValue(v) + tfMap["rule_action"] = aws.ToString(v) } if v := apiObject.RuleNumber; v != nil { - tfMap["rule_number"] = aws.Int64Value(v) + tfMap["rule_number"] = aws.ToInt32(v) } return tfMap } -func flattenAnalysisLoadBalancerListener(apiObject *ec2.AnalysisLoadBalancerListener) map[string]interface{} { +func flattenAnalysisLoadBalancerListener(apiObject *awstypes.AnalysisLoadBalancerListener) map[string]interface{} { if apiObject == nil { return nil } @@ -1620,17 +1615,17 @@ func flattenAnalysisLoadBalancerListener(apiObject *ec2.AnalysisLoadBalancerList tfMap := map[string]interface{}{} if v := apiObject.InstancePort; v != nil { - tfMap["instance_port"] = aws.Int64Value(v) + tfMap["instance_port"] = aws.ToInt32(v) } if v := apiObject.LoadBalancerPort; v != nil { - tfMap["load_balancer_port"] = aws.Int64Value(v) + tfMap["load_balancer_port"] = aws.ToInt32(v) } return tfMap } -func flattenAnalysisComponent(apiObject *ec2.AnalysisComponent) map[string]interface{} { +func flattenAnalysisComponent(apiObject *awstypes.AnalysisComponent) map[string]interface{} { if apiObject == nil { return nil } @@ -1638,21 +1633,21 @@ func flattenAnalysisComponent(apiObject *ec2.AnalysisComponent) map[string]inter tfMap := map[string]interface{}{} if v := apiObject.Arn; v != nil { - tfMap[names.AttrARN] = aws.StringValue(v) + tfMap[names.AttrARN] = aws.ToString(v) } if v := apiObject.Id; v != nil { - tfMap[names.AttrID] = aws.StringValue(v) + tfMap[names.AttrID] = aws.ToString(v) } if v := apiObject.Name; v != nil { - tfMap[names.AttrName] = aws.StringValue(v) + tfMap[names.AttrName] = aws.ToString(v) } return tfMap } -func flattenAnalysisComponents(apiObjects []*ec2.AnalysisComponent) []interface{} 
{ +func flattenAnalysisComponents(apiObjects []awstypes.AnalysisComponent) []interface{} { if len(apiObjects) == 0 { return nil } @@ -1660,17 +1655,13 @@ func flattenAnalysisComponents(apiObjects []*ec2.AnalysisComponent) []interface{ var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - tfList = append(tfList, flattenAnalysisComponent(apiObject)) + tfList = append(tfList, flattenAnalysisComponent(&apiObject)) } return tfList } -func flattenAnalysisLoadBalancerTarget(apiObject *ec2.AnalysisLoadBalancerTarget) map[string]interface{} { +func flattenAnalysisLoadBalancerTarget(apiObject *awstypes.AnalysisLoadBalancerTarget) map[string]interface{} { if apiObject == nil { return nil } @@ -1678,11 +1669,11 @@ func flattenAnalysisLoadBalancerTarget(apiObject *ec2.AnalysisLoadBalancerTarget tfMap := map[string]interface{}{} if v := apiObject.Address; v != nil { - tfMap[names.AttrAddress] = aws.StringValue(v) + tfMap[names.AttrAddress] = aws.ToString(v) } if v := apiObject.AvailabilityZone; v != nil { - tfMap[names.AttrAvailabilityZone] = aws.StringValue(v) + tfMap[names.AttrAvailabilityZone] = aws.ToString(v) } if v := apiObject.Instance; v != nil { @@ -1690,13 +1681,13 @@ func flattenAnalysisLoadBalancerTarget(apiObject *ec2.AnalysisLoadBalancerTarget } if v := apiObject.Port; v != nil { - tfMap[names.AttrPort] = aws.Int64Value(v) + tfMap[names.AttrPort] = aws.ToInt32(v) } return tfMap } -func flattenAnalysisPacketHeader(apiObject *ec2.AnalysisPacketHeader) map[string]interface{} { +func flattenAnalysisPacketHeader(apiObject *awstypes.AnalysisPacketHeader) map[string]interface{} { if apiObject == nil { return nil } @@ -1704,7 +1695,7 @@ func flattenAnalysisPacketHeader(apiObject *ec2.AnalysisPacketHeader) map[string tfMap := map[string]interface{}{} if v := apiObject.DestinationAddresses; v != nil { - tfMap["destination_addresses"] = aws.StringValueSlice(v) + tfMap["destination_addresses"] = v } if v := 
apiObject.DestinationPortRanges; v != nil { @@ -1712,11 +1703,11 @@ func flattenAnalysisPacketHeader(apiObject *ec2.AnalysisPacketHeader) map[string } if v := apiObject.Protocol; v != nil { - tfMap[names.AttrProtocol] = aws.StringValue(v) + tfMap[names.AttrProtocol] = aws.ToString(v) } if v := apiObject.SourceAddresses; v != nil { - tfMap["source_addresses"] = aws.StringValueSlice(v) + tfMap["source_addresses"] = v } if v := apiObject.SourcePortRanges; v != nil { @@ -1726,7 +1717,7 @@ func flattenAnalysisPacketHeader(apiObject *ec2.AnalysisPacketHeader) map[string return tfMap } -func flattenAnalysisRouteTableRoute(apiObject *ec2.AnalysisRouteTableRoute) map[string]interface{} { +func flattenAnalysisRouteTableRoute(apiObject *awstypes.AnalysisRouteTableRoute) map[string]interface{} { if apiObject == nil { return nil } @@ -1734,49 +1725,49 @@ func flattenAnalysisRouteTableRoute(apiObject *ec2.AnalysisRouteTableRoute) map[ tfMap := map[string]interface{}{} if v := apiObject.DestinationCidr; v != nil { - tfMap["destination_cidr"] = aws.StringValue(v) + tfMap["destination_cidr"] = aws.ToString(v) } if v := apiObject.DestinationPrefixListId; v != nil { - tfMap["destination_prefix_list_id"] = aws.StringValue(v) + tfMap["destination_prefix_list_id"] = aws.ToString(v) } if v := apiObject.EgressOnlyInternetGatewayId; v != nil { - tfMap["egress_only_internet_gateway_id"] = aws.StringValue(v) + tfMap["egress_only_internet_gateway_id"] = aws.ToString(v) } if v := apiObject.GatewayId; v != nil { - tfMap["gateway_id"] = aws.StringValue(v) + tfMap["gateway_id"] = aws.ToString(v) } if v := apiObject.InstanceId; v != nil { - tfMap[names.AttrInstanceID] = aws.StringValue(v) + tfMap[names.AttrInstanceID] = aws.ToString(v) } if v := apiObject.NatGatewayId; v != nil { - tfMap["nat_gateway_id"] = aws.StringValue(v) + tfMap["nat_gateway_id"] = aws.ToString(v) } if v := apiObject.NetworkInterfaceId; v != nil { - tfMap[names.AttrNetworkInterfaceID] = aws.StringValue(v) + 
tfMap[names.AttrNetworkInterfaceID] = aws.ToString(v) } if v := apiObject.Origin; v != nil { - tfMap["origin"] = aws.StringValue(v) + tfMap["origin"] = aws.ToString(v) } if v := apiObject.TransitGatewayId; v != nil { - tfMap[names.AttrTransitGatewayID] = aws.StringValue(v) + tfMap[names.AttrTransitGatewayID] = aws.ToString(v) } if v := apiObject.VpcPeeringConnectionId; v != nil { - tfMap["vpc_peering_connection_id"] = aws.StringValue(v) + tfMap["vpc_peering_connection_id"] = aws.ToString(v) } return tfMap } -func flattenAnalysisSecurityGroupRule(apiObject *ec2.AnalysisSecurityGroupRule) map[string]interface{} { +func flattenAnalysisSecurityGroupRule(apiObject *awstypes.AnalysisSecurityGroupRule) map[string]interface{} { if apiObject == nil { return nil } @@ -1784,7 +1775,7 @@ func flattenAnalysisSecurityGroupRule(apiObject *ec2.AnalysisSecurityGroupRule) tfMap := map[string]interface{}{} if v := apiObject.Cidr; v != nil { - tfMap["cidr"] = aws.StringValue(v) + tfMap["cidr"] = aws.ToString(v) } if v := apiObject.PortRange; v != nil { @@ -1792,21 +1783,21 @@ func flattenAnalysisSecurityGroupRule(apiObject *ec2.AnalysisSecurityGroupRule) } if v := apiObject.PrefixListId; v != nil { - tfMap["prefix_list_id"] = aws.StringValue(v) + tfMap["prefix_list_id"] = aws.ToString(v) } if v := apiObject.Protocol; v != nil { - tfMap[names.AttrProtocol] = aws.StringValue(v) + tfMap[names.AttrProtocol] = aws.ToString(v) } if v := apiObject.SecurityGroupId; v != nil { - tfMap["security_group_id"] = aws.StringValue(v) + tfMap["security_group_id"] = aws.ToString(v) } return tfMap } -func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { +func flattenExplanation(apiObject *awstypes.Explanation) map[string]interface{} { if apiObject == nil { return nil } @@ -1822,11 +1813,11 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.Address; v != nil { - tfMap[names.AttrAddress] = aws.StringValue(v) + tfMap[names.AttrAddress] 
= aws.ToString(v) } if v := apiObject.Addresses; v != nil { - tfMap["addresses"] = aws.StringValueSlice(v) + tfMap["addresses"] = v } if v := apiObject.AttachedTo; v != nil { @@ -1834,11 +1825,11 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.AvailabilityZones; v != nil { - tfMap[names.AttrAvailabilityZones] = aws.StringValueSlice(v) + tfMap[names.AttrAvailabilityZones] = v } if v := apiObject.Cidrs; v != nil { - tfMap["cidrs"] = aws.StringValueSlice(v) + tfMap["cidrs"] = v } if v := apiObject.ClassicLoadBalancerListener; v != nil { @@ -1862,7 +1853,7 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.Direction; v != nil { - tfMap["direction"] = aws.StringValue(v) + tfMap["direction"] = aws.ToString(v) } if v := apiObject.ElasticLoadBalancerListener; v != nil { @@ -1870,7 +1861,7 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.ExplanationCode; v != nil { - tfMap["explanation_code"] = aws.StringValue(v) + tfMap["explanation_code"] = aws.ToString(v) } if v := apiObject.IngressRouteTable; v != nil { @@ -1882,11 +1873,11 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.LoadBalancerArn; v != nil { - tfMap["load_balancer_arn"] = aws.StringValue(v) + tfMap["load_balancer_arn"] = aws.ToString(v) } if v := apiObject.LoadBalancerListenerPort; v != nil { - tfMap["load_balancer_listener_port"] = aws.Int64Value(v) + tfMap["load_balancer_listener_port"] = aws.ToInt32(v) } if v := apiObject.LoadBalancerTarget; v != nil { @@ -1902,11 +1893,11 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.LoadBalancerTargetPort; v != nil { - tfMap["load_balancer_target_port"] = aws.Int64Value(v) + tfMap["load_balancer_target_port"] = aws.ToInt32(v) } if v := apiObject.MissingComponent; v != nil { - tfMap["missing_component"] = 
aws.StringValue(v) + tfMap["missing_component"] = aws.ToString(v) } if v := apiObject.NatGateway; v != nil { @@ -1918,11 +1909,11 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.PacketField; v != nil { - tfMap["packet_field"] = aws.StringValue(v) + tfMap["packet_field"] = aws.ToString(v) } if v := apiObject.Port; v != nil { - tfMap[names.AttrPort] = aws.Int64Value(v) + tfMap[names.AttrPort] = aws.ToInt32(v) } if v := apiObject.PortRanges; v != nil { @@ -1934,7 +1925,7 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.Protocols; v != nil { - tfMap["protocols"] = aws.StringValueSlice(v) + tfMap["protocols"] = v } if v := apiObject.RouteTable; v != nil { @@ -1962,7 +1953,7 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { } if v := apiObject.State; v != nil { - tfMap[names.AttrState] = aws.StringValue(v) + tfMap[names.AttrState] = aws.ToString(v) } if v := apiObject.Subnet; v != nil { @@ -2012,7 +2003,7 @@ func flattenExplanation(apiObject *ec2.Explanation) map[string]interface{} { return tfMap } -func flattenExplanations(apiObjects []*ec2.Explanation) []interface{} { +func flattenExplanations(apiObjects []awstypes.Explanation) []interface{} { if len(apiObjects) == 0 { return nil } @@ -2020,17 +2011,13 @@ func flattenExplanations(apiObjects []*ec2.Explanation) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - tfList = append(tfList, flattenExplanation(apiObject)) + tfList = append(tfList, flattenExplanation(&apiObject)) } return tfList } -func flattenPathComponent(apiObject *ec2.PathComponent) map[string]interface{} { +func flattenPathComponent(apiObject *awstypes.PathComponent) map[string]interface{} { if apiObject == nil { return nil } @@ -2070,7 +2057,7 @@ func flattenPathComponent(apiObject *ec2.PathComponent) map[string]interface{} { } if v := 
apiObject.SequenceNumber; v != nil { - tfMap["sequence_number"] = aws.Int64Value(v) + tfMap["sequence_number"] = aws.ToInt32(v) } if v := apiObject.SourceVpc; v != nil { @@ -2096,7 +2083,7 @@ func flattenPathComponent(apiObject *ec2.PathComponent) map[string]interface{} { return tfMap } -func flattenPathComponents(apiObjects []*ec2.PathComponent) []interface{} { +func flattenPathComponents(apiObjects []awstypes.PathComponent) []interface{} { if len(apiObjects) == 0 { return nil } @@ -2104,17 +2091,13 @@ func flattenPathComponents(apiObjects []*ec2.PathComponent) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - tfList = append(tfList, flattenPathComponent(apiObject)) + tfList = append(tfList, flattenPathComponent(&apiObject)) } return tfList } -func flattenPortRange(apiObject *ec2.PortRange) map[string]interface{} { +func flattenPortRange(apiObject *awstypes.PortRange) map[string]interface{} { if apiObject == nil { return nil } @@ -2122,17 +2105,17 @@ func flattenPortRange(apiObject *ec2.PortRange) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.From; v != nil { - tfMap["from"] = aws.Int64Value(v) + tfMap["from"] = aws.ToInt32(v) } if v := apiObject.To; v != nil { - tfMap["to"] = aws.Int64Value(v) + tfMap["to"] = aws.ToInt32(v) } return tfMap } -func flattenPortRanges(apiObjects []*ec2.PortRange) []interface{} { +func flattenPortRanges(apiObjects []awstypes.PortRange) []interface{} { if len(apiObjects) == 0 { return nil } @@ -2140,17 +2123,13 @@ func flattenPortRanges(apiObjects []*ec2.PortRange) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - tfList = append(tfList, flattenPortRange(apiObject)) + tfList = append(tfList, flattenPortRange(&apiObject)) } return tfList } -func flattenTransitGatewayRouteTableRoute(apiObject *ec2.TransitGatewayRouteTableRoute) map[string]interface{} { +func 
flattenTransitGatewayRouteTableRoute(apiObject *awstypes.TransitGatewayRouteTableRoute) map[string]interface{} { if apiObject == nil { return nil } @@ -2158,31 +2137,31 @@ func flattenTransitGatewayRouteTableRoute(apiObject *ec2.TransitGatewayRouteTabl tfMap := map[string]interface{}{} if v := apiObject.AttachmentId; v != nil { - tfMap["attachment_id"] = aws.StringValue(v) + tfMap["attachment_id"] = aws.ToString(v) } if v := apiObject.DestinationCidr; v != nil { - tfMap["destination_cidr"] = aws.StringValue(v) + tfMap["destination_cidr"] = aws.ToString(v) } if v := apiObject.PrefixListId; v != nil { - tfMap["prefix_list_id"] = aws.StringValue(v) + tfMap["prefix_list_id"] = aws.ToString(v) } if v := apiObject.ResourceId; v != nil { - tfMap[names.AttrResourceID] = aws.StringValue(v) + tfMap[names.AttrResourceID] = aws.ToString(v) } if v := apiObject.ResourceType; v != nil { - tfMap[names.AttrResourceType] = aws.StringValue(v) + tfMap[names.AttrResourceType] = aws.ToString(v) } if v := apiObject.RouteOrigin; v != nil { - tfMap["route_origin"] = aws.StringValue(v) + tfMap["route_origin"] = aws.ToString(v) } if v := apiObject.State; v != nil { - tfMap[names.AttrState] = aws.StringValue(v) + tfMap[names.AttrState] = aws.ToString(v) } return tfMap diff --git a/internal/service/ec2/vpc_network_insights_analysis_data_source.go b/internal/service/ec2/vpc_network_insights_analysis_data_source.go index 9bb94c04c1e..6c6959e3772 100644 --- a/internal/service/ec2/vpc_network_insights_analysis_data_source.go +++ b/internal/service/ec2/vpc_network_insights_analysis_data_source.go @@ -7,8 +7,8 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,88 +18,90 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_network_insights_analysis") -func DataSourceNetworkInsightsAnalysis() *schema.Resource { +// @SDKDataSource("aws_ec2_network_insights_analysis", name="Network Insights Analysis") +// @Tags +// @Testing(tagsTest=false) +func dataSourceNetworkInsightsAnalysis() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceNetworkInsightsAnalysisRead, - Schema: map[string]*schema.Schema{ - "alternate_path_hints": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "component_arn": { - Type: schema.TypeString, - Computed: true, - }, - "component_id": { - Type: schema.TypeString, - Computed: true, + SchemaFunc: func() map[string]*schema.Schema { + return map[string]*schema.Schema{ + "alternate_path_hints": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "component_arn": { + Type: schema.TypeString, + Computed: true, + }, + "component_id": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - "explanations": networkInsightsAnalysisExplanationsSchema, - names.AttrFilter: customFiltersSchema(), - "filter_in_arns": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "forward_path_components": networkInsightsAnalysisPathComponentsSchema, - "network_insights_analysis_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "network_insights_path_id": { - Type: schema.TypeString, - Computed: true, - }, - "path_found": { - Type: schema.TypeBool, - Computed: true, - }, - "return_path_components": networkInsightsAnalysisPathComponentsSchema, - "start_date": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrStatus: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrStatusMessage: { - Type: 
schema.TypeString, - Computed: true, - }, - names.AttrTags: tftags.TagsSchemaComputed(), - "warning_message": { - Type: schema.TypeString, - Computed: true, - }, + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "explanations": networkInsightsAnalysisExplanationsSchema(), + names.AttrFilter: customFiltersSchema(), + "filter_in_arns": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "forward_path_components": networkInsightsAnalysisPathComponentsSchema(), + "network_insights_analysis_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "network_insights_path_id": { + Type: schema.TypeString, + Computed: true, + }, + "path_found": { + Type: schema.TypeBool, + Computed: true, + }, + "return_path_components": networkInsightsAnalysisPathComponentsSchema(), + "start_date": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrStatus: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrStatusMessage: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchemaComputed(), + "warning_message": { + Type: schema.TypeString, + Computed: true, + }, + } }, } } func dataSourceNetworkInsightsAnalysisRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.DescribeNetworkInsightsAnalysesInput{} if v, ok := d.GetOk("network_insights_analysis_id"); ok { - input.NetworkInsightsAnalysisIds = aws.StringSlice([]string{v.(string)}) + input.NetworkInsightsAnalysisIds = []string{v.(string)} } - input.Filters = append(input.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) 
@@ -108,13 +110,13 @@ func dataSourceNetworkInsightsAnalysisRead(ctx context.Context, d *schema.Resour input.Filters = nil } - output, err := FindNetworkInsightsAnalysis(ctx, conn, input) + output, err := findNetworkInsightsAnalysis(ctx, conn, input) if err != nil { return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EC2 Network Insights Analysis", err)) } - networkInsightsAnalysisID := aws.StringValue(output.NetworkInsightsAnalysisId) + networkInsightsAnalysisID := aws.ToString(output.NetworkInsightsAnalysisId) d.SetId(networkInsightsAnalysisID) if err := d.Set("alternate_path_hints", flattenAlternatePathHints(output.AlternatePathHints)); err != nil { return sdkdiag.AppendErrorf(diags, "setting alternate_path_hints: %s", err) @@ -123,7 +125,7 @@ func dataSourceNetworkInsightsAnalysisRead(ctx context.Context, d *schema.Resour if err := d.Set("explanations", flattenExplanations(output.Explanations)); err != nil { return sdkdiag.AppendErrorf(diags, "setting explanations: %s", err) } - d.Set("filter_in_arns", aws.StringValueSlice(output.FilterInArns)) + d.Set("filter_in_arns", output.FilterInArns) if err := d.Set("forward_path_components", flattenPathComponents(output.ForwardPathComponents)); err != nil { return sdkdiag.AppendErrorf(diags, "setting forward_path_components: %s", err) } @@ -138,9 +140,7 @@ func dataSourceNetworkInsightsAnalysisRead(ctx context.Context, d *schema.Resour d.Set(names.AttrStatusMessage, output.StatusMessage) d.Set("warning_message", output.WarningMessage) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, output.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOutV2(ctx, output.Tags) return diags } diff --git a/internal/service/ec2/vpc_network_insights_analysis_test.go b/internal/service/ec2/vpc_network_insights_analysis_test.go index 48747e75615..9725374c1a1 100644 --- 
a/internal/service/ec2/vpc_network_insights_analysis_test.go +++ b/internal/service/ec2/vpc_network_insights_analysis_test.go @@ -195,11 +195,7 @@ func testAccCheckNetworkInsightsAnalysisExists(ctx context.Context, n string) re return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No EC2 Network Insights Analysis ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) _, err := tfec2.FindNetworkInsightsAnalysisByID(ctx, conn, rs.Primary.ID) @@ -209,7 +205,7 @@ func testAccCheckNetworkInsightsAnalysisExists(ctx context.Context, n string) re func testAccCheckNetworkInsightsAnalysisDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_ec2_network_insights_analysis" { diff --git a/internal/service/ec2/vpc_network_insights_path.go b/internal/service/ec2/vpc_network_insights_path.go index 8392287c25b..94a4259cf78 100644 --- a/internal/service/ec2/vpc_network_insights_path.go +++ b/internal/service/ec2/vpc_network_insights_path.go @@ -8,14 +8,15 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + awstypes "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + 
"github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -26,7 +27,7 @@ import ( // @SDKResource("aws_ec2_network_insights_path", name="Network Insights Path") // @Tags(identifierAttribute="id") // @Testing(tagsTest=false) -func ResourceNetworkInsightsPath() *schema.Resource { +func resourceNetworkInsightsPath() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceNetworkInsightsPathCreate, ReadWithoutTimeout: resourceNetworkInsightsPathRead, @@ -48,7 +49,7 @@ func ResourceNetworkInsightsPath() *schema.Resource { }, names.AttrDestination: { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, DiffSuppressFunc: suppressEquivalentIDOrARN, }, @@ -63,10 +64,10 @@ func ResourceNetworkInsightsPath() *schema.Resource { ForceNew: true, }, names.AttrProtocol: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(ec2.Protocol_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.Protocol](), }, names.AttrSource: { Type: schema.TypeString, @@ -93,15 +94,17 @@ func ResourceNetworkInsightsPath() *schema.Resource { func resourceNetworkInsightsPathCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.CreateNetworkInsightsPathInput{ ClientToken: aws.String(id.UniqueId()), - Destination: aws.String(d.Get(names.AttrDestination).(string)), - Protocol: aws.String(d.Get(names.AttrProtocol).(string)), + Protocol: awstypes.Protocol(d.Get(names.AttrProtocol).(string)), Source: aws.String(d.Get(names.AttrSource).(string)), - TagSpecifications: 
getTagSpecificationsIn(ctx, ec2.ResourceTypeNetworkInsightsPath), + TagSpecifications: getTagSpecificationsInV2(ctx, awstypes.ResourceTypeNetworkInsightsPath), + } + + if v, ok := d.GetOk(names.AttrDestination); ok { + input.Destination = aws.String(v.(string)) } if v, ok := d.GetOk("destination_ip"); ok { @@ -109,30 +112,29 @@ func resourceNetworkInsightsPathCreate(ctx context.Context, d *schema.ResourceDa } if v, ok := d.GetOk("destination_port"); ok { - input.DestinationPort = aws.Int64(int64(v.(int))) + input.DestinationPort = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("source_ip"); ok { input.SourceIp = aws.String(v.(string)) } - output, err := conn.CreateNetworkInsightsPathWithContext(ctx, input) + output, err := conn.CreateNetworkInsightsPath(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating EC2 Network Insights Path: %s", err) } - d.SetId(aws.StringValue(output.NetworkInsightsPath.NetworkInsightsPathId)) + d.SetId(aws.ToString(output.NetworkInsightsPath.NetworkInsightsPathId)) return append(diags, resourceNetworkInsightsPathRead(ctx, d, meta)...) 
} func resourceNetworkInsightsPathRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).EC2Client(ctx) - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - - nip, err := FindNetworkInsightsPathByID(ctx, conn, d.Id()) + nip, err := findNetworkInsightsPathByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EC2 Network Insights Path %s not found, removing from state", d.Id()) @@ -154,7 +156,7 @@ func resourceNetworkInsightsPathRead(ctx context.Context, d *schema.ResourceData d.Set("source_arn", nip.SourceArn) d.Set("source_ip", nip.SourceIp) - setTagsOut(ctx, nip.Tags) + setTagsOutV2(ctx, nip.Tags) return diags } @@ -166,12 +168,11 @@ func resourceNetworkInsightsPathUpdate(ctx context.Context, d *schema.ResourceDa func resourceNetworkInsightsPathDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Conn(ctx) + conn := meta.(*conns.AWSClient).EC2Client(ctx) log.Printf("[DEBUG] Deleting EC2 Network Insights Path: %s", d.Id()) _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, ec2PropagationTimeout, func() (interface{}, error) { - return conn.DeleteNetworkInsightsPathWithContext(ctx, &ec2.DeleteNetworkInsightsPathInput{ + return conn.DeleteNetworkInsightsPath(ctx, &ec2.DeleteNetworkInsightsPathInput{ NetworkInsightsPathId: aws.String(d.Id()), }) }, errCodeAnalysisExistsForNetworkInsightsPath) diff --git a/internal/service/ec2/vpc_network_insights_path_data_source.go b/internal/service/ec2/vpc_network_insights_path_data_source.go index 464ba0fee48..c1f503d67f9 100644 --- a/internal/service/ec2/vpc_network_insights_path_data_source.go +++ b/internal/service/ec2/vpc_network_insights_path_data_source.go @@ -6,8 +6,8 @@ package ec2 import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ec2" + 
"github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -17,8 +17,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ec2_network_insights_path") -func DataSourceNetworkInsightsPath() *schema.Resource { +// @SDKDataSource("aws_ec2_network_insights_path", name="Network Insights Path") +// @Tags +// @Testing(tagsTest=false) +func dataSourceNetworkInsightsPath() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceNetworkInsightsPathRead, @@ -72,17 +74,15 @@ func DataSourceNetworkInsightsPath() *schema.Resource { func dataSourceNetworkInsightsPathRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EC2Conn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := meta.(*conns.AWSClient).EC2Client(ctx) input := &ec2.DescribeNetworkInsightsPathsInput{} if v, ok := d.GetOk("network_insights_path_id"); ok { - input.NetworkInsightsPathIds = aws.StringSlice([]string{v.(string)}) + input.NetworkInsightsPathIds = []string{v.(string)} } - input.Filters = append(input.Filters, newCustomFilterList( + input.Filters = append(input.Filters, newCustomFilterListV2( d.Get(names.AttrFilter).(*schema.Set), )...) 
@@ -91,13 +91,13 @@ func dataSourceNetworkInsightsPathRead(ctx context.Context, d *schema.ResourceDa input.Filters = nil } - nip, err := FindNetworkInsightsPath(ctx, conn, input) + nip, err := findNetworkInsightsPath(ctx, conn, input) if err != nil { return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EC2 Network Insights Path", err)) } - networkInsightsPathID := aws.StringValue(nip.NetworkInsightsPathId) + networkInsightsPathID := aws.ToString(nip.NetworkInsightsPathId) d.SetId(networkInsightsPathID) d.Set(names.AttrARN, nip.NetworkInsightsPathArn) d.Set(names.AttrDestination, nip.Destination) @@ -110,9 +110,7 @@ func dataSourceNetworkInsightsPathRead(ctx context.Context, d *schema.ResourceDa d.Set("source_arn", nip.SourceArn) d.Set("source_ip", nip.SourceIp) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, nip.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOutV2(ctx, nip.Tags) return diags } diff --git a/internal/service/ec2/vpc_network_insights_path_test.go b/internal/service/ec2/vpc_network_insights_path_test.go index 2ad8f02b262..1b78c002af3 100644 --- a/internal/service/ec2/vpc_network_insights_path_test.go +++ b/internal/service/ec2/vpc_network_insights_path_test.go @@ -262,11 +262,7 @@ func testAccCheckNetworkInsightsPathExists(ctx context.Context, n string) resour return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No EC2 Network Insights Path ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) _, err := tfec2.FindNetworkInsightsPathByID(ctx, conn, rs.Primary.ID) @@ -276,7 +272,7 @@ func testAccCheckNetworkInsightsPathExists(ctx context.Context, n string) resour func testAccCheckNetworkInsightsPathDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := 
acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_ec2_network_insights_path" { diff --git a/internal/service/ec2/vpc_route_table.go b/internal/service/ec2/vpc_route_table.go index 387876da9f6..31a9396e7aa 100644 --- a/internal/service/ec2/vpc_route_table.go +++ b/internal/service/ec2/vpc_route_table.go @@ -506,6 +506,10 @@ func routeTableAddRoute(ctx context.Context, conn *ec2.Client, routeTableID stri return fmt.Errorf("local route cannot be created but must exist to be adopted, %s %s does not exist", target, destination) } + if err != nil { + return fmt.Errorf("finding local route %s %s: %w", target, destination, err) + } + return nil } diff --git a/internal/service/ec2/vpc_security_group.go b/internal/service/ec2/vpc_security_group.go index 0cf7358bf14..b47de6df567 100644 --- a/internal/service/ec2/vpc_security_group.go +++ b/internal/service/ec2/vpc_security_group.go @@ -37,7 +37,7 @@ import ( // @Tags(identifierAttribute="id") // @Testing(existsType="github.com/aws/aws-sdk-go/service/ec2;ec2.SecurityGroup") // @Testing(importIgnore="revoke_rules_on_delete") -func ResourceSecurityGroup() *schema.Resource { +func resourceSecurityGroup() *schema.Resource { //lintignore:R011 return &schema.Resource{ CreateWithoutTimeout: resourceSecurityGroupCreate, diff --git a/internal/service/ec2/wait.go b/internal/service/ec2/wait.go index 24b7d30dac2..5f3d34b1e2f 100644 --- a/internal/service/ec2/wait.go +++ b/internal/service/ec2/wait.go @@ -41,50 +41,6 @@ const ( InternetGatewayNotFoundChecks = 1000 // Should exceed any reasonable custom timeout value. 
) -const ( - // Maximum amount of time to wait for a LocalGatewayRouteTableVpcAssociation to return Associated - LocalGatewayRouteTableVPCAssociationAssociatedTimeout = 5 * time.Minute - - // Maximum amount of time to wait for a LocalGatewayRouteTableVpcAssociation to return Disassociated - LocalGatewayRouteTableVPCAssociationDisassociatedTimeout = 5 * time.Minute -) - -// WaitLocalGatewayRouteTableVPCAssociationAssociated waits for a LocalGatewayRouteTableVpcAssociation to return Associated -func WaitLocalGatewayRouteTableVPCAssociationAssociated(ctx context.Context, conn *ec2.EC2, localGatewayRouteTableVpcAssociationID string) (*ec2.LocalGatewayRouteTableVpcAssociation, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ec2.RouteTableAssociationStateCodeAssociating}, - Target: []string{ec2.RouteTableAssociationStateCodeAssociated}, - Refresh: StatusLocalGatewayRouteTableVPCAssociationState(ctx, conn, localGatewayRouteTableVpcAssociationID), - Timeout: LocalGatewayRouteTableVPCAssociationAssociatedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*ec2.LocalGatewayRouteTableVpcAssociation); ok { - return output, err - } - - return nil, err -} - -// WaitLocalGatewayRouteTableVPCAssociationDisassociated waits for a LocalGatewayRouteTableVpcAssociation to return Disassociated -func WaitLocalGatewayRouteTableVPCAssociationDisassociated(ctx context.Context, conn *ec2.EC2, localGatewayRouteTableVpcAssociationID string) (*ec2.LocalGatewayRouteTableVpcAssociation, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ec2.RouteTableAssociationStateCodeDisassociating}, - Target: []string{ec2.RouteTableAssociationStateCodeDisassociated}, - Refresh: StatusLocalGatewayRouteTableVPCAssociationState(ctx, conn, localGatewayRouteTableVpcAssociationID), - Timeout: LocalGatewayRouteTableVPCAssociationAssociatedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := 
outputRaw.(*ec2.LocalGatewayRouteTableVpcAssociation); ok { - return output, err - } - - return nil, err -} - const ManagedPrefixListEntryCreateTimeout = 5 * time.Minute func WaitSecurityGroupCreated(ctx context.Context, conn *ec2.EC2, id string, timeout time.Duration) (*ec2.SecurityGroup, error) { @@ -692,27 +648,6 @@ func WaitManagedPrefixListDeleted(ctx context.Context, conn *ec2.EC2, id string) return nil, err } -func WaitNetworkInsightsAnalysisCreated(ctx context.Context, conn *ec2.EC2, id string, timeout time.Duration) (*ec2.NetworkInsightsAnalysis, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ec2.AnalysisStatusRunning}, - Target: []string{ec2.AnalysisStatusSucceeded}, - Timeout: timeout, - Refresh: StatusNetworkInsightsAnalysis(ctx, conn, id), - Delay: 10 * time.Second, - MinTimeout: 5 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*ec2.NetworkInsightsAnalysis); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusMessage))) - - return output, err - } - - return nil, err -} - const ( networkInterfaceAttachedTimeout = 5 * time.Minute NetworkInterfaceDetachedTimeout = 10 * time.Minute diff --git a/internal/service/ec2/waitv2.go b/internal/service/ec2/waitv2.go index bd9cb5e2242..8a7ea17b69b 100644 --- a/internal/service/ec2/waitv2.go +++ b/internal/service/ec2/waitv2.go @@ -57,30 +57,29 @@ func waitAvailabilityZoneGroupNotOptedIn(ctx context.Context, conn *ec2.Client, return nil, err } -const ( - CapacityReservationActiveTimeout = 2 * time.Minute - CapacityReservationDeletedTimeout = 2 * time.Minute -) - -func waitCapacityReservationActive(ctx context.Context, conn *ec2.Client, id string) error { +func waitCapacityReservationActive(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.CapacityReservation, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: 
enum.Slice(awstypes.CapacityReservationStatePending), Target: enum.Slice(awstypes.CapacityReservationStateActive), Refresh: statusCapacityReservation(ctx, conn, id), - Timeout: CapacityReservationActiveTimeout, + Timeout: timeout, } - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) - return err + if output, ok := outputRaw.(*awstypes.CapacityReservation); ok { + return output, err + } + + return nil, err } -func waitCapacityReservationDeleted(ctx context.Context, conn *ec2.Client, id string) (*awstypes.CapacityReservation, error) { +func waitCapacityReservationDeleted(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.CapacityReservation, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(awstypes.CapacityReservationStateActive), Target: []string{}, Refresh: statusCapacityReservation(ctx, conn, id), - Timeout: CapacityReservationDeletedTimeout, + Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -1752,9 +1751,65 @@ func waitIPAMScopeDeleted(ctx context.Context, conn *ec2.Client, id string, time return nil, err } -const ( - TransitGatewayIncorrectStateTimeout = 5 * time.Minute -) +func waitLocalGatewayRouteDeleted(ctx context.Context, conn *ec2.Client, localGatewayRouteTableID, destinationCIDRBlock string) (*awstypes.LocalGatewayRoute, error) { + const ( + timeout = 5 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.LocalGatewayRouteStateDeleting), + Target: []string{}, + Refresh: statusLocalGatewayRoute(ctx, conn, localGatewayRouteTableID, destinationCIDRBlock), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.LocalGatewayRoute); ok { + return output, err + } + + return nil, err +} + +func waitLocalGatewayRouteTableVPCAssociationAssociated(ctx context.Context, conn *ec2.Client, id string) 
(*awstypes.LocalGatewayRouteTableVpcAssociation, error) { + const ( + timeout = 5 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.RouteTableAssociationStateCodeAssociating), + Target: enum.Slice(awstypes.RouteTableAssociationStateCodeAssociated), + Refresh: statusLocalGatewayRouteTableVPCAssociation(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.LocalGatewayRouteTableVpcAssociation); ok { + return output, err + } + + return nil, err +} + +func waitLocalGatewayRouteTableVPCAssociationDisassociated(ctx context.Context, conn *ec2.Client, id string) (*awstypes.LocalGatewayRouteTableVpcAssociation, error) { + const ( + timeout = 5 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.RouteTableAssociationStateCodeDisassociating), + Target: []string{}, + Refresh: statusLocalGatewayRouteTableVPCAssociation(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.LocalGatewayRouteTableVpcAssociation); ok { + return output, err + } + + return nil, err +} func waitTransitGatewayCreated(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.TransitGateway, error) { stateConf := &retry.StateChangeConf{ @@ -2589,3 +2644,24 @@ func waitFastSnapshotRestoreDeleted(ctx context.Context, conn *ec2.Client, avail return nil, err } + +func waitNetworkInsightsAnalysisCreated(ctx context.Context, conn *ec2.Client, id string, timeout time.Duration) (*awstypes.NetworkInsightsAnalysis, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.AnalysisStatusRunning), + Target: enum.Slice(awstypes.AnalysisStatusSucceeded), + Timeout: timeout, + Refresh: statusNetworkInsightsAnalysis(ctx, conn, id), + Delay: 10 * time.Second, + MinTimeout: 5 * time.Second, + } + + outputRaw, err := 
stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.NetworkInsightsAnalysis); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusMessage))) + + return output, err + } + + return nil, err +} diff --git a/internal/service/ecr/repository_data_source.go b/internal/service/ecr/repository_data_source.go index 355dcdb75c4..f042d7a64a2 100644 --- a/internal/service/ecr/repository_data_source.go +++ b/internal/service/ecr/repository_data_source.go @@ -19,7 +19,7 @@ import ( ) // @SDKDataSource("aws_ecr_repository", name="Repository") -// @Tags(identifierAttribute="arn")) +// @Tags(identifierAttribute="arn") func dataSourceRepository() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceRepositoryRead, diff --git a/internal/service/ecr/repository_data_source_test.go b/internal/service/ecr/repository_data_source_test.go index 0a3fa673bf9..a4acda2cb80 100644 --- a/internal/service/ecr/repository_data_source_test.go +++ b/internal/service/ecr/repository_data_source_test.go @@ -31,7 +31,7 @@ func TestAccECRRepositoryDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, names.AttrARN, dataSourceName, names.AttrARN), resource.TestCheckResourceAttrPair(resourceName, "registry_id", dataSourceName, "registry_id"), resource.TestCheckResourceAttrPair(resourceName, "repository_url", dataSourceName, "repository_url"), - resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, dataSourceName, names.AttrTags), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsPercent, dataSourceName, acctest.CtTagsPercent), resource.TestCheckResourceAttrPair(resourceName, "image_scanning_configuration.#", dataSourceName, "image_scanning_configuration.#"), resource.TestCheckResourceAttrPair(resourceName, "image_tag_mutability", dataSourceName, "image_tag_mutability"), resource.TestCheckResourceAttrPair(resourceName, "encryption_configuration.#", dataSourceName, 
"encryption_configuration.#"), @@ -59,7 +59,7 @@ func TestAccECRRepositoryDataSource_encryption(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, names.AttrARN, dataSourceName, names.AttrARN), resource.TestCheckResourceAttrPair(resourceName, "registry_id", dataSourceName, "registry_id"), resource.TestCheckResourceAttrPair(resourceName, "repository_url", dataSourceName, "repository_url"), - resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, dataSourceName, names.AttrTags), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsPercent, dataSourceName, acctest.CtTagsPercent), resource.TestCheckResourceAttrPair(resourceName, "image_scanning_configuration.#", dataSourceName, "image_scanning_configuration.#"), resource.TestCheckResourceAttrPair(resourceName, "image_tag_mutability", dataSourceName, "image_tag_mutability"), resource.TestCheckResourceAttrPair(resourceName, "encryption_configuration.#", dataSourceName, "encryption_configuration.#"), diff --git a/internal/service/ecr/service_endpoint_resolver_gen.go b/internal/service/ecr/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..550251f394c --- /dev/null +++ b/internal/service/ecr/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package ecr + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ecr_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecr" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ ecr_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver ecr_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: ecr_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params ecr_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up ecr endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*ecr_sdkv2.Options) { + return func(o *ecr_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ecr/service_endpoints_gen_test.go b/internal/service/ecr/service_endpoints_gen_test.go index a733dcbf304..ebb1c083a2a 100644 --- a/internal/service/ecr/service_endpoints_gen_test.go +++ b/internal/service/ecr/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := ecr_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ecr_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := ecr_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
ecr_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving ecr default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving ecr FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up ecr endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ecr/service_package_gen.go b/internal/service/ecr/service_package_gen.go index 3cdc67b28e1..200916df88b 100644 --- a/internal/service/ecr/service_package_gen.go +++ b/internal/service/ecr/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ecr @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" ecr_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecr" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -53,6 +52,9 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac Factory: dataSourceRepository, TypeName: "aws_ecr_repository", Name: "Repository", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, } } @@ -108,19 +110,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*ecr_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return ecr_sdkv2.NewFromConfig(cfg, func(o *ecr_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return ecr_sdkv2.NewFromConfig(cfg, + ecr_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/ecr/tags_gen.go b/internal/service/ecr/tags_gen.go index 96121741024..d775c69606d 100644 --- a/internal/service/ecr/tags_gen.go +++ b/internal/service/ecr/tags_gen.go @@ -98,12 +98,12 @@ func setTagsOut(ctx context.Context, tags []awstypes.Tag) { } // createTags 
creates ecr service tags for new resources. -func createTags(ctx context.Context, conn *ecr.Client, identifier string, tags []awstypes.Tag) error { +func createTags(ctx context.Context, conn *ecr.Client, identifier string, tags []awstypes.Tag, optFns ...func(*ecr.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates ecr service tags. diff --git a/internal/service/ecrpublic/service_endpoint_resolver_gen.go b/internal/service/ecrpublic/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..e87de1cf693 --- /dev/null +++ b/internal/service/ecrpublic/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package ecrpublic + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ecrpublic_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecrpublic" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ ecrpublic_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver ecrpublic_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: ecrpublic_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params ecrpublic_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + 
params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up ecrpublic endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*ecrpublic_sdkv2.Options) { + return func(o *ecrpublic_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ecrpublic/service_endpoints_gen_test.go b/internal/service/ecrpublic/service_endpoints_gen_test.go index 8517b3685ea..eebd960a6ba 100644 --- a/internal/service/ecrpublic/service_endpoints_gen_test.go +++ b/internal/service/ecrpublic/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ 
withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := ecrpublic_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ecrpublic_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := ecrpublic_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ecrpublic_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) 
caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ecrpublic/service_package_gen.go b/internal/service/ecrpublic/service_package_gen.go index c3c39fc450e..6f3b96d38ff 100644 --- a/internal/service/ecrpublic/service_package_gen.go +++ b/internal/service/ecrpublic/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package ecrpublic @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" ecrpublic_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecrpublic" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -57,19 +56,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*ecrpublic_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return ecrpublic_sdkv2.NewFromConfig(cfg, func(o *ecrpublic_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return ecrpublic_sdkv2.NewFromConfig(cfg, + ecrpublic_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/ecs/cluster.go b/internal/service/ecs/cluster.go index 366d29aae7a..71c228aaadf 100644 --- a/internal/service/ecs/cluster.go +++ b/internal/service/ecs/cluster.go @@ -99,6 +99,23 @@ func ResourceCluster() *schema.Resource { }, }, }, + "managed_storage_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fargate_ephemeral_storage_kms_key_id": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrKMSKeyID: { + Type: schema.TypeString, + Optional: true, + }, + }, + }, 
+ }, }, }, }, @@ -526,6 +543,11 @@ func flattenClusterConfiguration(apiObject *ecs.ClusterConfiguration) []interfac if apiObject.ExecuteCommandConfiguration != nil { tfMap["execute_command_configuration"] = flattenClusterConfigurationExecuteCommandConfiguration(apiObject.ExecuteCommandConfiguration) } + + if apiObject.ManagedStorageConfiguration != nil { + tfMap["managed_storage_configuration"] = flattenManagedStorageConfiguration(apiObject.ManagedStorageConfiguration) + } + return []interface{}{tfMap} } @@ -576,6 +598,24 @@ func flattenClusterConfigurationExecuteCommandConfigurationLogConfiguration(apiO return []interface{}{tfMap} } +func flattenManagedStorageConfiguration(apiObject *ecs.ManagedStorageConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if apiObject.FargateEphemeralStorageKmsKeyId != nil { + tfMap["fargate_ephemeral_storage_kms_key_id"] = aws.StringValue(apiObject.FargateEphemeralStorageKmsKeyId) + } + + if apiObject.KmsKeyId != nil { + tfMap[names.AttrKMSKeyID] = aws.StringValue(apiObject.KmsKeyId) + } + + return []interface{}{tfMap} +} + func expandClusterConfiguration(nc []interface{}) *ecs.ClusterConfiguration { if len(nc) == 0 || nc[0] == nil { return &ecs.ClusterConfiguration{} @@ -587,6 +627,10 @@ func expandClusterConfiguration(nc []interface{}) *ecs.ClusterConfiguration { config.ExecuteCommandConfiguration = expandClusterConfigurationExecuteCommandConfiguration(v) } + if v, ok := raw["managed_storage_configuration"].([]interface{}); ok && len(v) > 0 { + config.ManagedStorageConfiguration = expandManagedStorageConfiguration(v) + } + return config } @@ -642,3 +686,22 @@ func expandClusterConfigurationExecuteCommandLogConfiguration(nc []interface{}) return config } + +func expandManagedStorageConfiguration(tfList []interface{}) *ecs.ManagedStorageConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return &ecs.ManagedStorageConfiguration{} + } + + tfMap := 
tfList[0].(map[string]interface{}) + apiObject := &ecs.ManagedStorageConfiguration{} + + if v, ok := tfMap["fargate_ephemeral_storage_kms_key_id"].(string); ok && v != "" { + apiObject.FargateEphemeralStorageKmsKeyId = aws.String(v) + } + + if v, ok := tfMap[names.AttrKMSKeyID].(string); ok && v != "" { + apiObject.KmsKeyId = aws.String(v) + } + + return apiObject +} diff --git a/internal/service/ecs/cluster_test.go b/internal/service/ecs/cluster_test.go index 305b38a7109..f600554079a 100644 --- a/internal/service/ecs/cluster_test.go +++ b/internal/service/ecs/cluster_test.go @@ -265,6 +265,54 @@ func TestAccECSCluster_configuration(t *testing.T) { }) } +func TestAccECSCluster_managedStorageConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var cluster1 ecs.Cluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ecs_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ECSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_managedStorageConfiguration(rName, "aws_kms_key.test.arn", "null"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "configuration.0.managed_storage_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.managed_storage_configuration.0.fargate_ephemeral_storage_kms_key_id", "aws_kms_key.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "configuration.0.managed_storage_configuration.0.kms_key_id", ""), + ), + }, + { + ResourceName: resourceName, + ImportStateId: rName, + ImportState: true, + ImportStateVerify: true, + }, + { + 
Config: testAccClusterConfig_managedStorageConfiguration(rName, "null", "aws_kms_key.test.arn"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster1), + resource.TestCheckResourceAttr(resourceName, "configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "configuration.0.managed_storage_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "configuration.0.managed_storage_configuration.0.fargate_ephemeral_storage_kms_key_id", ""), + resource.TestCheckResourceAttrPair(resourceName, "configuration.0.managed_storage_configuration.0.kms_key_id", "aws_kms_key.test", names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportStateId: rName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ECSConn(ctx) @@ -408,3 +456,94 @@ resource "aws_ecs_cluster" "test" { } `, rName, enable) } + +func testAccClusterConfig_managedStorageConfiguration(rName, fargateEphemeralStorageKmsKeyId, kmsKeyId string) string { + return fmt.Sprintf(` +data "aws_caller_identity" "current" {} + +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 +} + +resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = jsonencode({ + Id = "ECSClusterFargatePolicy" + Statement = [ + { + Sid = "Enable IAM User Permissions" + Effect = "Allow" + Principal = { + "AWS" : "*" + } + Action = "kms:*" + Resource = "*" + }, + { + Sid = "Allow generate data key access for Fargate tasks." 
+ Effect = "Allow" + Principal = { + Service = "fargate.amazonaws.com" + } + Action = [ + "kms:GenerateDataKeyWithoutPlaintext" + ] + Condition = { + StringEquals = { + "kms:EncryptionContext:aws:ecs:clusterAccount" = [ + data.aws_caller_identity.current.account_id + ] + "kms:EncryptionContext:aws:ecs:clusterName" = [ + %[1]q + ] + } + } + Resource = "*" + }, + { + Sid = "Allow grant creation permission for Fargate tasks." + Effect = "Allow" + Principal = { + Service = "fargate.amazonaws.com" + } + Action = [ + "kms:CreateGrant" + ] + Condition = { + StringEquals = { + "kms:EncryptionContext:aws:ecs:clusterAccount" = [ + data.aws_caller_identity.current.account_id + ] + "kms:EncryptionContext:aws:ecs:clusterName" = [ + %[1]q + ] + } + "ForAllValues:StringEquals" = { + "kms:GrantOperations" = [ + "Decrypt" + ] + } + } + Resource = "*" + } + ] + Version = "2012-10-17" + }) +} + +resource "aws_ecs_cluster" "test" { + name = %[1]q + + configuration { + managed_storage_configuration { + fargate_ephemeral_storage_kms_key_id = %[2]s + kms_key_id = %[3]s + } + } + depends_on = [ + aws_kms_key_policy.test + ] +} +`, rName, fargateEphemeralStorageKmsKeyId, kmsKeyId) +} diff --git a/internal/service/ecs/find.go b/internal/service/ecs/find.go index c5dbd37e8d2..35d75d45e2b 100644 --- a/internal/service/ecs/find.go +++ b/internal/service/ecs/find.go @@ -55,14 +55,14 @@ func FindCapacityProviderByARN(ctx context.Context, conn *ecs.ECS, arn string) ( return capacityProvider, nil } -func FindServiceByID(ctx context.Context, conn *ecs.ECS, id, cluster string) (*ecs.Service, error) { +func findServiceByTwoPartKey(ctx context.Context, conn *ecs.ECS, serviceName, clusterNameOrARN string) (*ecs.Service, error) { input := &ecs.DescribeServicesInput{ - Cluster: aws.String(cluster), + Cluster: aws.String(clusterNameOrARN), Include: aws.StringSlice([]string{ecs.ServiceFieldTags}), - Services: aws.StringSlice([]string{id}), + Services: aws.StringSlice([]string{serviceName}), } - return 
FindService(ctx, conn, input) + return findService(ctx, conn, input) } func FindServiceNoTagsByID(ctx context.Context, conn *ecs.ECS, id, cluster string) (*ecs.Service, error) { @@ -73,7 +73,7 @@ func FindServiceNoTagsByID(ctx context.Context, conn *ecs.ECS, id, cluster strin input.Cluster = aws.String(cluster) } - return FindService(ctx, conn, input) + return findService(ctx, conn, input) } type expectActiveError struct { @@ -95,7 +95,7 @@ func FindServiceByIDWaitForActive(ctx context.Context, conn *ecs.ECS, id, cluste // Use the retry.RetryContext function instead of WaitForState() because we don't want the timeout error, if any err := retry.RetryContext(ctx, serviceDescribeTimeout, func() *retry.RetryError { var err error - service, err = FindServiceByID(ctx, conn, id, cluster) + service, err = findServiceByTwoPartKey(ctx, conn, id, cluster) if tfresource.NotFound(err) { return retry.RetryableError(err) } @@ -110,19 +110,26 @@ func FindServiceByIDWaitForActive(ctx context.Context, conn *ecs.ECS, id, cluste return nil }) if tfresource.TimedOut(err) { - service, err = FindServiceByID(ctx, conn, id, cluster) + service, err = findServiceByTwoPartKey(ctx, conn, id, cluster) } return service, err } -func FindService(ctx context.Context, conn *ecs.ECS, input *ecs.DescribeServicesInput) (*ecs.Service, error) { +func findService(ctx context.Context, conn *ecs.ECS, input *ecs.DescribeServicesInput) (*ecs.Service, error) { + output, err := findServices(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findServices(ctx context.Context, conn *ecs.ECS, input *ecs.DescribeServicesInput) ([]*ecs.Service, error) { output, err := conn.DescribeServicesWithContext(ctx, input) if errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) && input.Include != nil { - id := aws.StringValueSlice(input.Services)[0] - log.Printf("[WARN] failed describing ECS Service (%s) with tags: %s; retrying without tags", 
id, err) - input.Include = nil output, err = conn.DescribeServicesWithContext(ctx, input) } @@ -135,10 +142,15 @@ func FindService(ctx context.Context, conn *ecs.ECS, input *ecs.DescribeServices LastRequest: input, } } + if err != nil { return nil, err } + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + // When an ECS Service is not found by DescribeServices(), it will return a Failure struct with Reason = "MISSING" for _, v := range output.Failures { if aws.StringValue(v.Reason) == "MISSING" { @@ -148,12 +160,5 @@ func FindService(ctx context.Context, conn *ecs.ECS, input *ecs.DescribeServices } } - if len(output.Services) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - if n := len(output.Services); n > 1 { - return nil, tfresource.NewTooManyResultsError(n, input) - } - - return output.Services[0], nil + return output.Services, nil } diff --git a/internal/service/ecs/service.go b/internal/service/ecs/service.go index 820465e5af3..f14a28c4039 100644 --- a/internal/service/ecs/service.go +++ b/internal/service/ecs/service.go @@ -567,7 +567,7 @@ func ResourceService() *schema.Resource { Optional: true, }, names.AttrThroughput: { - Type: schema.TypeString, + Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntBetween(0, 1000), }, diff --git a/internal/service/ecs/service_data_source.go b/internal/service/ecs/service_data_source.go index 9c57ba5f386..5c4b5fe806d 100644 --- a/internal/service/ecs/service_data_source.go +++ b/internal/service/ecs/service_data_source.go @@ -5,28 +5,24 @@ package ecs import ( "context" - "log" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/ecs" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_ecs_service") -func DataSourceService() *schema.Resource { +// @SDKDataSource("aws_ecs_service", name="Service") +// @Tags +func dataSourceService() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceServiceRead, Schema: map[string]*schema.Schema{ - names.AttrServiceName: { - Type: schema.TypeString, - Required: true, - }, names.AttrARN: { Type: schema.TypeString, Computed: true, @@ -47,6 +43,10 @@ func DataSourceService() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrServiceName: { + Type: schema.TypeString, + Required: true, + }, "task_definition": { Type: schema.TypeString, Computed: true, @@ -59,45 +59,23 @@ func DataSourceService() *schema.Resource { func dataSourceServiceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ECSConn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - clusterArn := d.Get("cluster_arn").(string) - serviceName := d.Get(names.AttrServiceName).(string) - - params := &ecs.DescribeServicesInput{ - Cluster: aws.String(clusterArn), - Services: []*string{aws.String(serviceName)}, - } - - log.Printf("[DEBUG] Reading ECS Service: %s", params) - desc, err := conn.DescribeServicesWithContext(ctx, params) + service, err := findServiceByTwoPartKey(ctx, conn, d.Get(names.AttrServiceName).(string), d.Get("cluster_arn").(string)) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading ECS Service (%s): %s", serviceName, err) - } - - if desc == nil || len(desc.Services) == 0 { - return sdkdiag.AppendErrorf(diags, "service with name %q in cluster %q not found", serviceName, clusterArn) - } - - if len(desc.Services) > 1 { - return sdkdiag.AppendErrorf(diags, "multiple services with name %q found in cluster %q", serviceName, clusterArn) + 
return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ECS Service", err)) } - service := desc.Services[0] d.SetId(aws.StringValue(service.ServiceArn)) - - d.Set(names.AttrServiceName, service.ServiceName) d.Set(names.AttrARN, service.ServiceArn) d.Set("cluster_arn", service.ClusterArn) d.Set("desired_count", service.DesiredCount) d.Set("launch_type", service.LaunchType) d.Set("scheduling_strategy", service.SchedulingStrategy) + d.Set(names.AttrServiceName, service.ServiceName) d.Set("task_definition", service.TaskDefinition) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, service.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOut(ctx, service.Tags) return diags } diff --git a/internal/service/ecs/service_data_source_test.go b/internal/service/ecs/service_data_source_test.go index d34c9da4cd3..813a9de3dd7 100644 --- a/internal/service/ecs/service_data_source_test.go +++ b/internal/service/ecs/service_data_source_test.go @@ -33,7 +33,8 @@ func TestAccECSServiceDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "scheduling_strategy", dataSourceName, "scheduling_strategy"), resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrServiceName), resource.TestCheckResourceAttrPair(resourceName, "task_definition", dataSourceName, "task_definition"), - resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, dataSourceName, names.AttrTags), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsPercent, dataSourceName, acctest.CtTagsPercent), + resource.TestCheckResourceAttrPair(resourceName, "tags.Name", dataSourceName, "tags.Name"), ), }, }, diff --git a/internal/service/ecs/service_endpoint_resolver_gen.go b/internal/service/ecs/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..74753d5032d --- /dev/null +++ 
b/internal/service/ecs/service_endpoint_resolver_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package ecs + +import ( + "context" + "fmt" + "net" + "net/url" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ecs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecs" + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} + +var _ ecs_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver ecs_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: ecs_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params ecs_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + 
"tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up ecs endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*ecs_sdkv2.Options) { + return func(o *ecs_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ecs/service_endpoints_gen_test.go b/internal/service/ecs/service_endpoints_gen_test.go index 2e7e6ba0262..25f36a84d2e 100644 --- a/internal/service/ecs/service_endpoints_gen_test.go +++ b/internal/service/ecs/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -255,24 +257,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }) } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) 
(url.URL, error) { r := ecs_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ecs_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := ecs_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ecs_sdkv2.EndpointParameters{ @@ -280,14 +282,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -364,16 +366,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint 
%q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ecs/service_package_gen.go b/internal/service/ecs/service_package_gen.go index 1166a226303..361bc80538f 100644 --- a/internal/service/ecs/service_package_gen.go +++ b/internal/service/ecs/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ecs @@ -8,7 +8,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" ecs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ecs" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" ecs_sdkv1 "github.com/aws/aws-sdk-go/service/ecs" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -38,8 +37,10 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_ecs_container_definition", }, { - Factory: DataSourceService, + Factory: dataSourceService, TypeName: "aws_ecs_service", + Name: "Service", + Tags: &types.ServicePackageResourceTags{}, }, { Factory: DataSourceTaskDefinition, @@ -126,11 +127,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*e "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return ecs_sdkv1.New(sess.Copy(&cfg)), nil @@ -140,19 +138,10 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*e func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) 
(*ecs_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return ecs_sdkv2.NewFromConfig(cfg, func(o *ecs_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return ecs_sdkv2.NewFromConfig(cfg, + ecs_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/efs/access_point.go b/internal/service/efs/access_point.go index 1f1837682c0..990d718f7ac 100644 --- a/internal/service/efs/access_point.go +++ b/internal/service/efs/access_point.go @@ -6,24 +6,30 @@ package efs import ( "context" "log" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices 
"github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_efs_access_point", name="Access Point") // @Tags(identifierAttribute="id") -func ResourceAccessPoint() *schema.Resource { +func resourceAccessPoint() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAccessPointCreate, ReadWithoutTimeout: resourceAccessPointRead, @@ -37,6 +43,10 @@ func ResourceAccessPoint() *schema.Resource { CustomizeDiff: verify.SetTagsDiff, Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, "file_system_arn": { Type: schema.TypeString, Computed: true, @@ -46,10 +56,6 @@ func ResourceAccessPoint() *schema.Resource { Required: true, ForceNew: true, }, - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, names.AttrOwnerID: { Type: schema.TypeString, Computed: true, @@ -66,18 +72,17 @@ func ResourceAccessPoint() *schema.Resource { Required: true, ForceNew: true, }, - "uid": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, "secondary_gids": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeInt}, - Set: schema.HashInt, Optional: true, ForceNew: true, }, + "uid": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, }, }, }, @@ -89,18 +94,12 @@ func ResourceAccessPoint() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - names.AttrPath: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Computed: true, - }, "creation_info": { Type: schema.TypeList, Optional: true, + Computed: true, ForceNew: true, MaxItems: 1, - Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "owner_gid": { @@ -121,6 +120,12 @@ func 
ResourceAccessPoint() *schema.Resource { }, }, }, + names.AttrPath: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, }, }, }, @@ -132,11 +137,11 @@ func ResourceAccessPoint() *schema.Resource { func resourceAccessPointCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - fsId := d.Get(names.AttrFileSystemID).(string) - input := efs.CreateAccessPointInput{ - FileSystemId: aws.String(fsId), + fsID := d.Get(names.AttrFileSystemID).(string) + input := &efs.CreateAccessPointInput{ + FileSystemId: aws.String(fsID), Tags: getTagsIn(ctx), } @@ -148,54 +153,39 @@ func resourceAccessPointCreate(ctx context.Context, d *schema.ResourceData, meta input.RootDirectory = expandAccessPointRootDirectory(v.([]interface{})) } - log.Printf("[DEBUG] Creating EFS Access Point: %#v", input) + output, err := conn.CreateAccessPoint(ctx, input) - ap, err := conn.CreateAccessPointWithContext(ctx, &input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating EFS Access Point for File System (%s): %s", fsId, err) + return sdkdiag.AppendErrorf(diags, "creating EFS Access Point for File System (%s): %s", fsID, err) } - d.SetId(aws.StringValue(ap.AccessPointId)) + d.SetId(aws.ToString(output.AccessPointId)) if _, err := waitAccessPointCreated(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EFS access point (%s) to be available: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for EFS Access Point (%s) create: %s", d.Id(), err) } return append(diags, resourceAccessPointRead(ctx, d, meta)...) } -func resourceAccessPointUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - - // Tags only. - - return append(diags, resourceAccessPointRead(ctx, d, meta)...) 
-} - func resourceAccessPointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - resp, err := conn.DescribeAccessPointsWithContext(ctx, &efs.DescribeAccessPointsInput{ - AccessPointId: aws.String(d.Id()), - }) - if err != nil { - if tfawserr.ErrCodeEquals(err, efs.ErrCodeAccessPointNotFound) { - log.Printf("[WARN] EFS access point %q could not be found.", d.Id()) - d.SetId("") - return diags - } - return sdkdiag.AppendErrorf(diags, "reading EFS access point %s: %s", d.Id(), err) - } + ap, err := findAccessPointByID(ctx, conn, d.Id()) - if hasEmptyAccessPoints(resp) { - return sdkdiag.AppendErrorf(diags, "EFS access point %q could not be found.", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] EFS Access Point (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags } - ap := resp.AccessPoints[0] + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading EFS Access Point (%s): %s", d.Id(), err) + } d.Set(names.AttrARN, ap.AccessPointArn) - fsID := aws.StringValue(ap.FileSystemId) + fsID := aws.ToString(ap.FileSystemId) fsARN := arn.ARN{ AccountID: meta.(*conns.AWSClient).AccountID, Partition: meta.(*conns.AWSClient).Partition, @@ -218,132 +208,243 @@ func resourceAccessPointRead(ctx context.Context, d *schema.ResourceData, meta i return diags } +func resourceAccessPointUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + // Tags only. + + return append(diags, resourceAccessPointRead(ctx, d, meta)...) 
+} + func resourceAccessPointDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - log.Printf("[DEBUG] Deleting EFS access point %q", d.Id()) - _, err := conn.DeleteAccessPointWithContext(ctx, &efs.DeleteAccessPointInput{ + log.Printf("[DEBUG] Deleting EFS Access Point: %s", d.Id()) + _, err := conn.DeleteAccessPoint(ctx, &efs.DeleteAccessPointInput{ AccessPointId: aws.String(d.Id()), }) + + if errs.IsA[*awstypes.AccessPointNotFound](err) { + return diags + } + if err != nil { - if tfawserr.ErrCodeEquals(err, efs.ErrCodeAccessPointNotFound) { - return diags - } return sdkdiag.AppendErrorf(diags, "deleting EFS Access Point (%s): %s", d.Id(), err) } if _, err := waitAccessPointDeleted(ctx, conn, d.Id()); err != nil { - if tfawserr.ErrCodeEquals(err, efs.ErrCodeAccessPointNotFound) { - return diags + return sdkdiag.AppendErrorf(diags, "waiting for EFS Access Point (%s) delete: %s", d.Id(), err) + } + + return diags +} + +func findAccessPoint(ctx context.Context, conn *efs.Client, input *efs.DescribeAccessPointsInput, filter tfslices.Predicate[*awstypes.AccessPointDescription]) (*awstypes.AccessPointDescription, error) { + output, err := findAccessPoints(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findAccessPoints(ctx context.Context, conn *efs.Client, input *efs.DescribeAccessPointsInput, filter tfslices.Predicate[*awstypes.AccessPointDescription]) ([]awstypes.AccessPointDescription, error) { + var output []awstypes.AccessPointDescription + + pages := efs.NewDescribeAccessPointsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.AccessPointNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + 
return nil, err + } + + for _, v := range page.AccessPoints { + if filter(&v) { + output = append(output, v) + } } - return sdkdiag.AppendErrorf(diags, "waiting for EFS access point (%s) deletion: %s", d.Id(), err) } - log.Printf("[DEBUG] EFS access point %q deleted.", d.Id()) + return output, nil +} - return diags +func findAccessPointByID(ctx context.Context, conn *efs.Client, id string) (*awstypes.AccessPointDescription, error) { + input := &efs.DescribeAccessPointsInput{ + AccessPointId: aws.String(id), + } + + output, err := findAccessPoint(ctx, conn, input, tfslices.PredicateTrue[*awstypes.AccessPointDescription]()) + + if err != nil { + return nil, err + } + + if state := output.LifeCycleState; state == awstypes.LifeCycleStateDeleted { + return nil, &retry.NotFoundError{ + Message: string(state), + LastRequest: input, + } + } + + return output, nil } -func hasEmptyAccessPoints(aps *efs.DescribeAccessPointsOutput) bool { - if aps != nil && len(aps.AccessPoints) > 0 { - return false +func statusAccessPointLifeCycleState(ctx context.Context, conn *efs.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findAccessPointByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.LifeCycleState), nil } - return true } -func expandAccessPointPOSIXUser(pUser []interface{}) *efs.PosixUser { - if len(pUser) < 1 || pUser[0] == nil { - return nil +func waitAccessPointCreated(ctx context.Context, conn *efs.Client, id string) (*awstypes.AccessPointDescription, error) { + const ( + timeout = 10 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.LifeCycleStateCreating), + Target: enum.Slice(awstypes.LifeCycleStateAvailable), + Refresh: statusAccessPointLifeCycleState(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := 
outputRaw.(*awstypes.AccessPointDescription); ok { + return output, err } - m := pUser[0].(map[string]interface{}) + return nil, err +} - posixUser := &efs.PosixUser{ - Gid: aws.Int64(int64(m["gid"].(int))), - Uid: aws.Int64(int64(m["uid"].(int))), +func waitAccessPointDeleted(ctx context.Context, conn *efs.Client, id string) (*awstypes.AccessPointDescription, error) { + const ( + accessPointCreatedTimeout = 10 * time.Minute + accessPointDeletedTimeout = 10 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.LifeCycleStateAvailable, awstypes.LifeCycleStateDeleting), + Target: []string{}, + Refresh: statusAccessPointLifeCycleState(ctx, conn, id), + Timeout: accessPointDeletedTimeout, } - if v, ok := m["secondary_gids"].(*schema.Set); ok && len(v.List()) > 0 { - posixUser.SecondaryGids = flex.ExpandInt64Set(v) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.AccessPointDescription); ok { + return output, err } - return posixUser + return nil, err } -func expandAccessPointRootDirectory(rDir []interface{}) *efs.RootDirectory { - if len(rDir) < 1 || rDir[0] == nil { +func expandAccessPointPOSIXUser(tfList []interface{}) *awstypes.PosixUser { + if len(tfList) < 1 || tfList[0] == nil { return nil } - m := rDir[0].(map[string]interface{}) + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.PosixUser{ + Gid: aws.Int64(int64(tfMap["gid"].(int))), + Uid: aws.Int64(int64(tfMap["uid"].(int))), + } - rootDir := &efs.RootDirectory{} + if v, ok := tfMap["secondary_gids"].(*schema.Set); ok && len(v.List()) > 0 { + apiObject.SecondaryGids = flex.ExpandInt64ValueSet(v) + } - if v, ok := m[names.AttrPath]; ok { - rootDir.Path = aws.String(v.(string)) + return apiObject +} + +func expandAccessPointRootDirectory(tfList []interface{}) *awstypes.RootDirectory { + if len(tfList) < 1 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + apiObject := 
&awstypes.RootDirectory{} + + if v, ok := tfMap[names.AttrPath]; ok { + apiObject.Path = aws.String(v.(string)) } - if v, ok := m["creation_info"]; ok { - rootDir.CreationInfo = expandAccessPointRootDirectoryCreationInfo(v.([]interface{})) + if v, ok := tfMap["creation_info"]; ok { + apiObject.CreationInfo = expandAccessPointRootDirectoryCreationInfo(v.([]interface{})) } - return rootDir + return apiObject } -func expandAccessPointRootDirectoryCreationInfo(cInfo []interface{}) *efs.CreationInfo { - if len(cInfo) < 1 || cInfo[0] == nil { +func expandAccessPointRootDirectoryCreationInfo(tfList []interface{}) *awstypes.CreationInfo { + if len(tfList) < 1 || tfList[0] == nil { return nil } - m := cInfo[0].(map[string]interface{}) - - creationInfo := &efs.CreationInfo{ - OwnerGid: aws.Int64(int64(m["owner_gid"].(int))), - OwnerUid: aws.Int64(int64(m["owner_uid"].(int))), - Permissions: aws.String(m[names.AttrPermissions].(string)), + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.CreationInfo{ + OwnerGid: aws.Int64(int64(tfMap["owner_gid"].(int))), + OwnerUid: aws.Int64(int64(tfMap["owner_uid"].(int))), + Permissions: aws.String(tfMap[names.AttrPermissions].(string)), } - return creationInfo + return apiObject } -func flattenAccessPointPOSIXUser(posixUser *efs.PosixUser) []interface{} { - if posixUser == nil { +func flattenAccessPointPOSIXUser(apiObject *awstypes.PosixUser) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "gid": aws.Int64Value(posixUser.Gid), - "uid": aws.Int64Value(posixUser.Uid), - "secondary_gids": aws.Int64ValueSlice(posixUser.SecondaryGids), + tfMap := map[string]interface{}{ + "gid": aws.ToInt64(apiObject.Gid), + "uid": aws.ToInt64(apiObject.Uid), + "secondary_gids": apiObject.SecondaryGids, } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenAccessPointRootDirectory(rDir *efs.RootDirectory) []interface{} { - if rDir == nil { +func 
flattenAccessPointRootDirectory(apiObject *awstypes.RootDirectory) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - names.AttrPath: aws.StringValue(rDir.Path), - "creation_info": flattenAccessPointRootDirectoryCreationInfo(rDir.CreationInfo), + tfMap := map[string]interface{}{ + "creation_info": flattenAccessPointRootDirectoryCreationInfo(apiObject.CreationInfo), + names.AttrPath: aws.ToString(apiObject.Path), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenAccessPointRootDirectoryCreationInfo(cInfo *efs.CreationInfo) []interface{} { - if cInfo == nil { +func flattenAccessPointRootDirectoryCreationInfo(apiObject *awstypes.CreationInfo) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "owner_gid": aws.Int64Value(cInfo.OwnerGid), - "owner_uid": aws.Int64Value(cInfo.OwnerUid), - names.AttrPermissions: aws.StringValue(cInfo.Permissions), + tfMap := map[string]interface{}{ + "owner_gid": aws.ToInt64(apiObject.OwnerGid), + "owner_uid": aws.ToInt64(apiObject.OwnerUid), + names.AttrPermissions: aws.ToString(apiObject.Permissions), } - return []interface{}{m} + return []interface{}{tfMap} } diff --git a/internal/service/efs/access_point_data_source.go b/internal/service/efs/access_point_data_source.go index f5836be02f8..31c5ad2bd82 100644 --- a/internal/service/efs/access_point_data_source.go +++ b/internal/service/efs/access_point_data_source.go @@ -5,21 +5,21 @@ package efs import ( "context" - "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/efs" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags 
"github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_efs_access_point") -func DataSourceAccessPoint() *schema.Resource { +// @SDKDataSource("aws_efs_access_point", name="Access Point") +// @Tags +func dataSourceAccessPoint() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceAccessPointRead, @@ -28,15 +28,15 @@ func DataSourceAccessPoint() *schema.Resource { Type: schema.TypeString, Required: true, }, - "file_system_arn": { + names.AttrARN: { Type: schema.TypeString, Computed: true, }, - names.AttrFileSystemID: { + "file_system_arn": { Type: schema.TypeString, Computed: true, }, - names.AttrARN: { + names.AttrFileSystemID: { Type: schema.TypeString, Computed: true, }, @@ -53,14 +53,13 @@ func DataSourceAccessPoint() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "uid": { - Type: schema.TypeInt, - Computed: true, - }, "secondary_gids": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeInt}, - Set: schema.HashInt, + Computed: true, + }, + "uid": { + Type: schema.TypeInt, Computed: true, }, }, @@ -71,10 +70,6 @@ func DataSourceAccessPoint() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - names.AttrPath: { - Type: schema.TypeString, - Computed: true, - }, "creation_info": { Type: schema.TypeList, Computed: true, @@ -95,36 +90,32 @@ func DataSourceAccessPoint() *schema.Resource { }, }, }, + names.AttrPath: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - names.AttrTags: tftags.TagsSchema(), + names.AttrTags: tftags.TagsSchemaComputed(), }, } } func dataSourceAccessPointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := 
meta.(*conns.AWSClient).EFSClient(ctx) + + accessPointID := d.Get("access_point_id").(string) + ap, err := findAccessPointByID(ctx, conn, accessPointID) - resp, err := conn.DescribeAccessPointsWithContext(ctx, &efs.DescribeAccessPointsInput{ - AccessPointId: aws.String(d.Get("access_point_id").(string)), - }) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EFS access point %s: %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EFS Access Point", err)) } - if len(resp.AccessPoints) != 1 { - return sdkdiag.AppendErrorf(diags, "Search returned %d results, please revise so only one is returned", len(resp.AccessPoints)) - } - - ap := resp.AccessPoints[0] - log.Printf("[DEBUG] Found EFS access point: %#v", ap) - - d.SetId(aws.StringValue(ap.AccessPointId)) + d.SetId(aws.ToString(ap.AccessPointId)) d.Set(names.AttrARN, ap.AccessPointArn) - fsID := aws.StringValue(ap.FileSystemId) + fsID := aws.ToString(ap.FileSystemId) fsARN := arn.ARN{ AccountID: meta.(*conns.AWSClient).AccountID, Partition: meta.(*conns.AWSClient).Partition, @@ -141,9 +132,8 @@ func dataSourceAccessPointRead(ctx context.Context, d *schema.ResourceData, meta if err := d.Set("root_directory", flattenAccessPointRootDirectory(ap.RootDirectory)); err != nil { return sdkdiag.AppendErrorf(diags, "setting root_directory: %s", err) } - if err := d.Set(names.AttrTags, KeyValueTags(ctx, ap.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + + setTagsOut(ctx, ap.Tags) return diags } diff --git a/internal/service/efs/access_point_data_source_test.go b/internal/service/efs/access_point_data_source_test.go index 8cd504daab9..acd47834cdb 100644 --- a/internal/service/efs/access_point_data_source_test.go +++ b/internal/service/efs/access_point_data_source_test.go @@ -23,16 +23,16 @@ func TestAccEFSAccessPointDataSource_basic(t *testing.T) { PreCheck: func() { 
acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.EFSServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckAccessPointDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccAccessPointDataSourceConfig_basic(rName), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(dataSourceName, names.AttrID, resourceName, names.AttrID), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrOwnerID, resourceName, names.AttrOwnerID), - resource.TestCheckResourceAttrPair(dataSourceName, names.AttrTags, resourceName, names.AttrTags), resource.TestCheckResourceAttrPair(dataSourceName, "posix_user", resourceName, "posix_user"), resource.TestCheckResourceAttrPair(dataSourceName, "root_directory", resourceName, "root_directory"), + resource.TestCheckResourceAttrPair(dataSourceName, "tags.#", resourceName, "tags.#"), ), }, }, @@ -42,11 +42,19 @@ func TestAccEFSAccessPointDataSource_basic(t *testing.T) { func testAccAccessPointDataSourceConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_efs_file_system" "test" { - creation_token = "%s" + creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_access_point" "test" { file_system_id = aws_efs_file_system.test.id + + tags = { + Name = %[1]q + } } data "aws_efs_access_point" "test" { diff --git a/internal/service/efs/access_point_test.go b/internal/service/efs/access_point_test.go index 13c8f081e06..c30ed291091 100644 --- a/internal/service/efs/access_point_test.go +++ b/internal/service/efs/access_point_test.go @@ -9,21 +9,20 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/efs/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfefs "github.com/hashicorp/terraform-provider-aws/internal/service/efs" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccEFSAccessPoint_basic(t *testing.T) { ctx := acctest.Context(t) - var ap efs.AccessPointDescription + var ap awstypes.AccessPointDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_access_point.test" fsResourceName := "aws_efs_file_system.test" @@ -59,7 +58,7 @@ func TestAccEFSAccessPoint_basic(t *testing.T) { func TestAccEFSAccessPoint_Root_directory(t *testing.T) { ctx := acctest.Context(t) - var ap efs.AccessPointDescription + var ap awstypes.AccessPointDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_access_point.test" @@ -89,7 +88,7 @@ func TestAccEFSAccessPoint_Root_directory(t *testing.T) { func TestAccEFSAccessPoint_RootDirectoryCreation_info(t *testing.T) { ctx := acctest.Context(t) - var ap efs.AccessPointDescription + var ap awstypes.AccessPointDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_access_point.test" @@ -122,7 +121,7 @@ func TestAccEFSAccessPoint_RootDirectoryCreation_info(t *testing.T) { func TestAccEFSAccessPoint_POSIX_user(t *testing.T) { ctx := acctest.Context(t) - var ap efs.AccessPointDescription + var ap awstypes.AccessPointDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_access_point.test" @@ -153,7 +152,7 @@ func TestAccEFSAccessPoint_POSIX_user(t *testing.T) { func 
TestAccEFSAccessPoint_POSIXUserSecondary_gids(t *testing.T) { ctx := acctest.Context(t) - var ap efs.AccessPointDescription + var ap awstypes.AccessPointDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_access_point.test" @@ -183,7 +182,7 @@ func TestAccEFSAccessPoint_POSIXUserSecondary_gids(t *testing.T) { func TestAccEFSAccessPoint_tags(t *testing.T) { ctx := acctest.Context(t) - var ap efs.AccessPointDescription + var ap awstypes.AccessPointDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_access_point.test" @@ -229,7 +228,7 @@ func TestAccEFSAccessPoint_tags(t *testing.T) { func TestAccEFSAccessPoint_disappears(t *testing.T) { ctx := acctest.Context(t) - var ap efs.AccessPointDescription + var ap awstypes.AccessPointDescription resourceName := "aws_efs_access_point.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -253,60 +252,45 @@ func TestAccEFSAccessPoint_disappears(t *testing.T) { func testAccCheckAccessPointDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_efs_access_point" { continue } - resp, err := conn.DescribeAccessPointsWithContext(ctx, &efs.DescribeAccessPointsInput{ - AccessPointId: aws.String(rs.Primary.ID), - }) - if err != nil { - if tfawserr.ErrCodeEquals(err, efs.ErrCodeAccessPointNotFound) { - continue - } - return fmt.Errorf("Error describing EFS access point in tests: %s", err) + _, err := tfefs.FindAccessPointByID(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue } - if len(resp.AccessPoints) > 0 { - return fmt.Errorf("EFS access point %q still exists", rs.Primary.ID) + + if err != nil { + return err } + + return fmt.Errorf("EFS Access Point %s still exists", 
rs.Primary.ID) } return nil } } -func testAccCheckAccessPointExists(ctx context.Context, resourceID string, mount *efs.AccessPointDescription) resource.TestCheckFunc { +func testAccCheckAccessPointExists(ctx context.Context, n string, v *awstypes.AccessPointDescription) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceID] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceID) + return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) - fs, ok := s.RootModule().Resources[resourceID] - if !ok { - return fmt.Errorf("Not found: %s", resourceID) - } + output, err := tfefs.FindAccessPointByID(ctx, conn, rs.Primary.ID) - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) - mt, err := conn.DescribeAccessPointsWithContext(ctx, &efs.DescribeAccessPointsInput{ - AccessPointId: aws.String(fs.Primary.ID), - }) if err != nil { return err } - apId := aws.StringValue(mt.AccessPoints[0].AccessPointId) - if apId != fs.Primary.ID { - return fmt.Errorf("access point ID mismatch: %q != %q", apId, fs.Primary.ID) - } - - *mount = *mt.AccessPoints[0] + *v = *output return nil } @@ -315,7 +299,11 @@ func testAccCheckAccessPointExists(ctx context.Context, resourceID string, mount func testAccAccessPointConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_efs_file_system" "test" { - creation_token = "%s" + creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_access_point" "test" { @@ -328,6 +316,10 @@ func testAccAccessPointConfig_rootDirectory(rName, dir string) string { return fmt.Sprintf(` resource "aws_efs_file_system" "test" { creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_access_point" "test" { @@ -343,6 +335,10 @@ func testAccAccessPointConfig_rootDirectoryCreationInfo(rName, dir string) 
strin return fmt.Sprintf(` resource "aws_efs_file_system" "test" { creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_access_point" "test" { @@ -362,7 +358,11 @@ resource "aws_efs_access_point" "test" { func testAccAccessPointConfig_posixUser(rName string) string { return fmt.Sprintf(` resource "aws_efs_file_system" "test" { - creation_token = "%s" + creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_access_point" "test" { @@ -378,7 +378,11 @@ resource "aws_efs_access_point" "test" { func testAccAccessPointConfig_posixUserSecondaryGids(rName string) string { return fmt.Sprintf(` resource "aws_efs_file_system" "test" { - creation_token = "%s" + creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_access_point" "test" { @@ -396,6 +400,10 @@ func testAccAccessPointConfig_tags1(rName, tagKey1, tagValue1 string) string { return fmt.Sprintf(` resource "aws_efs_file_system" "test" { creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_access_point" "test" { @@ -412,6 +420,10 @@ func testAccAccessPointConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue return fmt.Sprintf(` resource "aws_efs_file_system" "test" { creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_access_point" "test" { diff --git a/internal/service/efs/access_points_data_source.go b/internal/service/efs/access_points_data_source.go index 950defe75aa..e63dfc1dc31 100644 --- a/internal/service/efs/access_points_data_source.go +++ b/internal/service/efs/access_points_data_source.go @@ -6,8 +6,9 @@ package efs import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -16,8 +17,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_efs_access_points") -func DataSourceAccessPoints() *schema.Resource { +// @SDKDataSource("aws_efs_access_points", name="Access Point") +func dataSourceAccessPoints() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceAccessPointsRead, @@ -43,7 +44,7 @@ func DataSourceAccessPoints() *schema.Resource { func dataSourceAccessPointsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) fileSystemID := d.Get(names.AttrFileSystemID).(string) input := &efs.DescribeAccessPointsInput{ @@ -59,8 +60,8 @@ func dataSourceAccessPointsRead(ctx context.Context, d *schema.ResourceData, met var accessPointIDs, arns []string for _, v := range output { - accessPointIDs = append(accessPointIDs, aws.StringValue(v.AccessPointId)) - arns = append(arns, aws.StringValue(v.AccessPointArn)) + accessPointIDs = append(accessPointIDs, aws.ToString(v.AccessPointId)) + arns = append(arns, aws.ToString(v.AccessPointArn)) } d.SetId(fileSystemID) @@ -70,25 +71,19 @@ func dataSourceAccessPointsRead(ctx context.Context, d *schema.ResourceData, met return diags } -func findAccessPointDescriptions(ctx context.Context, conn *efs.EFS, input *efs.DescribeAccessPointsInput) ([]*efs.AccessPointDescription, error) { - var output []*efs.AccessPointDescription +func findAccessPointDescriptions(ctx context.Context, conn *efs.Client, input *efs.DescribeAccessPointsInput) ([]awstypes.AccessPointDescription, error) { + var output []awstypes.AccessPointDescription - err := conn.DescribeAccessPointsPagesWithContext(ctx, input, func(page *efs.DescribeAccessPointsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := 
efs.NewDescribeAccessPointsPaginator(conn, input) - for _, v := range page.AccessPoints { - if v != nil { - output = append(output, v) - } - } + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - return !lastPage - }) + if err != nil { + return nil, err + } - if err != nil { - return nil, err + output = append(output, page.AccessPoints...) } return output, nil diff --git a/internal/service/efs/backup_policy.go b/internal/service/efs/backup_policy.go index c3f862f1598..103f69e349b 100644 --- a/internal/service/efs/backup_policy.go +++ b/internal/service/efs/backup_policy.go @@ -7,26 +7,31 @@ import ( "context" "fmt" "log" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_efs_backup_policy") -func ResourceBackupPolicy() *schema.Resource { +// @SDKResource("aws_efs_backup_policy", name="Backup Policy") +func resourceBackupPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBackupPolicyCreate, ReadWithoutTimeout: resourceBackupPolicyRead, UpdateWithoutTimeout: resourceBackupPolicyUpdate, DeleteWithoutTimeout: resourceBackupPolicyDelete, + Importer: 
&schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, @@ -41,15 +46,14 @@ func ResourceBackupPolicy() *schema.Resource { names.AttrStatus: { Type: schema.TypeString, Required: true, - ValidateFunc: validation.StringInSlice([]string{ - efs.StatusDisabled, - efs.StatusEnabled, - }, false), + ValidateFunc: validation.StringInSlice(enum.Slice( + awstypes.StatusDisabled, + awstypes.StatusEnabled, + ), false), }, }, }, }, - names.AttrFileSystemID: { Type: schema.TypeString, Required: true, @@ -61,12 +65,12 @@ func ResourceBackupPolicy() *schema.Resource { func resourceBackupPolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) fsID := d.Get(names.AttrFileSystemID).(string) - if err := backupPolicyPut(ctx, conn, fsID, d.Get("backup_policy").([]interface{})[0].(map[string]interface{})); err != nil { - return sdkdiag.AppendErrorf(diags, "creating EFS Backup Policy (%s): %s", fsID, err) + if err := putBackupPolicy(ctx, conn, fsID, d.Get("backup_policy").([]interface{})[0].(map[string]interface{})); err != nil { + return sdkdiag.AppendFromErr(diags, err) } d.SetId(fsID) @@ -76,9 +80,9 @@ func resourceBackupPolicyCreate(ctx context.Context, d *schema.ResourceData, met func resourceBackupPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - output, err := FindBackupPolicyByID(ctx, conn, d.Id()) + output, err := findBackupPolicyByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EFS Backup Policy (%s) not found, removing from state", d.Id()) @@ -93,7 +97,6 @@ func resourceBackupPolicyRead(ctx context.Context, d *schema.ResourceData, meta if err := d.Set("backup_policy", 
[]interface{}{flattenBackupPolicy(output)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting backup_policy: %s", err) } - d.Set(names.AttrFileSystemID, d.Id()) return diags @@ -101,10 +104,10 @@ func resourceBackupPolicyRead(ctx context.Context, d *schema.ResourceData, meta func resourceBackupPolicyUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - if err := backupPolicyPut(ctx, conn, d.Id(), d.Get("backup_policy").([]interface{})[0].(map[string]interface{})); err != nil { - return sdkdiag.AppendErrorf(diags, "updating EFS Backup Policy (%s): %s", d.Id(), err) + if err := putBackupPolicy(ctx, conn, d.Id(), d.Get("backup_policy").([]interface{})[0].(map[string]interface{})); err != nil { + return sdkdiag.AppendFromErr(diags, err) } return append(diags, resourceBackupPolicyRead(ctx, d, meta)...) @@ -112,75 +115,151 @@ func resourceBackupPolicyUpdate(ctx context.Context, d *schema.ResourceData, met func resourceBackupPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - err := backupPolicyPut(ctx, conn, d.Id(), map[string]interface{}{ - names.AttrStatus: efs.StatusDisabled, + err := putBackupPolicy(ctx, conn, d.Id(), map[string]interface{}{ + names.AttrStatus: string(awstypes.StatusDisabled), }) - if tfawserr.ErrCodeEquals(err, efs.ErrCodeFileSystemNotFound) { + if errs.IsA[*awstypes.FileSystemNotFound](err) { return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting EFS Backup Policy (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } return diags } -// backupPolicyPut attempts to update the file system's backup policy. -// Any error is returned. 
-func backupPolicyPut(ctx context.Context, conn *efs.EFS, fsID string, tfMap map[string]interface{}) error { +func putBackupPolicy(ctx context.Context, conn *efs.Client, fsID string, tfMap map[string]interface{}) error { input := &efs.PutBackupPolicyInput{ BackupPolicy: expandBackupPolicy(tfMap), FileSystemId: aws.String(fsID), } - log.Printf("[DEBUG] Putting EFS Backup Policy: %s", input) - _, err := conn.PutBackupPolicyWithContext(ctx, input) + _, err := conn.PutBackupPolicy(ctx, input) if err != nil { return fmt.Errorf("putting EFS Backup Policy (%s): %w", fsID, err) } - if aws.StringValue(input.BackupPolicy.Status) == efs.StatusEnabled { + if input.BackupPolicy.Status == awstypes.StatusEnabled { if _, err := waitBackupPolicyEnabled(ctx, conn, fsID); err != nil { - return fmt.Errorf("waiting for EFS Backup Policy (%s) to enable: %w", fsID, err) + return fmt.Errorf("waiting for EFS Backup Policy (%s) enable: %w", fsID, err) } } else { if _, err := waitBackupPolicyDisabled(ctx, conn, fsID); err != nil { - return fmt.Errorf("waiting for EFS Backup Policy (%s) to disable: %w", fsID, err) + return fmt.Errorf("waiting for EFS Backup Policy (%s) disable: %w", fsID, err) } } return nil } -func expandBackupPolicy(tfMap map[string]interface{}) *efs.BackupPolicy { +func findBackupPolicyByID(ctx context.Context, conn *efs.Client, id string) (*awstypes.BackupPolicy, error) { + input := &efs.DescribeBackupPolicyInput{ + FileSystemId: aws.String(id), + } + + output, err := conn.DescribeBackupPolicy(ctx, input) + + if errs.IsA[*awstypes.FileSystemNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.BackupPolicy == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.BackupPolicy, nil +} + +func statusBackupPolicy(ctx context.Context, conn *efs.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { 
+ output, err := findBackupPolicyByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitBackupPolicyEnabled(ctx context.Context, conn *efs.Client, id string) (*awstypes.BackupPolicy, error) { + const ( + timeout = 10 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.StatusEnabling), + Target: enum.Slice(awstypes.StatusEnabled), + Refresh: statusBackupPolicy(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.BackupPolicy); ok { + return output, err + } + + return nil, err +} + +func waitBackupPolicyDisabled(ctx context.Context, conn *efs.Client, id string) (*awstypes.BackupPolicy, error) { + const ( + timeout = 10 * time.Minute + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.StatusDisabling), + Target: enum.Slice(awstypes.StatusDisabled), + Refresh: statusBackupPolicy(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.BackupPolicy); ok { + return output, err + } + + return nil, err +} + +func expandBackupPolicy(tfMap map[string]interface{}) *awstypes.BackupPolicy { if tfMap == nil { return nil } - apiObject := &efs.BackupPolicy{} + apiObject := &awstypes.BackupPolicy{} if v, ok := tfMap[names.AttrStatus].(string); ok && v != "" { - apiObject.Status = aws.String(v) + apiObject.Status = awstypes.Status(v) } return apiObject } -func flattenBackupPolicy(apiObject *efs.BackupPolicy) map[string]interface{} { +func flattenBackupPolicy(apiObject *awstypes.BackupPolicy) map[string]interface{} { if apiObject == nil { return nil } tfMap := map[string]interface{}{} - if v := apiObject.Status; v != nil { - tfMap[names.AttrStatus] = aws.StringValue(v) - } + 
tfMap[names.AttrStatus] = apiObject.Status return tfMap } diff --git a/internal/service/efs/backup_policy_test.go b/internal/service/efs/backup_policy_test.go index dcab413dc2e..ba45f955bf6 100644 --- a/internal/service/efs/backup_policy_test.go +++ b/internal/service/efs/backup_policy_test.go @@ -8,8 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +21,7 @@ import ( func TestAccEFSBackupPolicy_basic(t *testing.T) { ctx := acctest.Context(t) - var v efs.BackupPolicy + var v awstypes.BackupPolicy resourceName := "aws_efs_backup_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -51,7 +50,7 @@ func TestAccEFSBackupPolicy_basic(t *testing.T) { func TestAccEFSBackupPolicy_Disappears_fs(t *testing.T) { ctx := acctest.Context(t) - var v efs.BackupPolicy + var v awstypes.BackupPolicy resourceName := "aws_efs_backup_policy.test" fsResourceName := "aws_efs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -76,7 +75,7 @@ func TestAccEFSBackupPolicy_Disappears_fs(t *testing.T) { func TestAccEFSBackupPolicy_update(t *testing.T) { ctx := acctest.Context(t) - var v efs.BackupPolicy + var v awstypes.BackupPolicy resourceName := "aws_efs_backup_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -119,18 +118,14 @@ func TestAccEFSBackupPolicy_update(t *testing.T) { }) } -func testAccCheckBackupPolicyExists(ctx context.Context, name string, v *efs.BackupPolicy) resource.TestCheckFunc { +func testAccCheckBackupPolicyExists(ctx context.Context, n string, v *awstypes.BackupPolicy) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("not found: %s", name) + return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("no ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) output, err := tfefs.FindBackupPolicyByID(ctx, conn, rs.Primary.ID) @@ -146,7 +141,7 @@ func testAccCheckBackupPolicyExists(ctx context.Context, name string, v *efs.Bac func testAccCheckBackupPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_efs_backup_policy" { @@ -163,11 +158,11 @@ func testAccCheckBackupPolicyDestroy(ctx context.Context) resource.TestCheckFunc return err } - if aws.StringValue(output.Status) == efs.StatusDisabled { + if output.Status == awstypes.StatusDisabled { continue } - return fmt.Errorf("Transfer Server %s still exists", rs.Primary.ID) + return fmt.Errorf("EFS Backup Policy %s still exists", rs.Primary.ID) } return nil @@ -178,6 +173,10 @@ func testAccBackupPolicyConfig_basic(rName, status string) string { return fmt.Sprintf(` resource "aws_efs_file_system" "test" { creation_token = %[1]q + + tags = { + Name = %[1]q + } } resource "aws_efs_backup_policy" "test" { diff --git a/internal/service/efs/exports_test.go b/internal/service/efs/exports_test.go new file mode 100644 index 00000000000..212f9aa441e --- /dev/null +++ b/internal/service/efs/exports_test.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package efs + +// Exports for use in tests only. 
+var ( + ResourceAccessPoint = resourceAccessPoint + ResourceBackupPolicy = resourceBackupPolicy + ResourceFileSystem = resourceFileSystem + ResourceFileSystemPolicy = resourceFileSystemPolicy + ResourceMountTarget = resourceMountTarget + ResourceReplicationConfiguration = resourceReplicationConfiguration + + FindAccessPointByID = findAccessPointByID + FindBackupPolicyByID = findBackupPolicyByID + FindFileSystemByID = findFileSystemByID + FindFileSystemPolicyByID = findFileSystemPolicyByID + FindMountTargetByID = findMountTargetByID + FindReplicationConfigurationByID = findReplicationConfigurationByID +) diff --git a/internal/service/efs/file_system.go b/internal/service/efs/file_system.go index 303c44f0a87..5824e28a33a 100644 --- a/internal/service/efs/file_system.go +++ b/internal/service/efs/file_system.go @@ -9,15 +9,17 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -28,7 +30,7 @@ import ( // @SDKResource("aws_efs_file_system", name="File System") // @Tags(identifierAttribute="id") -func ResourceFileSystem() *schema.Resource { 
+func resourceFileSystem() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceFileSystemCreate, ReadWithoutTimeout: resourceFileSystemRead, @@ -88,19 +90,19 @@ func ResourceFileSystem() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "transition_to_archive": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(efs.TransitionToArchiveRules_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.TransitionToArchiveRules](), }, "transition_to_ia": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(efs.TransitionToIARules_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.TransitionToIARules](), }, "transition_to_primary_storage_class": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(efs.TransitionToPrimaryStorageClassRules_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.TransitionToPrimaryStorageClassRules](), }, }, }, @@ -118,11 +120,11 @@ func ResourceFileSystem() *schema.Resource { Computed: true, }, "performance_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(efs.PerformanceMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.PerformanceMode](), }, "protection": { Type: schema.TypeList, @@ -135,10 +137,10 @@ func ResourceFileSystem() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validation.StringInSlice([]string{ - efs.ReplicationOverwriteProtectionEnabled, - efs.ReplicationOverwriteProtectionDisabled, - }, false), + ValidateFunc: validation.StringInSlice(enum.Slice( + awstypes.ReplicationOverwriteProtectionEnabled, + 
awstypes.ReplicationOverwriteProtectionDisabled, + ), false), }, }, }, @@ -170,10 +172,10 @@ func ResourceFileSystem() *schema.Resource { names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), "throughput_mode": { - Type: schema.TypeString, - Optional: true, - Default: efs.ThroughputModeBursting, - ValidateFunc: validation.StringInSlice(efs.ThroughputMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.ThroughputModeBursting, + ValidateDiagFunc: enum.Validate[awstypes.ThroughputMode](), }, }, } @@ -181,8 +183,7 @@ func ResourceFileSystem() *schema.Resource { func resourceFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) var creationToken string if v, ok := d.GetOk("creation_token"); ok { @@ -190,11 +191,11 @@ func resourceFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta } else { creationToken = id.UniqueId() } - throughputMode := d.Get("throughput_mode").(string) + throughputMode := awstypes.ThroughputMode(d.Get("throughput_mode").(string)) input := &efs.CreateFileSystemInput{ CreationToken: aws.String(creationToken), Tags: getTagsIn(ctx), - ThroughputMode: aws.String(throughputMode), + ThroughputMode: throughputMode, } if v, ok := d.GetOk("availability_zone_name"); ok { @@ -202,60 +203,59 @@ func resourceFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("performance_mode"); ok { - input.PerformanceMode = aws.String(v.(string)) + input.PerformanceMode = awstypes.PerformanceMode(v.(string)) } - if throughputMode == efs.ThroughputModeProvisioned { + if throughputMode == awstypes.ThroughputModeProvisioned { input.ProvisionedThroughputInMibps = aws.Float64(d.Get("provisioned_throughput_in_mibps").(float64)) } encrypted, hasEncrypted := d.GetOk(names.AttrEncrypted) - kmsKeyId, 
hasKmsKeyId := d.GetOk(names.AttrKMSKeyID) - if hasEncrypted { input.Encrypted = aws.Bool(encrypted.(bool)) } - if hasKmsKeyId { - input.KmsKeyId = aws.String(kmsKeyId.(string)) + kmsKeyID, hasKmsKeyID := d.GetOk(names.AttrKMSKeyID) + if hasKmsKeyID { + input.KmsKeyId = aws.String(kmsKeyID.(string)) } - if encrypted == false && hasKmsKeyId { + if encrypted == false && hasKmsKeyID { return sdkdiag.AppendFromErr(diags, errors.New("encrypted must be set to true when kms_key_id is specified")) } - output, err := conn.CreateFileSystemWithContext(ctx, input) + output, err := conn.CreateFileSystem(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating EFS file system: %s", err) + return sdkdiag.AppendErrorf(diags, "creating EFS File System: %s", err) } - d.SetId(aws.StringValue(output.FileSystemId)) + d.SetId(aws.ToString(output.FileSystemId)) if _, err := waitFileSystemAvailable(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EFS file system (%s) create: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for EFS File System (%s) create: %s", d.Id(), err) } if v, ok := d.GetOk("lifecycle_policy"); ok { input := &efs.PutLifecycleConfigurationInput{ FileSystemId: aws.String(d.Id()), - LifecyclePolicies: expandFileSystemLifecyclePolicies(v.([]interface{})), + LifecyclePolicies: expandLifecyclePolicies(v.([]interface{})), } - _, err := conn.PutLifecycleConfigurationWithContext(ctx, input) + _, err := conn.PutLifecycleConfiguration(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "putting EFS file system (%s) lifecycle configuration: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "putting EFS File System (%s) lifecycle configuration: %s", d.Id(), err) } } if v, ok := d.GetOk("protection"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input := expandUpdateFileSystemProtectionInput(d.Id(), v.([]interface{})[0].(map[string]interface{})) - _, err := 
conn.UpdateFileSystemProtectionWithContext(ctx, input) + _, err := conn.UpdateFileSystemProtection(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EFS file system (%s) protection: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating EFS File System (%s) protection: %s", d.Id(), err) } } @@ -264,19 +264,18 @@ func resourceFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta func resourceFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).EFSClient(ctx) - conn := meta.(*conns.AWSClient).EFSConn(ctx) - - fs, err := FindFileSystemByID(ctx, conn, d.Id()) + fs, err := findFileSystemByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] EFS file system (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] EFS File System (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EFS file system (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading EFS File System (%s): %s", d.Id(), err) } d.Set(names.AttrARN, fs.FileSystemArn) @@ -290,26 +289,26 @@ func resourceFileSystemRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("number_of_mount_targets", fs.NumberOfMountTargets) d.Set(names.AttrOwnerID, fs.OwnerId) d.Set("performance_mode", fs.PerformanceMode) - if err := d.Set("protection", flattenFileSystemProtection(fs.FileSystemProtection)); err != nil { + if err := d.Set("protection", flattenFileSystemProtectionDescription(fs.FileSystemProtection)); err != nil { return sdkdiag.AppendErrorf(diags, "setting protection: %s", err) } d.Set("provisioned_throughput_in_mibps", fs.ProvisionedThroughputInMibps) - if err := d.Set("size_in_bytes", flattenFileSystemSizeInBytes(fs.SizeInBytes)); err != nil { + if err := d.Set("size_in_bytes", 
flattenFileSystemSize(fs.SizeInBytes)); err != nil { return sdkdiag.AppendErrorf(diags, "setting size_in_bytes: %s", err) } d.Set("throughput_mode", fs.ThroughputMode) setTagsOut(ctx, fs.Tags) - output, err := conn.DescribeLifecycleConfigurationWithContext(ctx, &efs.DescribeLifecycleConfigurationInput{ + output, err := conn.DescribeLifecycleConfiguration(ctx, &efs.DescribeLifecycleConfigurationInput{ FileSystemId: aws.String(d.Id()), }) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading EFS file system (%s) lifecycle configuration: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading EFS File System (%s) lifecycle configuration: %s", d.Id(), err) } - if err := d.Set("lifecycle_policy", flattenFileSystemLifecyclePolicies(output.LifecyclePolicies)); err != nil { + if err := d.Set("lifecycle_policy", flattenLifecyclePolicies(output.LifecyclePolicies)); err != nil { return sdkdiag.AppendErrorf(diags, "setting lifecycle_policy: %s", err) } @@ -318,48 +317,47 @@ func resourceFileSystemRead(ctx context.Context, d *schema.ResourceData, meta in func resourceFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) if d.HasChanges("provisioned_throughput_in_mibps", "throughput_mode") { - throughputMode := d.Get("throughput_mode").(string) + throughputMode := awstypes.ThroughputMode(d.Get("throughput_mode").(string)) input := &efs.UpdateFileSystemInput{ FileSystemId: aws.String(d.Id()), - ThroughputMode: aws.String(throughputMode), + ThroughputMode: throughputMode, } - if throughputMode == efs.ThroughputModeProvisioned { + if throughputMode == awstypes.ThroughputModeProvisioned { input.ProvisionedThroughputInMibps = aws.Float64(d.Get("provisioned_throughput_in_mibps").(float64)) } - _, err := conn.UpdateFileSystemWithContext(ctx, input) + _, err := conn.UpdateFileSystem(ctx, input) if err != 
nil { - return sdkdiag.AppendErrorf(diags, "updating EFS file system (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating EFS File System (%s): %s", d.Id(), err) } if _, err := waitFileSystemAvailable(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EFS file system (%s) update: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for EFS File System (%s) update: %s", d.Id(), err) } } if d.HasChange("lifecycle_policy") { input := &efs.PutLifecycleConfigurationInput{ FileSystemId: aws.String(d.Id()), - LifecyclePolicies: expandFileSystemLifecyclePolicies(d.Get("lifecycle_policy").([]interface{})), + LifecyclePolicies: expandLifecyclePolicies(d.Get("lifecycle_policy").([]interface{})), } // Prevent the following error during removal: // InvalidParameter: 1 validation error(s) found. // - missing required field, PutLifecycleConfigurationInput.LifecyclePolicies. if input.LifecyclePolicies == nil { - input.LifecyclePolicies = []*efs.LifecyclePolicy{} + input.LifecyclePolicies = []awstypes.LifecyclePolicy{} } - _, err := conn.PutLifecycleConfigurationWithContext(ctx, input) + _, err := conn.PutLifecycleConfiguration(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "putting EFS file system (%s) lifecycle configuration: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "putting EFS File System (%s) lifecycle configuration: %s", d.Id(), err) } } @@ -367,10 +365,10 @@ func resourceFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta if v, ok := d.GetOk("protection"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input := expandUpdateFileSystemProtectionInput(d.Id(), v.([]interface{})[0].(map[string]interface{})) - _, err := conn.UpdateFileSystemProtectionWithContext(ctx, input) + _, err := conn.UpdateFileSystemProtection(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating EFS file system (%s) protection: %s", d.Id(), err) + return 
sdkdiag.AppendErrorf(diags, "updating EFS File System (%s) protection: %s", d.Id(), err) } } } @@ -380,81 +378,90 @@ func resourceFileSystemUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceFileSystemDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).EFSClient(ctx) - conn := meta.(*conns.AWSClient).EFSConn(ctx) - - log.Printf("[DEBUG] Deleting EFS file system: %s", d.Id()) - _, err := conn.DeleteFileSystemWithContext(ctx, &efs.DeleteFileSystemInput{ + log.Printf("[DEBUG] Deleting EFS File System: %s", d.Id()) + _, err := conn.DeleteFileSystem(ctx, &efs.DeleteFileSystemInput{ FileSystemId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, efs.ErrCodeFileSystemNotFound) { + if errs.IsA[*awstypes.FileSystemNotFound](err) { return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting EFS file system (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting EFS File System (%s): %s", d.Id(), err) } if _, err := waitFileSystemDeleted(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for EFS file system (%s) delete: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for EFS File System (%s) delete: %s", d.Id(), err) } return diags } -func findFileSystem(ctx context.Context, conn *efs.EFS, input *efs.DescribeFileSystemsInput, filter tfslices.Predicate[*efs.FileSystemDescription]) (*efs.FileSystemDescription, error) { +func findFileSystem(ctx context.Context, conn *efs.Client, input *efs.DescribeFileSystemsInput, filter tfslices.Predicate[*awstypes.FileSystemDescription]) (*awstypes.FileSystemDescription, error) { output, err := findFileSystems(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findFileSystems(ctx context.Context, conn *efs.EFS, input 
*efs.DescribeFileSystemsInput, filter tfslices.Predicate[*efs.FileSystemDescription]) ([]*efs.FileSystemDescription, error) { - var output []*efs.FileSystemDescription +func findFileSystems(ctx context.Context, conn *efs.Client, input *efs.DescribeFileSystemsInput, filter tfslices.Predicate[*awstypes.FileSystemDescription]) ([]awstypes.FileSystemDescription, error) { + var output []awstypes.FileSystemDescription + + pages := efs.NewDescribeFileSystemsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - err := conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *efs.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + if errs.IsA[*awstypes.FileSystemNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err } for _, v := range page.FileSystems { - if v != nil && filter(v) { + if filter(&v) { output = append(output, v) } } + } - return !lastPage - }) + return output, nil +} - if tfawserr.ErrCodeEquals(err, efs.ErrCodeFileSystemNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } +func findFileSystemByID(ctx context.Context, conn *efs.Client, id string) (*awstypes.FileSystemDescription, error) { + input := &efs.DescribeFileSystemsInput{ + FileSystemId: aws.String(id), } + output, err := findFileSystem(ctx, conn, input, tfslices.PredicateTrue[*awstypes.FileSystemDescription]()) + if err != nil { return nil, err } - return output, nil -} - -func FindFileSystemByID(ctx context.Context, conn *efs.EFS, id string) (*efs.FileSystemDescription, error) { - input := &efs.DescribeFileSystemsInput{ - FileSystemId: aws.String(id), + if state := output.LifeCycleState; state == awstypes.LifeCycleStateDeleted { + return nil, &retry.NotFoundError{ + Message: string(state), + LastRequest: input, + } } - return findFileSystem(ctx, conn, input, 
tfslices.PredicateTrue[*efs.FileSystemDescription]()) + return output, nil } -func statusFileSystemLifeCycleState(ctx context.Context, conn *efs.EFS, id string) retry.StateRefreshFunc { +func statusFileSystemLifeCycleState(ctx context.Context, conn *efs.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindFileSystemByID(ctx, conn, id) + output, err := findFileSystemByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -464,78 +471,63 @@ func statusFileSystemLifeCycleState(ctx context.Context, conn *efs.EFS, id strin return nil, "", err } - return output, aws.StringValue(output.LifeCycleState), nil + return output, string(output.LifeCycleState), nil } } -const ( - fileSystemAvailableTimeout = 10 * time.Minute - fileSystemAvailableDelayTimeout = 2 * time.Second - fileSystemAvailableMinTimeout = 3 * time.Second - fileSystemDeletedTimeout = 10 * time.Minute - fileSystemDeletedDelayTimeout = 2 * time.Second - fileSystemDeletedMinTimeout = 3 * time.Second -) - -func waitFileSystemAvailable(ctx context.Context, conn *efs.EFS, fileSystemID string) (*efs.FileSystemDescription, error) { //nolint:unparam +func waitFileSystemAvailable(ctx context.Context, conn *efs.Client, fileSystemID string) (*awstypes.FileSystemDescription, error) { //nolint:unparam + const ( + timeout = 10 * time.Minute + ) stateConf := &retry.StateChangeConf{ - Pending: []string{efs.LifeCycleStateCreating, efs.LifeCycleStateUpdating}, - Target: []string{efs.LifeCycleStateAvailable}, + Pending: enum.Slice(awstypes.LifeCycleStateCreating, awstypes.LifeCycleStateUpdating), + Target: enum.Slice(awstypes.LifeCycleStateAvailable), Refresh: statusFileSystemLifeCycleState(ctx, conn, fileSystemID), - Timeout: fileSystemAvailableTimeout, - Delay: fileSystemAvailableDelayTimeout, - MinTimeout: fileSystemAvailableMinTimeout, + Timeout: timeout, + Delay: 2 * time.Second, + MinTimeout: 3 * time.Second, } outputRaw, err := 
stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*efs.FileSystemDescription); ok { + if output, ok := outputRaw.(*awstypes.FileSystemDescription); ok { return output, err } return nil, err } -func waitFileSystemDeleted(ctx context.Context, conn *efs.EFS, fileSystemID string) (*efs.FileSystemDescription, error) { +func waitFileSystemDeleted(ctx context.Context, conn *efs.Client, fileSystemID string) (*awstypes.FileSystemDescription, error) { + const ( + timeout = 10 * time.Minute + ) stateConf := &retry.StateChangeConf{ - Pending: []string{efs.LifeCycleStateAvailable, efs.LifeCycleStateDeleting}, + Pending: enum.Slice(awstypes.LifeCycleStateAvailable, awstypes.LifeCycleStateDeleting), Target: []string{}, Refresh: statusFileSystemLifeCycleState(ctx, conn, fileSystemID), - Timeout: fileSystemDeletedTimeout, - Delay: fileSystemDeletedDelayTimeout, - MinTimeout: fileSystemDeletedMinTimeout, + Timeout: timeout, + Delay: 2 * time.Second, + MinTimeout: 3 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*efs.FileSystemDescription); ok { + if output, ok := outputRaw.(*awstypes.FileSystemDescription); ok { return output, err } return nil, err } -func flattenFileSystemLifecyclePolicies(apiObjects []*efs.LifecyclePolicy) []interface{} { +func flattenLifecyclePolicies(apiObjects []awstypes.LifecyclePolicy) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfMap := make(map[string]interface{}) - if apiObject.TransitionToArchive != nil { - tfMap["transition_to_archive"] = aws.StringValue(apiObject.TransitionToArchive) - } - - if apiObject.TransitionToIA != nil { - tfMap["transition_to_ia"] = aws.StringValue(apiObject.TransitionToIA) - } - - if apiObject.TransitionToPrimaryStorageClass != nil { - tfMap["transition_to_primary_storage_class"] = aws.StringValue(apiObject.TransitionToPrimaryStorageClass) - } + tfMap["transition_to_archive"] = 
apiObject.TransitionToArchive + tfMap["transition_to_ia"] = apiObject.TransitionToIA + tfMap["transition_to_primary_storage_class"] = apiObject.TransitionToPrimaryStorageClass tfList = append(tfList, tfMap) } @@ -543,28 +535,27 @@ func flattenFileSystemLifecyclePolicies(apiObjects []*efs.LifecyclePolicy) []int return tfList } -func expandFileSystemLifecyclePolicies(tfList []interface{}) []*efs.LifecyclePolicy { - var apiObjects []*efs.LifecyclePolicy +func expandLifecyclePolicies(tfList []interface{}) []awstypes.LifecyclePolicy { + var apiObjects []awstypes.LifecyclePolicy for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { continue } - apiObject := &efs.LifecyclePolicy{} + apiObject := awstypes.LifecyclePolicy{} if v, ok := tfMap["transition_to_archive"].(string); ok && v != "" { - apiObject.TransitionToArchive = aws.String(v) + apiObject.TransitionToArchive = awstypes.TransitionToArchiveRules(v) } if v, ok := tfMap["transition_to_ia"].(string); ok && v != "" { - apiObject.TransitionToIA = aws.String(v) + apiObject.TransitionToIA = awstypes.TransitionToIARules(v) } if v, ok := tfMap["transition_to_primary_storage_class"].(string); ok && v != "" { - apiObject.TransitionToPrimaryStorageClass = aws.String(v) + apiObject.TransitionToPrimaryStorageClass = awstypes.TransitionToPrimaryStorageClassRules(v) } apiObjects = append(apiObjects, apiObject) @@ -573,52 +564,50 @@ func expandFileSystemLifecyclePolicies(tfList []interface{}) []*efs.LifecyclePol return apiObjects } -func flattenFileSystemSizeInBytes(sizeInBytes *efs.FileSystemSize) []interface{} { - if sizeInBytes == nil { +func flattenFileSystemSize(apiObject *awstypes.FileSystemSize) []interface{} { + if apiObject == nil { return []interface{}{} } m := map[string]interface{}{ - names.AttrValue: aws.Int64Value(sizeInBytes.Value), + names.AttrValue: apiObject.Value, } - if sizeInBytes.ValueInIA != nil { - m["value_in_ia"] = aws.Int64Value(sizeInBytes.ValueInIA) + if 
apiObject.ValueInIA != nil { + m["value_in_ia"] = aws.ToInt64(apiObject.ValueInIA) } - if sizeInBytes.ValueInStandard != nil { - m["value_in_standard"] = aws.Int64Value(sizeInBytes.ValueInStandard) + if apiObject.ValueInStandard != nil { + m["value_in_standard"] = aws.ToInt64(apiObject.ValueInStandard) } return []interface{}{m} } -func expandUpdateFileSystemProtectionInput(id string, tfMap map[string]interface{}) *efs.UpdateFileSystemProtectionInput { +func expandUpdateFileSystemProtectionInput(fileSystemID string, tfMap map[string]interface{}) *efs.UpdateFileSystemProtectionInput { if tfMap == nil { return nil } apiObject := &efs.UpdateFileSystemProtectionInput{ - FileSystemId: aws.String(id), + FileSystemId: aws.String(fileSystemID), } if v, ok := tfMap["replication_overwrite"].(string); ok && v != "" { - apiObject.ReplicationOverwriteProtection = aws.String(v) + apiObject.ReplicationOverwriteProtection = awstypes.ReplicationOverwriteProtection(v) } return apiObject } -func flattenFileSystemProtection(protection *efs.FileSystemProtectionDescription) []interface{} { - if protection == nil { +func flattenFileSystemProtectionDescription(apiObject *awstypes.FileSystemProtectionDescription) []interface{} { + if apiObject == nil { return []interface{}{} } tfMap := map[string]interface{}{} - if protection.ReplicationOverwriteProtection != nil { - tfMap["replication_overwrite"] = aws.StringValue(protection.ReplicationOverwriteProtection) - } + tfMap["replication_overwrite"] = apiObject.ReplicationOverwriteProtection return []interface{}{tfMap} } diff --git a/internal/service/efs/file_system_data_source.go b/internal/service/efs/file_system_data_source.go index 26ab6c1677f..f045aa36a4b 100644 --- a/internal/service/efs/file_system_data_source.go +++ b/internal/service/efs/file_system_data_source.go @@ -6,8 +6,9 @@ package efs import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" + "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -19,8 +20,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_efs_file_system") -func DataSourceFileSystem() *schema.Resource { +// @SDKDataSource("aws_efs_file_system", name="File System") +// @Tags +func dataSourceFileSystem() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceFileSystemRead, @@ -119,7 +121,7 @@ func DataSourceFileSystem() *schema.Resource { func dataSourceFileSystemRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig input := &efs.DescribeFileSystemsInput{} @@ -132,10 +134,10 @@ func dataSourceFileSystemRead(ctx context.Context, d *schema.ResourceData, meta input.FileSystemId = aws.String(v.(string)) } - filter := tfslices.PredicateTrue[*efs.FileSystemDescription]() + filter := tfslices.PredicateTrue[*awstypes.FileSystemDescription]() if tagsToMatch := tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{})).IgnoreAWS().IgnoreConfig(ignoreTagsConfig); len(tagsToMatch) > 0 { - filter = func(v *efs.FileSystemDescription) bool { + filter = func(v *awstypes.FileSystemDescription) bool { return KeyValueTags(ctx, v.Tags).ContainsAll(tagsToMatch) } } @@ -143,10 +145,10 @@ func dataSourceFileSystemRead(ctx context.Context, d *schema.ResourceData, meta fs, err := findFileSystem(ctx, conn, input, filter) if err != nil { - return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("EFS file system", err)) + return sdkdiag.AppendFromErr(diags, 
tfresource.SingularDataSourceFindError("EFS File System", err)) } - fsID := aws.StringValue(fs.FileSystemId) + fsID := aws.ToString(fs.FileSystemId) d.SetId(fsID) d.Set(names.AttrARN, fs.FileSystemArn) d.Set("availability_zone_id", fs.AvailabilityZoneId) @@ -158,7 +160,7 @@ func dataSourceFileSystemRead(ctx context.Context, d *schema.ResourceData, meta d.Set(names.AttrKMSKeyID, fs.KmsKeyId) d.Set(names.AttrName, fs.Name) d.Set("performance_mode", fs.PerformanceMode) - if err := d.Set("protection", flattenFileSystemProtection(fs.FileSystemProtection)); err != nil { + if err := d.Set("protection", flattenFileSystemProtectionDescription(fs.FileSystemProtection)); err != nil { return sdkdiag.AppendErrorf(diags, "setting protection: %s", err) } d.Set("provisioned_throughput_in_mibps", fs.ProvisionedThroughputInMibps) @@ -167,19 +169,17 @@ func dataSourceFileSystemRead(ctx context.Context, d *schema.ResourceData, meta } d.Set("throughput_mode", fs.ThroughputMode) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, fs.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOut(ctx, fs.Tags) - res, err := conn.DescribeLifecycleConfigurationWithContext(ctx, &efs.DescribeLifecycleConfigurationInput{ - FileSystemId: fs.FileSystemId, + output, err := conn.DescribeLifecycleConfiguration(ctx, &efs.DescribeLifecycleConfigurationInput{ + FileSystemId: aws.String(d.Id()), }) + if err != nil { - return sdkdiag.AppendErrorf(diags, "describing lifecycle configuration for EFS file system (%s): %s", - aws.StringValue(fs.FileSystemId), err) + return sdkdiag.AppendErrorf(diags, "reading EFS File System (%s) lifecycle configuration: %s", d.Id(), err) } - if err := d.Set("lifecycle_policy", flattenFileSystemLifecyclePolicies(res.LifecyclePolicies)); err != nil { + if err := d.Set("lifecycle_policy", flattenLifecyclePolicies(output.LifecyclePolicies)); err != nil { return sdkdiag.AppendErrorf(diags, 
"setting lifecycle_policy: %s", err) } diff --git a/internal/service/efs/file_system_data_source_test.go b/internal/service/efs/file_system_data_source_test.go index 15656aabebd..820c40f5ba8 100644 --- a/internal/service/efs/file_system_data_source_test.go +++ b/internal/service/efs/file_system_data_source_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -133,7 +133,7 @@ func TestAccEFSFileSystemDataSource_availabilityZone(t *testing.T) { func TestAccEFSFileSystemDataSource_nonExistent_tags(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -150,7 +150,7 @@ func TestAccEFSFileSystemDataSource_nonExistent_tags(t *testing.T) { }, { Config: testAccFileSystemDataSourceConfig_tagsNonExistent(rName), - ExpectError: regexache.MustCompile(`no matching EFS file system found`), + ExpectError: regexache.MustCompile(`no matching EFS File System found`), }, }, }) @@ -213,16 +213,7 @@ data "aws_efs_file_system" "test" { } ` -const testAccFileSystemDataSourceConfig_availabilityZone = ` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - +var testAccFileSystemDataSourceConfig_availabilityZone = acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), ` resource "aws_efs_file_system" "test" { availability_zone_name = data.aws_availability_zones.available.names[0] } @@ -230,4 +221,4 @@ resource "aws_efs_file_system" "test" { data "aws_efs_file_system" "test" { file_system_id = 
aws_efs_file_system.test.id } -` +`) diff --git a/internal/service/efs/file_system_policy.go b/internal/service/efs/file_system_policy.go index 8f19ab94c41..2d45dd23ba2 100644 --- a/internal/service/efs/file_system_policy.go +++ b/internal/service/efs/file_system_policy.go @@ -7,22 +7,24 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_efs_file_system_policy") -func ResourceFileSystemPolicy() *schema.Resource { +// @SDKResource("aws_efs_file_system_policy", name="File System Policy") +func resourceFileSystemPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceFileSystemPolicyPut, ReadWithoutTimeout: resourceFileSystemPolicyRead, @@ -61,7 +63,7 @@ func ResourceFileSystemPolicy() *schema.Resource { func resourceFileSystemPolicyPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) policy, err := 
structure.NormalizeJsonString(d.Get(names.AttrPolicy).(string)) if err != nil { @@ -70,14 +72,14 @@ func resourceFileSystemPolicyPut(ctx context.Context, d *schema.ResourceData, me fsID := d.Get(names.AttrFileSystemID).(string) input := &efs.PutFileSystemPolicyInput{ - BypassPolicyLockoutSafetyCheck: aws.Bool(d.Get("bypass_policy_lockout_safety_check").(bool)), + BypassPolicyLockoutSafetyCheck: d.Get("bypass_policy_lockout_safety_check").(bool), FileSystemId: aws.String(fsID), Policy: aws.String(policy), } - _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, func() (interface{}, error) { - return conn.PutFileSystemPolicyWithContext(ctx, input) - }, efs.ErrCodeInvalidPolicyException, "Policy contains invalid Principal block") + _, err = tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidPolicyException](ctx, propagationTimeout, func() (interface{}, error) { + return conn.PutFileSystemPolicy(ctx, input) + }, "Policy contains invalid Principal block") if err != nil { return sdkdiag.AppendErrorf(diags, "putting EFS File System Policy (%s): %s", fsID, err) @@ -92,9 +94,9 @@ func resourceFileSystemPolicyPut(ctx context.Context, d *schema.ResourceData, me func resourceFileSystemPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - output, err := FindFileSystemPolicyByID(ctx, conn, d.Id()) + output, err := findFileSystemPolicyByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EFS File System Policy (%s) not found, removing from state", d.Id()) @@ -108,7 +110,7 @@ func resourceFileSystemPolicyRead(ctx context.Context, d *schema.ResourceData, m d.Set(names.AttrFileSystemID, output.FileSystemId) - policyToSet, err := verify.SecondJSONUnlessEquivalent(d.Get(names.AttrPolicy).(string), aws.StringValue(output.Policy)) + policyToSet, err := 
verify.SecondJSONUnlessEquivalent(d.Get(names.AttrPolicy).(string), aws.ToString(output.Policy)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -125,14 +127,14 @@ func resourceFileSystemPolicyRead(ctx context.Context, d *schema.ResourceData, m func resourceFileSystemPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) log.Printf("[DEBUG] Deleting EFS File System Policy: %s", d.Id()) - _, err := conn.DeleteFileSystemPolicyWithContext(ctx, &efs.DeleteFileSystemPolicyInput{ + _, err := conn.DeleteFileSystemPolicy(ctx, &efs.DeleteFileSystemPolicyInput{ FileSystemId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, efs.ErrCodeFileSystemNotFound) { + if errs.IsA[*awstypes.FileSystemNotFound](err) { return diags } @@ -142,3 +144,28 @@ func resourceFileSystemPolicyDelete(ctx context.Context, d *schema.ResourceData, return diags } + +func findFileSystemPolicyByID(ctx context.Context, conn *efs.Client, id string) (*efs.DescribeFileSystemPolicyOutput, error) { + input := &efs.DescribeFileSystemPolicyInput{ + FileSystemId: aws.String(id), + } + + output, err := conn.DescribeFileSystemPolicy(ctx, input) + + if errs.IsA[*awstypes.FileSystemNotFound](err) || errs.IsA[*awstypes.PolicyNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} diff --git a/internal/service/efs/file_system_policy_test.go b/internal/service/efs/file_system_policy_test.go index d45621daeb6..16390fdff97 100644 --- a/internal/service/efs/file_system_policy_test.go +++ b/internal/service/efs/file_system_policy_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/efs" + 
"github.com/aws/aws-sdk-go-v2/service/efs" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -173,7 +173,7 @@ func TestAccEFSFileSystemPolicy_equivalentPoliciesIAMPolicyDoc(t *testing.T) { func testAccCheckFileSystemPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_efs_file_system_policy" { @@ -204,11 +204,7 @@ func testAccCheckFileSystemPolicyExists(ctx context.Context, n string, v *efs.De return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No EFS File System Policy ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) output, err := tfefs.FindFileSystemPolicyByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/efs/file_system_test.go b/internal/service/efs/file_system_test.go index 6fca0ea414b..3496ef6eb6d 100644 --- a/internal/service/efs/file_system_test.go +++ b/internal/service/efs/file_system_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccEFSFileSystem_basic(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" resource.ParallelTest(t, resource.TestCase{ @@ 
-51,7 +51,7 @@ func TestAccEFSFileSystem_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "size_in_bytes.0.value_in_ia"), resource.TestCheckResourceAttrSet(resourceName, "size_in_bytes.0.value_in_standard"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "throughput_mode", efs.ThroughputModeBursting), + resource.TestCheckResourceAttr(resourceName, "throughput_mode", string(awstypes.ThroughputModeBursting)), ), }, { @@ -65,7 +65,7 @@ func TestAccEFSFileSystem_basic(t *testing.T) { func TestAccEFSFileSystem_disappears(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" resource.ParallelTest(t, resource.TestCase{ @@ -88,7 +88,7 @@ func TestAccEFSFileSystem_disappears(t *testing.T) { func TestAccEFSFileSystem_performanceMode(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" resource.ParallelTest(t, resource.TestCase{ @@ -115,7 +115,7 @@ func TestAccEFSFileSystem_performanceMode(t *testing.T) { func TestAccEFSFileSystem_protection(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" resource.ParallelTest(t, resource.TestCase{ @@ -150,7 +150,7 @@ func TestAccEFSFileSystem_protection(t *testing.T) { func TestAccEFSFileSystem_availabilityZoneName(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" rName := sdkacctest.RandomWithPrefix("tf-acc") @@ -179,7 +179,7 @@ func TestAccEFSFileSystem_availabilityZoneName(t *testing.T) { func TestAccEFSFileSystem_tags(t *testing.T) { ctx := acctest.Context(t) - var desc 
efs.FileSystemDescription + var desc awstypes.FileSystemDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_file_system.test" @@ -242,7 +242,7 @@ func TestAccEFSFileSystem_tags(t *testing.T) { func TestAccEFSFileSystem_kmsKey(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) kmsKeyResourceName := "aws_kms_key.test" resourceName := "aws_efs_file_system.test" @@ -293,7 +293,7 @@ func TestAccEFSFileSystem_kmsWithoutEncryption(t *testing.T) { func TestAccEFSFileSystem_provisionedThroughputInMibps(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" resource.ParallelTest(t, resource.TestCase{ @@ -307,7 +307,7 @@ func TestAccEFSFileSystem_provisionedThroughputInMibps(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), resource.TestCheckResourceAttr(resourceName, "provisioned_throughput_in_mibps", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "throughput_mode", efs.ThroughputModeProvisioned), + resource.TestCheckResourceAttr(resourceName, "throughput_mode", string(awstypes.ThroughputModeProvisioned)), ), }, { @@ -315,7 +315,7 @@ func TestAccEFSFileSystem_provisionedThroughputInMibps(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), resource.TestCheckResourceAttr(resourceName, "provisioned_throughput_in_mibps", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "throughput_mode", efs.ThroughputModeProvisioned), + resource.TestCheckResourceAttr(resourceName, "throughput_mode", string(awstypes.ThroughputModeProvisioned)), ), }, { @@ -329,7 +329,7 @@ func TestAccEFSFileSystem_provisionedThroughputInMibps(t *testing.T) { func TestAccEFSFileSystem_throughputMode(t 
*testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" resource.ParallelTest(t, resource.TestCase{ @@ -343,15 +343,15 @@ func TestAccEFSFileSystem_throughputMode(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), resource.TestCheckResourceAttr(resourceName, "provisioned_throughput_in_mibps", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "throughput_mode", efs.ThroughputModeProvisioned), + resource.TestCheckResourceAttr(resourceName, "throughput_mode", string(awstypes.ThroughputModeProvisioned)), ), }, { - Config: testAccFileSystemConfig_throughputMode(efs.ThroughputModeBursting), + Config: testAccFileSystemConfig_throughputMode(string(awstypes.ThroughputModeBursting)), Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), resource.TestCheckResourceAttr(resourceName, "provisioned_throughput_in_mibps", acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "throughput_mode", efs.ThroughputModeBursting), + resource.TestCheckResourceAttr(resourceName, "throughput_mode", string(awstypes.ThroughputModeBursting)), ), }, { @@ -365,7 +365,7 @@ func TestAccEFSFileSystem_throughputMode(t *testing.T) { func TestAccEFSFileSystem_lifecyclePolicy(t *testing.T) { ctx := acctest.Context(t) - var desc efs.FileSystemDescription + var desc awstypes.FileSystemDescription resourceName := "aws_efs_file_system.test" resource.ParallelTest(t, resource.TestCase{ @@ -384,13 +384,13 @@ func TestAccEFSFileSystem_lifecyclePolicy(t *testing.T) { { Config: testAccFileSystemConfig_lifecyclePolicy( "transition_to_ia", - efs.TransitionToIARulesAfter30Days, + string(awstypes.TransitionToIARulesAfter30Days), ), Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.#", acctest.Ct1), 
resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_archive", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_ia", efs.TransitionToIARulesAfter30Days), + resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_ia", string(awstypes.TransitionToIARulesAfter30Days)), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_primary_storage_class", ""), ), }, @@ -402,14 +402,14 @@ func TestAccEFSFileSystem_lifecyclePolicy(t *testing.T) { { Config: testAccFileSystemConfig_lifecyclePolicy( "transition_to_primary_storage_class", - efs.TransitionToPrimaryStorageClassRulesAfter1Access, + string(awstypes.TransitionToPrimaryStorageClassRulesAfter1Access), ), Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_archive", ""), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_ia", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_primary_storage_class", efs.TransitionToPrimaryStorageClassRulesAfter1Access), + resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_primary_storage_class", string(awstypes.TransitionToPrimaryStorageClassRulesAfter1Access)), ), }, { @@ -422,40 +422,40 @@ func TestAccEFSFileSystem_lifecyclePolicy(t *testing.T) { { Config: testAccFileSystemConfig_lifecyclePolicyMulti( "transition_to_primary_storage_class", - efs.TransitionToPrimaryStorageClassRulesAfter1Access, + string(awstypes.TransitionToPrimaryStorageClassRulesAfter1Access), "transition_to_ia", - efs.TransitionToIARulesAfter30Days, + string(awstypes.TransitionToIARulesAfter30Days), ), Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), 
resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.#", acctest.Ct2), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_archive", ""), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_ia", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_primary_storage_class", efs.TransitionToPrimaryStorageClassRulesAfter1Access), + resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_primary_storage_class", string(awstypes.TransitionToPrimaryStorageClassRulesAfter1Access)), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_archive", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_ia", efs.TransitionToIARulesAfter30Days), + resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_ia", string(awstypes.TransitionToIARulesAfter30Days)), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_primary_storage_class", ""), ), }, { Config: testAccFileSystemConfig_lifecyclePolicyAll( "transition_to_primary_storage_class", - efs.TransitionToPrimaryStorageClassRulesAfter1Access, + string(awstypes.TransitionToPrimaryStorageClassRulesAfter1Access), "transition_to_ia", - efs.TransitionToIARulesAfter30Days, + string(awstypes.TransitionToIARulesAfter30Days), "transition_to_archive", - efs.TransitionToArchiveRulesAfter60Days, + string(awstypes.TransitionToArchiveRulesAfter60Days), ), Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.#", acctest.Ct3), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_archive", ""), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_ia", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_primary_storage_class", 
efs.TransitionToPrimaryStorageClassRulesAfter1Access), + resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_primary_storage_class", string(awstypes.TransitionToPrimaryStorageClassRulesAfter1Access)), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_archive", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_ia", efs.TransitionToIARulesAfter30Days), + resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_ia", string(awstypes.TransitionToIARulesAfter30Days)), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_primary_storage_class", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.2.transition_to_archive", efs.TransitionToArchiveRulesAfter60Days), + resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.2.transition_to_archive", string(awstypes.TransitionToArchiveRulesAfter60Days)), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.2.transition_to_ia", ""), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.2.transition_to_primary_storage_class", ""), ), @@ -463,17 +463,17 @@ func TestAccEFSFileSystem_lifecyclePolicy(t *testing.T) { { Config: testAccFileSystemConfig_lifecyclePolicyTransitionToArchive( "transition_to_ia", - efs.TransitionToIARulesAfter30Days, + string(awstypes.TransitionToIARulesAfter30Days), "transition_to_archive", - efs.TransitionToArchiveRulesAfter60Days, + string(awstypes.TransitionToArchiveRulesAfter60Days), ), Check: resource.ComposeTestCheckFunc( testAccCheckFileSystem(ctx, resourceName, &desc), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.#", acctest.Ct2), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_archive", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_ia", efs.TransitionToIARulesAfter30Days), + resource.TestCheckResourceAttr(resourceName, 
"lifecycle_policy.0.transition_to_ia", string(awstypes.TransitionToIARulesAfter30Days)), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.0.transition_to_primary_storage_class", ""), - resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_archive", efs.TransitionToArchiveRulesAfter60Days), + resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_archive", string(awstypes.TransitionToArchiveRulesAfter60Days)), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_ia", ""), resource.TestCheckResourceAttr(resourceName, "lifecycle_policy.1.transition_to_primary_storage_class", ""), ), @@ -484,7 +484,7 @@ func TestAccEFSFileSystem_lifecyclePolicy(t *testing.T) { func testAccCheckFileSystemDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_efs_file_system" { continue @@ -500,21 +500,21 @@ func testAccCheckFileSystemDestroy(ctx context.Context) resource.TestCheckFunc { return err } - return fmt.Errorf("EFS file system %s still exists", rs.Primary.ID) + return fmt.Errorf("EFS File System %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckFileSystem(ctx context.Context, n string, v *efs.FileSystemDescription) resource.TestCheckFunc { +func testAccCheckFileSystem(ctx context.Context, n string, v *awstypes.FileSystemDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) output, err := tfefs.FindFileSystemByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/efs/find.go 
b/internal/service/efs/find.go deleted file mode 100644 index 4cbf9a45aba..00000000000 --- a/internal/service/efs/find.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package efs - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindBackupPolicyByID(ctx context.Context, conn *efs.EFS, id string) (*efs.BackupPolicy, error) { - input := &efs.DescribeBackupPolicyInput{ - FileSystemId: aws.String(id), - } - - output, err := conn.DescribeBackupPolicyWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, efs.ErrCodeFileSystemNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.BackupPolicy == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.BackupPolicy, nil -} - -func FindFileSystemPolicyByID(ctx context.Context, conn *efs.EFS, id string) (*efs.DescribeFileSystemPolicyOutput, error) { - input := &efs.DescribeFileSystemPolicyInput{ - FileSystemId: aws.String(id), - } - - output, err := conn.DescribeFileSystemPolicyWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, efs.ErrCodeFileSystemNotFound) || tfawserr.ErrCodeEquals(err, efs.ErrCodePolicyNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} diff --git a/internal/service/efs/generate.go b/internal/service/efs/generate.go index d9782cec2ac..bca48156cad 100644 --- a/internal/service/efs/generate.go +++ b/internal/service/efs/generate.go @@ -1,8 +1,7 @@ // Copyright 
(c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/listpages/main.go -ListOps=DescribeMountTargets -InputPaginator=Marker -OutputPaginator=NextMarker -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=DescribeTags -ListTagsOpPaginated -ListTagsInIDElem=FileSystemId -ServiceTagsSlice -TagInIDElem=ResourceId -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ListTagsOp=DescribeTags -ListTagsOpPaginated -ListTagsInIDElem=FileSystemId -ServiceTagsSlice -TagInIDElem=ResourceId -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/efs/list_pages_gen.go b/internal/service/efs/list_pages_gen.go deleted file mode 100644 index 470f689fc72..00000000000 --- a/internal/service/efs/list_pages_gen.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "internal/generate/listpages/main.go -ListOps=DescribeMountTargets -InputPaginator=Marker -OutputPaginator=NextMarker"; DO NOT EDIT. 
- -package efs - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/aws/aws-sdk-go/service/efs/efsiface" -) - -func describeMountTargetsPages(ctx context.Context, conn efsiface.EFSAPI, input *efs.DescribeMountTargetsInput, fn func(*efs.DescribeMountTargetsOutput, bool) bool) error { - for { - output, err := conn.DescribeMountTargetsWithContext(ctx, input) - if err != nil { - return err - } - - lastPage := aws.StringValue(output.NextMarker) == "" - if !fn(output, lastPage) || lastPage { - break - } - - input.Marker = output.NextMarker - } - return nil -} diff --git a/internal/service/efs/mount_target.go b/internal/service/efs/mount_target.go index 05a4abf1218..fd99607f686 100644 --- a/internal/service/efs/mount_target.go +++ b/internal/service/efs/mount_target.go @@ -9,25 +9,28 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/aws/aws-sdk-go/service/ec2" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + tfslices 
"github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_efs_mount_target", name="Mount Target") -func ResourceMountTarget() *schema.Resource { +func resourceMountTarget() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceMountTargetCreate, ReadWithoutTimeout: resourceMountTargetRead, @@ -44,11 +47,11 @@ func ResourceMountTarget() *schema.Resource { }, Schema: map[string]*schema.Schema{ - "availability_zone_name": { + "availability_zone_id": { Type: schema.TypeString, Computed: true, }, - "availability_zone_id": { + "availability_zone_name": { Type: schema.TypeString, Computed: true, }, @@ -101,7 +104,7 @@ func ResourceMountTarget() *schema.Resource { func resourceMountTargetCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) // CreateMountTarget would return the same Mount Target ID // to parallel requests if they both include the same AZ @@ -129,16 +132,16 @@ func resourceMountTargetCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk(names.AttrSecurityGroups); ok { - input.SecurityGroups = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroups = flex.ExpandStringValueSet(v.(*schema.Set)) } - mt, err := conn.CreateMountTargetWithContext(ctx, input) + mt, err := conn.CreateMountTarget(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating EFS Mount Target (%s): %s", fsID, err) } - d.SetId(aws.StringValue(mt.MountTargetId)) + d.SetId(aws.ToString(mt.MountTargetId)) if _, err := waitMountTargetCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for EFS Mount Target (%s) create: %s", d.Id(), err) @@ -149,9 +152,9 @@ func 
resourceMountTargetCreate(ctx context.Context, d *schema.ResourceData, meta func resourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - mt, err := FindMountTargetByID(ctx, conn, d.Id()) + mt, err := findMountTargetByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EFS Mount Target (%s) not found, removing from state", d.Id()) @@ -163,7 +166,7 @@ func resourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading EFS Mount Target (%s): %s", d.Id(), err) } - fsID := aws.StringValue(mt.FileSystemId) + fsID := aws.ToString(mt.FileSystemId) fsARN := arn.ARN{ AccountID: meta.(*conns.AWSClient).AccountID, Partition: meta.(*conns.AWSClient).Partition, @@ -177,12 +180,12 @@ func resourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("file_system_arn", fsARN) d.Set(names.AttrFileSystemID, fsID) d.Set(names.AttrIPAddress, mt.IpAddress) - d.Set("mount_target_dns_name", meta.(*conns.AWSClient).RegionalHostname(ctx, fmt.Sprintf("%s.%s.efs", aws.StringValue(mt.AvailabilityZoneName), aws.StringValue(mt.FileSystemId)))) + d.Set("mount_target_dns_name", meta.(*conns.AWSClient).RegionalHostname(ctx, fmt.Sprintf("%s.%s.efs", aws.ToString(mt.AvailabilityZoneName), aws.ToString(mt.FileSystemId)))) d.Set(names.AttrNetworkInterfaceID, mt.NetworkInterfaceId) d.Set(names.AttrOwnerID, mt.OwnerId) d.Set(names.AttrSubnetID, mt.SubnetId) - output, err := conn.DescribeMountTargetSecurityGroupsWithContext(ctx, &efs.DescribeMountTargetSecurityGroupsInput{ + output, err := conn.DescribeMountTargetSecurityGroups(ctx, &efs.DescribeMountTargetSecurityGroupsInput{ MountTargetId: aws.String(d.Id()), }) @@ -190,22 +193,22 @@ func resourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta i 
return sdkdiag.AppendErrorf(diags, "reading EFS Mount Target (%s) security groups: %s", d.Id(), err) } - d.Set(names.AttrSecurityGroups, aws.StringValueSlice(output.SecurityGroups)) + d.Set(names.AttrSecurityGroups, output.SecurityGroups) return diags } func resourceMountTargetUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) if d.HasChange(names.AttrSecurityGroups) { input := &efs.ModifyMountTargetSecurityGroupsInput{ MountTargetId: aws.String(d.Id()), - SecurityGroups: flex.ExpandStringSet(d.Get(names.AttrSecurityGroups).(*schema.Set)), + SecurityGroups: flex.ExpandStringValueSet(d.Get(names.AttrSecurityGroups).(*schema.Set)), } - _, err := conn.ModifyMountTargetSecurityGroupsWithContext(ctx, input) + _, err := conn.ModifyMountTargetSecurityGroups(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating EFS Mount Target (%s) security groups: %s", d.Id(), err) @@ -217,14 +220,14 @@ func resourceMountTargetUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceMountTargetDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) log.Printf("[DEBUG] Deleting EFS Mount Target: %s", d.Id()) - _, err := conn.DeleteMountTargetWithContext(ctx, &efs.DeleteMountTargetInput{ + _, err := conn.DeleteMountTarget(ctx, &efs.DeleteMountTargetInput{ MountTargetId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, efs.ErrCodeMountTargetNotFound) { + if errs.IsA[*awstypes.MountTargetNotFound](err) { return diags } @@ -246,61 +249,71 @@ func getAZFromSubnetID(ctx context.Context, conn *ec2.EC2, subnetID string) (str return "", err } - return aws.StringValue(subnet.AvailabilityZone), nil + return aws.ToString(subnet.AvailabilityZone), nil 
} -func findMountTarget(ctx context.Context, conn *efs.EFS, input *efs.DescribeMountTargetsInput) (*efs.MountTargetDescription, error) { - output, err := findMountTargets(ctx, conn, input) +func findMountTarget(ctx context.Context, conn *efs.Client, input *efs.DescribeMountTargetsInput, filter tfslices.Predicate[*awstypes.MountTargetDescription]) (*awstypes.MountTargetDescription, error) { + output, err := findMountTargets(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findMountTargets(ctx context.Context, conn *efs.EFS, input *efs.DescribeMountTargetsInput) ([]*efs.MountTargetDescription, error) { - var output []*efs.MountTargetDescription +func findMountTargets(ctx context.Context, conn *efs.Client, input *efs.DescribeMountTargetsInput, filter tfslices.Predicate[*awstypes.MountTargetDescription]) ([]awstypes.MountTargetDescription, error) { + var output []awstypes.MountTargetDescription + + pages := efs.NewDescribeMountTargetsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.MountTargetNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } - err := conn.DescribeMountTargetsPagesWithContext(ctx, input, func(page *efs.DescribeMountTargetsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + if err != nil { + return nil, err } for _, v := range page.MountTargets { - if v != nil { + if filter(&v) { output = append(output, v) } } + } - return !lastPage - }) + return output, nil +} - if tfawserr.ErrCodeEquals(err, efs.ErrCodeMountTargetNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } +func findMountTargetByID(ctx context.Context, conn *efs.Client, id string) (*awstypes.MountTargetDescription, error) { + input := &efs.DescribeMountTargetsInput{ + MountTargetId: aws.String(id), } + 
output, err := findMountTarget(ctx, conn, input, tfslices.PredicateTrue[*awstypes.MountTargetDescription]()) + if err != nil { return nil, err } - return output, nil -} - -func FindMountTargetByID(ctx context.Context, conn *efs.EFS, id string) (*efs.MountTargetDescription, error) { - input := &efs.DescribeMountTargetsInput{ - MountTargetId: aws.String(id), + if state := output.LifeCycleState; state == awstypes.LifeCycleStateDeleted { + return nil, &retry.NotFoundError{ + Message: string(state), + LastRequest: input, + } } - return findMountTarget(ctx, conn, input) + return output, nil } -func statusMountTargetLifeCycleState(ctx context.Context, conn *efs.EFS, id string) retry.StateRefreshFunc { +func statusMountTargetLifeCycleState(ctx context.Context, conn *efs.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindMountTargetByID(ctx, conn, id) + output, err := findMountTargetByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil @@ -310,14 +323,14 @@ func statusMountTargetLifeCycleState(ctx context.Context, conn *efs.EFS, id stri return nil, "", err } - return output, aws.StringValue(output.LifeCycleState), nil + return output, string(output.LifeCycleState), nil } } -func waitMountTargetCreated(ctx context.Context, conn *efs.EFS, id string, timeout time.Duration) (*efs.MountTargetDescription, error) { +func waitMountTargetCreated(ctx context.Context, conn *efs.Client, id string, timeout time.Duration) (*awstypes.MountTargetDescription, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{efs.LifeCycleStateCreating}, - Target: []string{efs.LifeCycleStateAvailable}, + Pending: enum.Slice(awstypes.LifeCycleStateCreating), + Target: enum.Slice(awstypes.LifeCycleStateAvailable), Refresh: statusMountTargetLifeCycleState(ctx, conn, id), Timeout: timeout, Delay: 2 * time.Second, @@ -326,16 +339,16 @@ func waitMountTargetCreated(ctx context.Context, conn *efs.EFS, id string, timeo 
outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*efs.MountTargetDescription); ok { + if output, ok := outputRaw.(*awstypes.MountTargetDescription); ok { return output, err } return nil, err } -func waitMountTargetDeleted(ctx context.Context, conn *efs.EFS, id string, timeout time.Duration) (*efs.MountTargetDescription, error) { +func waitMountTargetDeleted(ctx context.Context, conn *efs.Client, id string, timeout time.Duration) (*awstypes.MountTargetDescription, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{efs.LifeCycleStateAvailable, efs.LifeCycleStateDeleting, efs.LifeCycleStateDeleted}, + Pending: enum.Slice(awstypes.LifeCycleStateAvailable, awstypes.LifeCycleStateDeleting, awstypes.LifeCycleStateDeleted), Target: []string{}, Refresh: statusMountTargetLifeCycleState(ctx, conn, id), Timeout: timeout, @@ -345,7 +358,7 @@ func waitMountTargetDeleted(ctx context.Context, conn *efs.EFS, id string, timeo outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*efs.MountTargetDescription); ok { + if output, ok := outputRaw.(*awstypes.MountTargetDescription); ok { return output, err } diff --git a/internal/service/efs/mount_target_data_source.go b/internal/service/efs/mount_target_data_source.go index 683b881b5aa..bedea3271da 100644 --- a/internal/service/efs/mount_target_data_source.go +++ b/internal/service/efs/mount_target_data_source.go @@ -7,18 +7,20 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/efs" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" 
"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_efs_mount_target") -func DataSourceMountTarget() *schema.Resource { +// @SDKDataSource("aws_efs_mount_target", name="Mount Target") +func dataSourceMountTarget() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceMountTargetRead, @@ -84,7 +86,7 @@ func DataSourceMountTarget() *schema.Resource { func dataSourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) input := &efs.DescribeMountTargetsInput{} @@ -100,14 +102,14 @@ func dataSourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta input.MountTargetId = aws.String(v.(string)) } - mt, err := findMountTarget(ctx, conn, input) + mt, err := findMountTarget(ctx, conn, input, tfslices.PredicateTrue[*awstypes.MountTargetDescription]()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading EFS Mount Target: %s", err) } - d.SetId(aws.StringValue(mt.MountTargetId)) - fsID := aws.StringValue(mt.FileSystemId) + d.SetId(aws.ToString(mt.MountTargetId)) + fsID := aws.ToString(mt.FileSystemId) fsARN := arn.ARN{ AccountID: meta.(*conns.AWSClient).AccountID, Partition: meta.(*conns.AWSClient).Partition, @@ -121,13 +123,13 @@ func dataSourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta d.Set("file_system_arn", fsARN) d.Set(names.AttrFileSystemID, fsID) d.Set(names.AttrIPAddress, mt.IpAddress) - d.Set("mount_target_dns_name", meta.(*conns.AWSClient).RegionalHostname(ctx, fmt.Sprintf("%s.%s.efs", aws.StringValue(mt.AvailabilityZoneName), aws.StringValue(mt.FileSystemId)))) + d.Set("mount_target_dns_name", meta.(*conns.AWSClient).RegionalHostname(ctx, fmt.Sprintf("%s.%s.efs", 
aws.ToString(mt.AvailabilityZoneName), aws.ToString(mt.FileSystemId)))) d.Set("mount_target_id", mt.MountTargetId) d.Set(names.AttrNetworkInterfaceID, mt.NetworkInterfaceId) d.Set(names.AttrOwnerID, mt.OwnerId) d.Set(names.AttrSubnetID, mt.SubnetId) - output, err := conn.DescribeMountTargetSecurityGroupsWithContext(ctx, &efs.DescribeMountTargetSecurityGroupsInput{ + output, err := conn.DescribeMountTargetSecurityGroups(ctx, &efs.DescribeMountTargetSecurityGroupsInput{ MountTargetId: aws.String(d.Id()), }) @@ -135,7 +137,7 @@ func dataSourceMountTargetRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "reading EFS Mount Target (%s) security groups: %s", d.Id(), err) } - d.Set(names.AttrSecurityGroups, aws.StringValueSlice(output.SecurityGroups)) + d.Set(names.AttrSecurityGroups, output.SecurityGroups) return diags } diff --git a/internal/service/efs/mount_target_data_source_test.go b/internal/service/efs/mount_target_data_source_test.go index 4654b80084c..774c3f84e8b 100644 --- a/internal/service/efs/mount_target_data_source_test.go +++ b/internal/service/efs/mount_target_data_source_test.go @@ -106,8 +106,8 @@ func TestAccEFSMountTargetDataSource_byFileSystemID(t *testing.T) { }) } -func testAccMountTargetBaseDataSourceConfig(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` +func testAccMountTargetDataSourceConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` resource "aws_efs_file_system" "test" { creation_token = %[1]q @@ -118,31 +118,13 @@ resource "aws_efs_file_system" "test" { resource "aws_efs_mount_target" "test" { file_system_id = aws_efs_file_system.test.id - subnet_id = aws_subnet.test.id -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - vpc_id = aws_vpc.test.id - availability_zone = 
data.aws_availability_zones.available.names[0] - cidr_block = "10.0.1.0/24" - - tags = { - Name = %[1]q - } + subnet_id = aws_subnet.test[0].id } `, rName)) } func testAccMountTargetDataSourceConfig_byID(rName string) string { - return acctest.ConfigCompose(testAccMountTargetBaseDataSourceConfig(rName), ` + return acctest.ConfigCompose(testAccMountTargetDataSourceConfig_base(rName), ` data "aws_efs_mount_target" "test" { mount_target_id = aws_efs_mount_target.test.id } @@ -150,7 +132,7 @@ data "aws_efs_mount_target" "test" { } func testAccMountTargetDataSourceConfig_byAccessPointID(rName string) string { - return acctest.ConfigCompose(testAccMountTargetBaseDataSourceConfig(rName), ` + return acctest.ConfigCompose(testAccMountTargetDataSourceConfig_base(rName), ` resource "aws_efs_access_point" "test" { file_system_id = aws_efs_file_system.test.id } @@ -162,7 +144,7 @@ data "aws_efs_mount_target" "test" { } func testAccMountTargetDataSourceConfig_byFileSystemID(rName string) string { - return acctest.ConfigCompose(testAccMountTargetBaseDataSourceConfig(rName), ` + return acctest.ConfigCompose(testAccMountTargetDataSourceConfig_base(rName), ` data "aws_efs_mount_target" "test" { file_system_id = aws_efs_file_system.test.id diff --git a/internal/service/efs/mount_target_test.go b/internal/service/efs/mount_target_test.go index 64bb869f384..953555803a9 100644 --- a/internal/service/efs/mount_target_test.go +++ b/internal/service/efs/mount_target_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccEFSMountTarget_basic(t *testing.T) { ctx := acctest.Context(t) - var mount efs.MountTargetDescription + var mount 
awstypes.MountTargetDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_mount_target.test" resourceName2 := "aws_efs_mount_target.test2" @@ -67,7 +67,7 @@ func TestAccEFSMountTarget_basic(t *testing.T) { func TestAccEFSMountTarget_disappears(t *testing.T) { ctx := acctest.Context(t) - var mount efs.MountTargetDescription + var mount awstypes.MountTargetDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_mount_target.test" @@ -91,7 +91,7 @@ func TestAccEFSMountTarget_disappears(t *testing.T) { func TestAccEFSMountTarget_ipAddress(t *testing.T) { ctx := acctest.Context(t) - var mount efs.MountTargetDescription + var mount awstypes.MountTargetDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_mount_target.test" @@ -120,7 +120,7 @@ func TestAccEFSMountTarget_ipAddress(t *testing.T) { // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/13845 func TestAccEFSMountTarget_IPAddress_emptyString(t *testing.T) { ctx := acctest.Context(t) - var mount efs.MountTargetDescription + var mount awstypes.MountTargetDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_efs_mount_target.test" @@ -148,7 +148,7 @@ func TestAccEFSMountTarget_IPAddress_emptyString(t *testing.T) { func testAccCheckMountTargetDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_efs_mount_target" { continue @@ -171,18 +171,14 @@ func testAccCheckMountTargetDestroy(ctx context.Context) resource.TestCheckFunc } } -func testAccCheckMountTargetExists(ctx context.Context, n string, v *efs.MountTargetDescription) resource.TestCheckFunc { +func testAccCheckMountTargetExists(ctx 
context.Context, n string, v *awstypes.MountTargetDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No EFS Mount Target ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) output, err := tfefs.FindMountTargetByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/efs/replication_configuration.go b/internal/service/efs/replication_configuration.go index fe6f0e121ba..98f49cccc4c 100644 --- a/internal/service/efs/replication_configuration.go +++ b/internal/service/efs/replication_configuration.go @@ -9,21 +9,24 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_efs_replication_configuration", name="Replication Configuration") -func ResourceReplicationConfiguration() *schema.Resource { +func resourceReplicationConfiguration() *schema.Resource { return 
&schema.Resource{ CreateWithoutTimeout: resourceReplicationConfigurationCreate, ReadWithoutTimeout: resourceReplicationConfigurationRead, @@ -105,7 +108,7 @@ func ResourceReplicationConfiguration() *schema.Resource { func resourceReplicationConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) fsID := d.Get("source_file_system_id").(string) input := &efs.CreateReplicationConfigurationInput{ @@ -116,7 +119,7 @@ func resourceReplicationConfigurationCreate(ctx context.Context, d *schema.Resou input.Destinations = expandDestinationsToCreate(v.([]interface{})) } - _, err := conn.CreateReplicationConfigurationWithContext(ctx, input) + _, err := conn.CreateReplicationConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating EFS Replication Configuration (%s): %s", fsID, err) @@ -133,9 +136,9 @@ func resourceReplicationConfigurationCreate(ctx context.Context, d *schema.Resou func resourceReplicationConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) - replication, err := FindReplicationConfigurationByID(ctx, conn, d.Id()) + replication, err := findReplicationConfigurationByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] EFS Replication Configuration (%s) not found, removing from state", d.Id()) @@ -159,7 +162,7 @@ func resourceReplicationConfigurationRead(ctx context.Context, d *schema.Resourc copy(0, names.AttrKMSKeyID) } - d.Set(names.AttrCreationTime, aws.TimeValue(replication.CreationTime).String()) + d.Set(names.AttrCreationTime, aws.ToTime(replication.CreationTime).String()) if err := d.Set(names.AttrDestination, destinations); err != nil { return 
sdkdiag.AppendErrorf(diags, "setting destination: %s", err) } @@ -173,14 +176,16 @@ func resourceReplicationConfigurationRead(ctx context.Context, d *schema.Resourc func resourceReplicationConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).EFSConn(ctx) + conn := meta.(*conns.AWSClient).EFSClient(ctx) // Deletion of the replication configuration must be done from the Region in which the destination file system is located. destination := expandDestinationsToCreate(d.Get(names.AttrDestination).([]interface{}))[0] - regionConn := meta.(*conns.AWSClient).EFSConnForRegion(ctx, aws.StringValue(destination.Region)) + optFn := func(o *efs.Options) { + o.Region = aws.ToString(destination.Region) + } log.Printf("[DEBUG] Deleting EFS Replication Configuration: %s", d.Id()) - if err := deleteReplicationConfiguration(ctx, regionConn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if err := deleteReplicationConfiguration(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete), optFn); err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -192,12 +197,12 @@ func resourceReplicationConfigurationDelete(ctx context.Context, d *schema.Resou return diags } -func deleteReplicationConfiguration(ctx context.Context, conn *efs.EFS, fsID string, timeout time.Duration) error { - _, err := conn.DeleteReplicationConfigurationWithContext(ctx, &efs.DeleteReplicationConfigurationInput{ +func deleteReplicationConfiguration(ctx context.Context, conn *efs.Client, fsID string, timeout time.Duration, optFns ...func(*efs.Options)) error { + _, err := conn.DeleteReplicationConfiguration(ctx, &efs.DeleteReplicationConfigurationInput{ SourceFileSystemId: aws.String(fsID), - }) + }, optFns...) 
- if tfawserr.ErrCodeEquals(err, efs.ErrCodeFileSystemNotFound, efs.ErrCodeReplicationNotFound) { + if errs.IsA[*awstypes.FileSystemNotFound](err) || errs.IsA[*awstypes.ReplicationNotFound](err) { return nil } @@ -205,75 +210,72 @@ func deleteReplicationConfiguration(ctx context.Context, conn *efs.EFS, fsID str return fmt.Errorf("deleting EFS Replication Configuration (%s): %w", fsID, err) } - if _, err := waitReplicationConfigurationDeleted(ctx, conn, fsID, timeout); err != nil { + if _, err := waitReplicationConfigurationDeleted(ctx, conn, fsID, timeout, optFns...); err != nil { return fmt.Errorf("waiting for EFS Replication Configuration (%s) delete: %w", fsID, err) } return nil } -func findReplicationConfiguration(ctx context.Context, conn *efs.EFS, input *efs.DescribeReplicationConfigurationsInput) (*efs.ReplicationConfigurationDescription, error) { - output, err := findReplicationConfigurations(ctx, conn, input) +func findReplicationConfiguration(ctx context.Context, conn *efs.Client, input *efs.DescribeReplicationConfigurationsInput, filter tfslices.Predicate[*awstypes.ReplicationConfigurationDescription], optFns ...func(*efs.Options)) (*awstypes.ReplicationConfigurationDescription, error) { + output, err := findReplicationConfigurations(ctx, conn, input, filter, optFns...) 
if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationConfigurations(ctx context.Context, conn *efs.EFS, input *efs.DescribeReplicationConfigurationsInput) ([]*efs.ReplicationConfigurationDescription, error) { - var output []*efs.ReplicationConfigurationDescription +func findReplicationConfigurations(ctx context.Context, conn *efs.Client, input *efs.DescribeReplicationConfigurationsInput, filter tfslices.Predicate[*awstypes.ReplicationConfigurationDescription], optFns ...func(*efs.Options)) ([]awstypes.ReplicationConfigurationDescription, error) { + var output []awstypes.ReplicationConfigurationDescription - err := conn.DescribeReplicationConfigurationsPagesWithContext(ctx, input, func(page *efs.DescribeReplicationConfigurationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := efs.NewDescribeReplicationConfigurationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx, optFns...) 
- for _, v := range page.Replications { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.FileSystemNotFound](err) || errs.IsA[*awstypes.ReplicationNotFound](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, efs.ErrCodeFileSystemNotFound, efs.ErrCodeReplicationNotFound) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.Replications { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func FindReplicationConfigurationByID(ctx context.Context, conn *efs.EFS, id string) (*efs.ReplicationConfigurationDescription, error) { +func findReplicationConfigurationByID(ctx context.Context, conn *efs.Client, id string, optFns ...func(*efs.Options)) (*awstypes.ReplicationConfigurationDescription, error) { input := &efs.DescribeReplicationConfigurationsInput{ FileSystemId: aws.String(id), } - output, err := findReplicationConfiguration(ctx, conn, input) + output, err := findReplicationConfiguration(ctx, conn, input, tfslices.PredicateTrue[*awstypes.ReplicationConfigurationDescription](), optFns...) if err != nil { return nil, err } - if len(output.Destinations) == 0 || output.Destinations[0] == nil { + if len(output.Destinations) == 0 { return nil, tfresource.NewEmptyResultError(input) } return output, nil } -func statusReplicationConfiguration(ctx context.Context, conn *efs.EFS, id string) retry.StateRefreshFunc { +func statusReplicationConfiguration(ctx context.Context, conn *efs.Client, id string, optFns ...func(*efs.Options)) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindReplicationConfigurationByID(ctx, conn, id) + output, err := findReplicationConfigurationByID(ctx, conn, id, optFns...) 
if tfresource.NotFound(err) { return nil, "", nil @@ -283,56 +285,56 @@ func statusReplicationConfiguration(ctx context.Context, conn *efs.EFS, id strin return nil, "", err } - return output, aws.StringValue(output.Destinations[0].Status), nil + return output, string(output.Destinations[0].Status), nil } } -func waitReplicationConfigurationCreated(ctx context.Context, conn *efs.EFS, id string, timeout time.Duration) (*efs.ReplicationConfigurationDescription, error) { +func waitReplicationConfigurationCreated(ctx context.Context, conn *efs.Client, id string, timeout time.Duration, optFns ...func(*efs.Options)) (*awstypes.ReplicationConfigurationDescription, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{efs.ReplicationStatusEnabling}, - Target: []string{efs.ReplicationStatusEnabled}, - Refresh: statusReplicationConfiguration(ctx, conn, id), + Pending: enum.Slice(awstypes.ReplicationStatusEnabling), + Target: enum.Slice(awstypes.ReplicationStatusEnabled), + Refresh: statusReplicationConfiguration(ctx, conn, id, optFns...), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*efs.ReplicationConfigurationDescription); ok { + if output, ok := outputRaw.(*awstypes.ReplicationConfigurationDescription); ok { return output, err } return nil, err } -func waitReplicationConfigurationDeleted(ctx context.Context, conn *efs.EFS, id string, timeout time.Duration) (*efs.ReplicationConfigurationDescription, error) { +func waitReplicationConfigurationDeleted(ctx context.Context, conn *efs.Client, id string, timeout time.Duration, optFns ...func(*efs.Options)) (*awstypes.ReplicationConfigurationDescription, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{efs.ReplicationStatusDeleting}, + Pending: enum.Slice(awstypes.ReplicationStatusDeleting), Target: []string{}, - Refresh: statusReplicationConfiguration(ctx, conn, id), + Refresh: statusReplicationConfiguration(ctx, conn, id, optFns...), Timeout: 
timeout, ContinuousTargetOccurence: 2, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*efs.ReplicationConfigurationDescription); ok { + if output, ok := outputRaw.(*awstypes.ReplicationConfigurationDescription); ok { return output, err } return nil, err } -func expandDestinationToCreate(tfMap map[string]interface{}) *efs.DestinationToCreate { - if tfMap == nil { - return nil - } - - apiObject := &efs.DestinationToCreate{} +func expandDestinationToCreate(tfMap map[string]interface{}) *awstypes.DestinationToCreate { + apiObject := &awstypes.DestinationToCreate{} if v, ok := tfMap["availability_zone_name"].(string); ok && v != "" { apiObject.AvailabilityZoneName = aws.String(v) } + if v, ok := tfMap[names.AttrFileSystemID].(string); ok && v != "" { + apiObject.FileSystemId = aws.String(v) + } + if v, ok := tfMap[names.AttrKMSKeyID].(string); ok && v != "" { apiObject.KmsKeyId = aws.String(v) } @@ -341,62 +343,47 @@ func expandDestinationToCreate(tfMap map[string]interface{}) *efs.DestinationToC apiObject.Region = aws.String(v) } - if v, ok := tfMap[names.AttrFileSystemID].(string); ok && v != "" { - apiObject.FileSystemId = aws.String(v) - } - return apiObject } -func expandDestinationsToCreate(tfList []interface{}) []*efs.DestinationToCreate { +func expandDestinationsToCreate(tfList []interface{}) []awstypes.DestinationToCreate { if len(tfList) == 0 { return nil } - var apiObjects []*efs.DestinationToCreate + var apiObjects []awstypes.DestinationToCreate for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { continue } apiObject := expandDestinationToCreate(tfMap) - if apiObject == nil { - continue - } - - apiObjects = append(apiObjects, apiObject) + apiObjects = append(apiObjects, *apiObject) } return apiObjects } -func flattenDestination(apiObject *efs.Destination) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenDestination(apiObject awstypes.Destination) 
map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.FileSystemId; v != nil { - tfMap[names.AttrFileSystemID] = aws.StringValue(v) + tfMap[names.AttrFileSystemID] = aws.ToString(v) } if v := apiObject.Region; v != nil { - tfMap[names.AttrRegion] = aws.StringValue(v) + tfMap[names.AttrRegion] = aws.ToString(v) } - if v := apiObject.Status; v != nil { - tfMap[names.AttrStatus] = aws.StringValue(v) - } + tfMap[names.AttrStatus] = string(apiObject.Status) return tfMap } -func flattenDestinations(apiObjects []*efs.Destination) []interface{} { +func flattenDestinations(apiObjects []awstypes.Destination) []interface{} { if len(apiObjects) == 0 { return nil } @@ -404,10 +391,6 @@ func flattenDestinations(apiObjects []*efs.Destination) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenDestination(apiObject)) } diff --git a/internal/service/efs/replication_configuration_test.go b/internal/service/efs/replication_configuration_test.go index 7e6649d8306..63489e0851b 100644 --- a/internal/service/efs/replication_configuration_test.go +++ b/internal/service/efs/replication_configuration_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -45,7 +45,7 @@ func TestAccEFSReplicationConfiguration_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "destination.#", acctest.Ct1), resource.TestMatchResourceAttr(resourceName, "destination.0.file_system_id", regexache.MustCompile(`fs-.+`)), resource.TestCheckResourceAttr(resourceName, "destination.0.region", acctest.AlternateRegion()), - 
resource.TestCheckResourceAttr(resourceName, "destination.0.status", efs.ReplicationStatusEnabled), + resource.TestCheckResourceAttr(resourceName, "destination.0.status", string(awstypes.ReplicationStatusEnabled)), resource.TestCheckResourceAttrPair(resourceName, "original_source_file_system_arn", fsResourceName, names.AttrARN), resource.TestCheckResourceAttrPair(resourceName, "source_file_system_arn", fsResourceName, names.AttrARN), resource.TestCheckResourceAttrPair(resourceName, "source_file_system_id", fsResourceName, names.AttrID), @@ -118,7 +118,7 @@ func TestAccEFSReplicationConfiguration_allAttributes(t *testing.T) { resource.TestMatchResourceAttr(resourceName, "destination.0.file_system_id", regexache.MustCompile(`fs-.+`)), resource.TestCheckResourceAttrPair(resourceName, "destination.0.kms_key_id", kmsKeyResourceName, names.AttrKeyID), resource.TestCheckResourceAttr(resourceName, "destination.0.region", acctest.AlternateRegion()), - resource.TestCheckResourceAttr(resourceName, "destination.0.status", efs.ReplicationStatusEnabled), + resource.TestCheckResourceAttr(resourceName, "destination.0.status", string(awstypes.ReplicationStatusEnabled)), resource.TestCheckResourceAttrPair(resourceName, "original_source_file_system_arn", fsResourceName, names.AttrARN), resource.TestCheckResourceAttrPair(resourceName, "source_file_system_arn", fsResourceName, names.AttrARN), resource.TestCheckResourceAttrPair(resourceName, "source_file_system_id", fsResourceName, names.AttrID), @@ -156,7 +156,7 @@ func TestAccEFSReplicationConfiguration_existingDestination(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationTime), resource.TestCheckResourceAttr(resourceName, "destination.#", acctest.Ct1), resource.TestCheckResourceAttrPair(resourceName, "destination.0.file_system_id", destinationFsResourceName, names.AttrID), - resource.TestCheckResourceAttr(resourceName, "destination.0.status", efs.ReplicationStatusEnabled), + 
resource.TestCheckResourceAttr(resourceName, "destination.0.status", string(awstypes.ReplicationStatusEnabled)), ), }, }, @@ -170,7 +170,7 @@ func testAccCheckReplicationConfigurationExists(ctx context.Context, n string) r return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EFSClient(ctx) _, err := tfefs.FindReplicationConfigurationByID(ctx, conn, rs.Primary.ID) @@ -186,7 +186,7 @@ func testAccCheckReplicationConfigurationDestroy(ctx context.Context) resource.T func testAccCheckReplicationConfigurationDestroyWithProvider(ctx context.Context) acctest.TestCheckWithProviderFunc { return func(s *terraform.State, provider *schema.Provider) error { - conn := provider.Meta().(*conns.AWSClient).EFSConn(ctx) + conn := provider.Meta().(*conns.AWSClient).EFSClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_efs_replication_configuration" { diff --git a/internal/service/efs/service_endpoint_resolver_gen.go b/internal/service/efs/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..266db9695a0 --- /dev/null +++ b/internal/service/efs/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package efs + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + efs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/efs" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ efs_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver efs_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: efs_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params efs_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up efs endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*efs_sdkv2.Options) { + return func(o *efs_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/efs/service_endpoints_gen_test.go b/internal/service/efs/service_endpoints_gen_test.go index d8ce6e2c590..33a135c0f48 100644 --- a/internal/service/efs/service_endpoints_gen_test.go +++ b/internal/service/efs/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package efs_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - efs_sdkv1 "github.com/aws/aws-sdk-go/service/efs" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + efs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/efs" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := 
endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := efs_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(efs_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), efs_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := efs_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(efs_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), efs_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.EFSConn(ctx) - - req, _ := client.DescribeFileSystemsRequest(&efs_sdkv1.DescribeFileSystemsInput{}) + client := meta.EFSClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.DescribeFileSystems(ctx, &efs_sdkv2.DescribeFileSystemsInput{}, + func(opts *efs_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, 
+ ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving efs default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving efs FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up efs endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx
context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if 
v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/efs/service_package_gen.go b/internal/service/efs/service_package_gen.go index 98d34b8546d..944de84393e 100644 --- a/internal/service/efs/service_package_gen.go +++ b/internal/service/efs/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package efs import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - efs_sdkv1 "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + efs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/efs" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -28,20 +25,26 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceAccessPoint, + Factory: dataSourceAccessPoint, TypeName: "aws_efs_access_point", + Name: "Access Point", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceAccessPoints, + Factory: dataSourceAccessPoints, TypeName: "aws_efs_access_points", + Name: "Access Point", }, { - Factory: DataSourceFileSystem, + Factory: dataSourceFileSystem, TypeName: "aws_efs_file_system", + Name: "File System", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: 
DataSourceMountTarget, + Factory: dataSourceMountTarget, TypeName: "aws_efs_mount_target", + Name: "Mount Target", }, } } @@ -49,7 +52,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceAccessPoint, + Factory: resourceAccessPoint, TypeName: "aws_efs_access_point", Name: "Access Point", Tags: &types.ServicePackageResourceTags{ @@ -57,11 +60,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceBackupPolicy, + Factory: resourceBackupPolicy, TypeName: "aws_efs_backup_policy", + Name: "Backup Policy", }, { - Factory: ResourceFileSystem, + Factory: resourceFileSystem, TypeName: "aws_efs_file_system", Name: "File System", Tags: &types.ServicePackageResourceTags{ @@ -69,16 +73,17 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceFileSystemPolicy, + Factory: resourceFileSystemPolicy, TypeName: "aws_efs_file_system_policy", + Name: "File System Policy", }, { - Factory: ResourceMountTarget, + Factory: resourceMountTarget, TypeName: "aws_efs_mount_target", Name: "Mount Target", }, { - Factory: ResourceReplicationConfiguration, + Factory: resourceReplicationConfiguration, TypeName: "aws_efs_replication_configuration", Name: "Replication Configuration", }, @@ -89,25 +94,14 @@ func (p *servicePackage) ServicePackageName() string { return names.EFS } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*efs_sdkv1.EFS, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*efs_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return efs_sdkv1.New(sess.Copy(&cfg)), nil + return efs_sdkv2.NewFromConfig(cfg, + efs_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/efs/status.go b/internal/service/efs/status.go deleted file mode 100644 index 6b73cc46938..00000000000 --- a/internal/service/efs/status.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package efs - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -// statusAccessPointLifeCycleState fetches the Access Point and its LifecycleState -func statusAccessPointLifeCycleState(ctx context.Context, conn *efs.EFS, accessPointId string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - input := &efs.DescribeAccessPointsInput{ - AccessPointId: aws.String(accessPointId), - } - - output, err := conn.DescribeAccessPointsWithContext(ctx, input) - - if err != nil { - return nil, "", err - } - - if output == nil || len(output.AccessPoints) == 0 || output.AccessPoints[0] == nil { - return nil, "", nil - } - - mt := output.AccessPoints[0] - - return mt, aws.StringValue(mt.LifeCycleState), nil - } -} - -func statusBackupPolicy(ctx context.Context, conn *efs.EFS, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindBackupPolicyByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} diff --git a/internal/service/efs/sweep.go b/internal/service/efs/sweep.go index 832ec2762e1..f49d5989f2c 100644 --- a/internal/service/efs/sweep.go +++ b/internal/service/efs/sweep.go @@ -7,12 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - multierror "github.com/hashicorp/go-multierror" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/efs" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + 
"github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -27,6 +26,7 @@ func RegisterSweepers() { Dependencies: []string{ "aws_efs_mount_target", "aws_efs_access_point", + "aws_m2_environment", }, }) @@ -42,14 +42,21 @@ func sweepAccessPoints(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.EFSConn(ctx) + conn := client.EFSClient(ctx) input := &efs.DescribeFileSystemsInput{} - var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *efs.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := efs.NewDescribeFileSystemsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EFS Access Point sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing EFS File Systems (%s): %w", region, err) } for _, v := range page.FileSystems { @@ -57,50 +64,32 @@ func sweepAccessPoints(region string) error { FileSystemId: v.FileSystemId, } - err := conn.DescribeAccessPointsPagesWithContext(ctx, input, func(page *efs.DescribeAccessPointsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := efs.NewDescribeAccessPointsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + continue } for _, v := range page.AccessPoints { - r := ResourceAccessPoint() + r := resourceAccessPoint() d := r.Data(nil) - d.SetId(aws.StringValue(v.AccessPointId)) + d.SetId(aws.ToString(v.AccessPointId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EFS Access Points 
(%s): %w", region, err)) } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EFS Access Point sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EFS File Systems (%s): %w", region, err)) } err = sweep.SweepOrchestrator(ctx, sweepResources) if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping EFS Access Points (%s): %w", region, err)) + return fmt.Errorf("error sweeping EFS Access Points (%s): %w", region, err) } - return sweeperErrs.ErrorOrNil() + return nil } func sweepFileSystems(region string) error { @@ -109,33 +98,30 @@ func sweepFileSystems(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.EFSConn(ctx) + conn := client.EFSClient(ctx) input := &efs.DescribeFileSystemsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *efs.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := efs.NewDescribeFileSystemsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EFS File System sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing EFS File Systems (%s): %w", region, err) } for _, v := range page.FileSystems { - r := ResourceFileSystem() + r := resourceFileSystem() d := r.Data(nil) - d.SetId(aws.StringValue(v.FileSystemId)) + d.SetId(aws.ToString(v.FileSystemId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping EFS File System sweep for %s: %s", region, err) - return nil - } - - if err != 
nil { - return fmt.Errorf("error listing EFS File Systems (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -153,14 +139,21 @@ func sweepMountTargets(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.EFSConn(ctx) + conn := client.EFSClient(ctx) input := &efs.DescribeFileSystemsInput{} - var sweeperErrs *multierror.Error sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeFileSystemsPagesWithContext(ctx, input, func(page *efs.DescribeFileSystemsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := efs.NewDescribeFileSystemsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping EFS Mount Target sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing EFS File Systems (%s): %w", region, err) } for _, v := range page.FileSystems { @@ -168,48 +161,30 @@ func sweepMountTargets(region string) error { FileSystemId: v.FileSystemId, } - err := describeMountTargetsPages(ctx, conn, input, func(page *efs.DescribeMountTargetsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := efs.NewDescribeMountTargetsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + continue } for _, v := range page.MountTargets { - r := ResourceMountTarget() + r := resourceMountTarget() d := r.Data(nil) - d.SetId(aws.StringValue(v.MountTargetId)) + d.SetId(aws.ToString(v.MountTargetId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - continue - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EFS Mount Targets (%s): %w", region, err)) } } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] 
Skipping EFS Mount Target sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error listing EFS File Systems (%s): %w", region, err)) } err = sweep.SweepOrchestrator(ctx, sweepResources) if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error sweeping EFS Mount Targets (%s): %w", region, err)) + return fmt.Errorf("error sweeping EFS Mount Targets (%s): %w", region, err) } - return sweeperErrs.ErrorOrNil() + return nil } diff --git a/internal/service/efs/tags_gen.go b/internal/service/efs/tags_gen.go index 0d772a011ef..0c1d5f85692 100644 --- a/internal/service/efs/tags_gen.go +++ b/internal/service/efs/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/efs" - "github.com/aws/aws-sdk-go/service/efs/efsiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/efs" + awstypes "github.com/aws/aws-sdk-go-v2/service/efs/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,28 +19,23 @@ import ( // listTags lists efs service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn efsiface.EFSAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *efs.Client, identifier string, optFns ...func(*efs.Options)) (tftags.KeyValueTags, error) { input := &efs.DescribeTagsInput{ FileSystemId: aws.String(identifier), } - var output []*efs.Tag + var output []awstypes.Tag - err := conn.DescribeTagsPagesWithContext(ctx, input, func(page *efs.DescribeTagsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := efs.NewDescribeTagsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx, optFns...) + + if err != nil { + return tftags.New(ctx, nil), err } for _, v := range page.Tags { - if v != nil { - output = append(output, v) - } + output = append(output, v) } - - return !lastPage - }) - - if err != nil { - return tftags.New(ctx, nil), err } return KeyValueTags(ctx, output), nil @@ -49,7 +44,7 @@ func listTags(ctx context.Context, conn efsiface.EFSAPI, identifier string) (tft // ListTags lists efs service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).EFSConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).EFSClient(ctx), identifier) if err != nil { return err @@ -65,11 +60,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns efs service tags. 
-func Tags(tags tftags.KeyValueTags) []*efs.Tag { - result := make([]*efs.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &efs.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -81,11 +76,11 @@ func Tags(tags tftags.KeyValueTags) []*efs.Tag { } // KeyValueTags creates tftags.KeyValueTags from efs service tags. -func KeyValueTags(ctx context.Context, tags []*efs.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -93,7 +88,7 @@ func KeyValueTags(ctx context.Context, tags []*efs.Tag) tftags.KeyValueTags { // getTagsIn returns efs service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*efs.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -104,7 +99,7 @@ func getTagsIn(ctx context.Context) []*efs.Tag { } // setTagsOut sets efs service tags in Context. -func setTagsOut(ctx context.Context, tags []*efs.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -113,7 +108,7 @@ func setTagsOut(ctx context.Context, tags []*efs.Tag) { // updateTags updates efs service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn efsiface.EFSAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *efs.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*efs.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -124,10 +119,10 @@ func updateTags(ctx context.Context, conn efsiface.EFSAPI, identifier string, ol if len(removedTags) > 0 { input := &efs.UntagResourceInput{ ResourceId: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -142,7 +137,7 @@ func updateTags(ctx context.Context, conn efsiface.EFSAPI, identifier string, ol Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -155,5 +150,5 @@ func updateTags(ctx context.Context, conn efsiface.EFSAPI, identifier string, ol // UpdateTags updates efs service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).EFSConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).EFSClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/efs/wait.go b/internal/service/efs/wait.go deleted file mode 100644 index 27a792befef..00000000000 --- a/internal/service/efs/wait.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package efs - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go/service/efs" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -const ( - // Maximum amount of time to wait for an Operation to return Success - accessPointCreatedTimeout = 10 * time.Minute - accessPointDeletedTimeout = 10 * time.Minute - - backupPolicyDisabledTimeout = 10 * time.Minute - backupPolicyEnabledTimeout = 10 * time.Minute -) - -// waitAccessPointCreated waits for an Operation to return Success -func waitAccessPointCreated(ctx context.Context, conn *efs.EFS, accessPointId string) (*efs.AccessPointDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{efs.LifeCycleStateCreating}, - Target: []string{efs.LifeCycleStateAvailable}, - Refresh: statusAccessPointLifeCycleState(ctx, conn, accessPointId), - Timeout: accessPointCreatedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*efs.AccessPointDescription); ok { - return output, err - } - - return nil, err -} - -// waitAccessPointDeleted waits for an Access Point to return Deleted -func waitAccessPointDeleted(ctx context.Context, conn *efs.EFS, accessPointId string) (*efs.AccessPointDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{efs.LifeCycleStateAvailable, efs.LifeCycleStateDeleting, efs.LifeCycleStateDeleted}, - Target: []string{}, - Refresh: statusAccessPointLifeCycleState(ctx, conn, accessPointId), - Timeout: accessPointDeletedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*efs.AccessPointDescription); ok { - return output, err - } - - return nil, err -} - -func waitBackupPolicyDisabled(ctx context.Context, conn *efs.EFS, id string) (*efs.BackupPolicy, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{efs.StatusDisabling}, - Target: []string{efs.StatusDisabled}, - Refresh: 
statusBackupPolicy(ctx, conn, id), - Timeout: backupPolicyDisabledTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*efs.BackupPolicy); ok { - return output, err - } - - return nil, err -} - -func waitBackupPolicyEnabled(ctx context.Context, conn *efs.EFS, id string) (*efs.BackupPolicy, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{efs.StatusEnabling}, - Target: []string{efs.StatusEnabled}, - Refresh: statusBackupPolicy(ctx, conn, id), - Timeout: backupPolicyEnabledTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*efs.BackupPolicy); ok { - return output, err - } - - return nil, err -} diff --git a/internal/service/eks/cluster.go b/internal/service/eks/cluster.go index e9ab6aa1e20..bb28f67a71c 100644 --- a/internal/service/eks/cluster.go +++ b/internal/service/eks/cluster.go @@ -43,6 +43,15 @@ func resourceCluster() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceClusterV0().CoreConfigSchema().ImpliedType(), + Upgrade: clusterStateUpgradeV0, + Version: 0, + }, + }, + CustomizeDiff: customdiff.Sequence( verify.SetTagsDiff, customdiff.ForceNewIfChange("encryption_config", func(_ context.Context, old, new, meta interface{}) bool { @@ -83,6 +92,12 @@ func resourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "bootstrap_self_managed_addons": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + }, "certificate_authority": { Type: schema.TypeList, Computed: true, @@ -321,12 +336,13 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int name := d.Get(names.AttrName).(string) input := &eks.CreateClusterInput{ - EncryptionConfig: expandEncryptionConfig(d.Get("encryption_config").([]interface{})), - Logging: 
expandLogging(d.Get("enabled_cluster_log_types").(*schema.Set)), - Name: aws.String(name), - ResourcesVpcConfig: expandVpcConfigRequest(d.Get(names.AttrVPCConfig).([]interface{})), - RoleArn: aws.String(d.Get(names.AttrRoleARN).(string)), - Tags: getTagsIn(ctx), + BootstrapSelfManagedAddons: aws.Bool(d.Get("bootstrap_self_managed_addons").(bool)), + EncryptionConfig: expandEncryptionConfig(d.Get("encryption_config").([]interface{})), + Logging: expandLogging(d.Get("enabled_cluster_log_types").(*schema.Set)), + Name: aws.String(name), + ResourcesVpcConfig: expandVpcConfigRequest(d.Get(names.AttrVPCConfig).([]interface{})), + RoleArn: aws.String(d.Get(names.AttrRoleARN).(string)), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("access_config"); ok { @@ -408,6 +424,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter } // bootstrap_cluster_creator_admin_permissions isn't returned from the AWS API. + // See https://github.com/aws/containers-roadmap/issues/185#issuecomment-1863025784. var bootstrapClusterCreatorAdminPermissions *bool if v, ok := d.GetOk("access_config"); ok { if apiObject := expandCreateAccessConfigRequest(v.([]interface{})); apiObject != nil { @@ -418,6 +435,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter return sdkdiag.AppendErrorf(diags, "setting access_config: %s", err) } d.Set(names.AttrARN, cluster.Arn) + d.Set("bootstrap_self_managed_addons", d.Get("bootstrap_self_managed_addons")) if err := d.Set("certificate_authority", flattenCertificate(cluster.CertificateAuthority)); err != nil { return sdkdiag.AppendErrorf(diags, "setting certificate_authority: %s", err) } @@ -770,6 +788,9 @@ func waitClusterDeleted(ctx context.Context, conn *eks.Client, name string, time Target: []string{}, Refresh: statusCluster(ctx, conn, name), Timeout: timeout, + // An attempt to avoid "ResourceInUseException: Cluster already exists with name: ..." 
errors + // in acceptance tests when recreating a cluster with the same randomly generated name. + ContinuousTargetOccurence: 3, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -1048,6 +1069,9 @@ func flattenAccessConfigResponse(apiObject *types.AccessConfigResponse, bootstra if bootstrapClusterCreatorAdminPermissions != nil { tfMap["bootstrap_cluster_creator_admin_permissions"] = aws.ToBool(bootstrapClusterCreatorAdminPermissions) + } else { + // Setting default value to true for backward compatibility. + tfMap["bootstrap_cluster_creator_admin_permissions"] = true } return []interface{}{tfMap} diff --git a/internal/service/eks/cluster_data_source.go b/internal/service/eks/cluster_data_source.go index 072b6663aac..092bf3b810b 100644 --- a/internal/service/eks/cluster_data_source.go +++ b/internal/service/eks/cluster_data_source.go @@ -30,6 +30,10 @@ func dataSourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "bootstrap_cluster_creator_admin_permissions": { + Type: schema.TypeBool, + Computed: true, + }, }, }, }, diff --git a/internal/service/eks/cluster_migrate.go b/internal/service/eks/cluster_migrate.go new file mode 100644 index 00000000000..70e1592200d --- /dev/null +++ b/internal/service/eks/cluster_migrate.go @@ -0,0 +1,270 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package eks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// aws_eks_cluster resource's Schema @v5.56.1 minus validators. 
+func resourceClusterV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authentication_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "bootstrap_cluster_creator_admin_permissions": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + }, + }, + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "certificate_authority": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "cluster_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrCreatedAt: { + Type: schema.TypeString, + Computed: true, + }, + "enabled_cluster_log_types": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "encryption_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "provider": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key_arn": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + names.AttrResources: { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + names.AttrEndpoint: { + Type: schema.TypeString, + Computed: true, + }, + "identity": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oidc": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrIssuer: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + 
"kubernetes_network_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_family": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "service_ipv4_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "service_ipv6_cidr": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + names.AttrName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "outpost_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "control_plane_instance_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "control_plane_placement": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrGroupName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + }, + }, + "outpost_arns": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "platform_version": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + names.AttrStatus: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + names.AttrVersion: { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + names.AttrVPCConfig: { + Type: schema.TypeList, + MinItems: 1, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_security_group_id": { + Type: schema.TypeString, + Computed: true, + }, + "endpoint_private_access": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + 
"endpoint_public_access": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "public_access_cidrs": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + names.AttrSecurityGroupIDs: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + names.AttrSubnetIDs: { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + names.AttrVPCID: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func clusterStateUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + if rawState == nil { + rawState = map[string]interface{}{} + } + + if _, ok := rawState["bootstrap_self_managed_addons"]; !ok { + rawState["bootstrap_self_managed_addons"] = "true" + } + + return rawState, nil +} diff --git a/internal/service/eks/cluster_migrate_test.go b/internal/service/eks/cluster_migrate_test.go new file mode 100644 index 00000000000..8e4c1ec4e9e --- /dev/null +++ b/internal/service/eks/cluster_migrate_test.go @@ -0,0 +1,73 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package eks_test + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + tfeks "github.com/hashicorp/terraform-provider-aws/internal/service/eks" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestClusterStateUpgradeV0(t *testing.T) { + ctx := context.Background() + t.Parallel() + + testCases := []struct { + testName string + rawState map[string]interface{} + want map[string]interface{} + }{ + { + testName: "empty state", + rawState: map[string]interface{}{}, + want: map[string]interface{}{ + "bootstrap_self_managed_addons": acctest.CtTrue, + }, + }, + { + testName: "non-empty state", + rawState: map[string]interface{}{ + names.AttrName: "testing", + names.AttrVersion: "1.1.0", + }, + want: map[string]interface{}{ + "bootstrap_self_managed_addons": acctest.CtTrue, + names.AttrName: "testing", + names.AttrVersion: "1.1.0", + }, + }, + { + testName: "bootstrap_self_managed_addons set", + rawState: map[string]interface{}{ + "bootstrap_self_managed_addons": acctest.CtFalse, + names.AttrName: "testing", + names.AttrVersion: "1.1.0", + }, + want: map[string]interface{}{ + "bootstrap_self_managed_addons": acctest.CtFalse, + names.AttrName: "testing", + names.AttrVersion: "1.1.0", + }, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.testName, func(t *testing.T) { + t.Parallel() + + got, err := tfeks.ClusterStateUpgradeV0(ctx, testCase.rawState, nil) + + if err != nil { + t.Errorf("err = %q", err) + } else if diff := cmp.Diff(got, testCase.want); diff != "" { + t.Errorf("unexpected diff (+wanted, -got): %s", diff) + } + }) + } +} diff --git a/internal/service/eks/cluster_test.go b/internal/service/eks/cluster_test.go index 0c047909aee..0f3162b9abb 100644 --- a/internal/service/eks/cluster_test.go +++ b/internal/service/eks/cluster_test.go @@ -16,6 +16,7 @@ import ( 
"github.com/aws/aws-sdk-go-v2/service/eks/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/plancheck" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -47,6 +48,7 @@ func TestAccEKSCluster_basic(t *testing.T) { testAccCheckClusterExists(ctx, resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "access_config.#", acctest.Ct1), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "eks", regexache.MustCompile(fmt.Sprintf("cluster/%s$", rName))), + resource.TestCheckResourceAttr(resourceName, "bootstrap_self_managed_addons", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "certificate_authority.#", acctest.Ct1), resource.TestCheckResourceAttrSet(resourceName, "certificate_authority.0.data"), resource.TestCheckNoResourceAttr(resourceName, "cluster_id"), @@ -76,9 +78,10 @@ func TestAccEKSCluster_basic(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, }, }) @@ -130,9 +133,10 @@ func TestAccEKSCluster_AccessConfig_create(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: false, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: false, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, }, }) @@ -155,10 +159,17 @@ func TestAccEKSCluster_AccessConfig_update(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "access_config.#", acctest.Ct1), + 
resource.TestCheckResourceAttr(resourceName, "access_config.0.authentication_mode", string(types.AuthenticationModeConfigMap)), + resource.TestCheckResourceAttr(resourceName, "access_config.0.bootstrap_cluster_creator_admin_permissions", acctest.CtTrue), ), }, { Config: testAccClusterConfig_accessConfig(rName, types.AuthenticationModeConfigMap), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "access_config.#", acctest.Ct1), @@ -168,6 +179,11 @@ func TestAccEKSCluster_AccessConfig_update(t *testing.T) { }, { Config: testAccClusterConfig_accessConfig(rName, types.AuthenticationModeApiAndConfigMap), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + }, + }, Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &cluster), resource.TestCheckResourceAttr(resourceName, "access_config.#", acctest.Ct1), @@ -186,6 +202,84 @@ func TestAccEKSCluster_AccessConfig_update(t *testing.T) { }) } +func TestAccEKSCluster_BootstrapSelfManagedAddons_update(t *testing.T) { + ctx := acctest.Context(t) + var cluster types.Cluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_eks_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_bootstrapSelfManagedAddons(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, 
resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "bootstrap_self_managed_addons", acctest.CtFalse), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, + }, + { + Config: testAccClusterConfig_bootstrapSelfManagedAddons(rName, true), + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionReplace), + }, + }, + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster), + resource.TestCheckResourceAttr(resourceName, "bootstrap_self_managed_addons", acctest.CtTrue), + ), + }, + }, + }) +} + +func TestAccEKSCluster_BootstrapSelfManagedAddons_migrate(t *testing.T) { + ctx := acctest.Context(t) + var cluster1, cluster2 types.Cluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_eks_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EKSServiceID), + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.56.1", + }, + }, + Config: testAccClusterConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster1), + resource.TestCheckNoResourceAttr(resourceName, "bootstrap_self_managed_addons"), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Config: testAccClusterConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &cluster2), + testAccCheckClusterNotRecreated(&cluster1, &cluster2), + resource.TestCheckResourceAttr(resourceName, "bootstrap_self_managed_addons", 
acctest.CtTrue), + ), + }, + }, + }) +} + func TestAccEKSCluster_Encryption_create(t *testing.T) { ctx := acctest.Context(t) var cluster types.Cluster @@ -210,9 +304,10 @@ func TestAccEKSCluster_Encryption_create(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, }, }) @@ -250,9 +345,10 @@ func TestAccEKSCluster_Encryption_update(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, }, }) @@ -284,9 +380,10 @@ func TestAccEKSCluster_Encryption_versionUpdate(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_encryptionVersion(rName, clusterVersionUpgradeUpdated), @@ -324,9 +421,10 @@ func TestAccEKSCluster_version(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_version(rName, clusterVersionUpgradeUpdated), @@ -361,9 +459,10 @@ func TestAccEKSCluster_logging(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_logging(rName, []string{"api", "audit"}), @@ -409,9 +508,10 @@ func 
TestAccEKSCluster_tags(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), @@ -455,9 +555,10 @@ func TestAccEKSCluster_VPC_securityGroupIDs(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, }, }) @@ -495,9 +596,10 @@ func TestAccEKSCluster_VPC_securityGroupIDsAndSubnetIDs_update(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, }, }) @@ -524,9 +626,10 @@ func TestAccEKSCluster_VPC_endpointPrivateAccess(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_vpcEndpointPrivateAccess(rName, false), @@ -571,9 +674,10 @@ func TestAccEKSCluster_VPC_endpointPublicAccess(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_vpcEndpointPublicAccess(rName, true), @@ -618,9 +722,10 @@ func TestAccEKSCluster_VPC_publicAccessCIDRs(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - 
ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_vpcPublicAccessCIDRs(rName, `["4.3.2.1/32", "8.7.6.5/32"]`), @@ -675,9 +780,10 @@ func TestAccEKSCluster_Network_serviceIPv4CIDR(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_networkServiceIPv4CIDR(rName, `"192.168.0.0/24"`), @@ -718,9 +824,10 @@ func TestAccEKSCluster_Network_ipFamily(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, { Config: testAccClusterConfig_networkIPFamily(rName, `"ipv6"`), @@ -764,9 +871,10 @@ func TestAccEKSCluster_Outpost_create(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, }, }) @@ -797,9 +905,10 @@ func TestAccEKSCluster_Outpost_placement(t *testing.T) { ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"bootstrap_self_managed_addons"}, }, }, }) @@ -980,6 +1089,22 @@ resource "aws_eks_cluster" "test" { `, rName, authenticationMode)) } +func testAccClusterConfig_bootstrapSelfManagedAddons(rName string, bootstrapSelfManagedAddons bool) string { + return acctest.ConfigCompose(testAccClusterConfig_base(rName), fmt.Sprintf(` +resource "aws_eks_cluster" 
"test" { + name = %[1]q + role_arn = aws_iam_role.test.arn + bootstrap_self_managed_addons = %[2]t + + vpc_config { + subnet_ids = aws_subnet.test[*].id + } + + depends_on = [aws_iam_role_policy_attachment.test-AmazonEKSClusterPolicy] +} +`, rName, bootstrapSelfManagedAddons)) +} + func testAccClusterConfig_version(rName, version string) string { return acctest.ConfigCompose(testAccClusterConfig_base(rName), fmt.Sprintf(` resource "aws_eks_cluster" "test" { diff --git a/internal/service/eks/exports_test.go b/internal/service/eks/exports_test.go index b5cdf7e2aea..81bdfea5dc9 100644 --- a/internal/service/eks/exports_test.go +++ b/internal/service/eks/exports_test.go @@ -14,6 +14,7 @@ var ( ResourceNodeGroup = resourceNodeGroup ResourcePodIdentityAssociation = newPodIdentityAssociationResource + ClusterStateUpgradeV0 = clusterStateUpgradeV0 FindAccessEntryByTwoPartKey = findAccessEntryByTwoPartKey FindAccessPolicyAssociationByThreePartKey = findAccessPolicyAssociationByThreePartKey FindAddonByTwoPartKey = findAddonByTwoPartKey diff --git a/internal/service/eks/service_endpoint_resolver_gen.go b/internal/service/eks/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ac68cc8cc13 --- /dev/null +++ b/internal/service/eks/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package eks + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ eks_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver eks_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: eks_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params eks_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up eks endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*eks_sdkv2.Options) { + return func(o *eks_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/eks/service_endpoints_gen_test.go b/internal/service/eks/service_endpoints_gen_test.go index 7df8770dbae..df336e50006 100644 --- a/internal/service/eks/service_endpoints_gen_test.go +++ b/internal/service/eks/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := eks_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), eks_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := eks_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
eks_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/eks/service_package_gen.go b/internal/service/eks/service_package_gen.go index afe30d44ab2..bfb87d2f2bf 100644 --- a/internal/service/eks/service_package_gen.go +++ b/internal/service/eks/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package eks @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" eks_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eks" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -135,19 +134,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*eks_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return eks_sdkv2.NewFromConfig(cfg, func(o *eks_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return eks_sdkv2.NewFromConfig(cfg, + eks_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/elasticache/cluster.go b/internal/service/elasticache/cluster.go index 36abbeab6e0..b373ff903a2 100644 --- a/internal/service/elasticache/cluster.go +++ b/internal/service/elasticache/cluster.go @@ -14,15 +14,16 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes 
"github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -75,10 +76,10 @@ func resourceCluster() *schema.Resource { ForceNew: true, }, "az_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(elasticache.AZMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.AZMode](), }, "cache_nodes": { Type: schema.TypeList, @@ -156,10 +157,10 @@ func resourceCluster() *schema.Resource { Optional: true, }, "ip_discovery": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(elasticache.IpDiscovery_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.IpDiscovery](), }, "log_delivery_configuration": { Type: schema.TypeSet, @@ -172,19 +173,19 @@ func resourceCluster() *schema.Resource { Required: true, }, "destination_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.DestinationType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.DestinationType](), }, "log_format": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.LogFormat_Values(), false), + Type: 
schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogFormat](), }, "log_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.LogType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogType](), }, }, }, @@ -201,11 +202,11 @@ func resourceCluster() *schema.Resource { ValidateFunc: verify.ValidOnceAWeekWindowFormat, }, "network_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(elasticache.NetworkType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.NetworkType](), }, "node_type": { Type: schema.TypeString, @@ -222,11 +223,11 @@ func resourceCluster() *schema.Resource { Computed: true, }, "outpost_mode": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - RequiredWith: []string{"preferred_outpost_arn"}, - ValidateFunc: validation.StringInSlice(elasticache.OutpostMode_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + RequiredWith: []string{"preferred_outpost_arn"}, + ValidateDiagFunc: enum.Validate[awstypes.OutpostMode](), }, names.AttrParameterGroupName: { Type: schema.TypeString, @@ -347,7 +348,8 @@ func resourceCluster() *schema.Resource { func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) + partition := meta.(*conns.AWSClient).Partition clusterID := d.Get("cluster_id").(string) input := &elasticache.CreateCacheClusterInput{ @@ -358,7 +360,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int if v, ok := d.GetOk("replication_group_id"); ok { input.ReplicationGroupId = 
aws.String(v.(string)) } else { - input.SecurityGroupIds = flex.ExpandStringSet(d.Get(names.AttrSecurityGroupIDs).(*schema.Set)) + input.SecurityGroupIds = flex.ExpandStringValueSet(d.Get(names.AttrSecurityGroupIDs).(*schema.Set)) } if v, ok := d.GetOk("node_type"); ok { @@ -366,11 +368,11 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("num_cache_nodes"); ok { - input.NumCacheNodes = aws.Int64(int64(v.(int))) + input.NumCacheNodes = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("outpost_mode"); ok { - input.OutpostMode = aws.String(v.(string)) + input.OutpostMode = awstypes.OutpostMode(v.(string)) } if v, ok := d.GetOk("preferred_outpost_arn"); ok { @@ -393,7 +395,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk(names.AttrPort); ok { - input.Port = aws.Int64(int64(v.(int))) + input.Port = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("subnet_group_name"); ok { @@ -406,7 +408,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("snapshot_retention_limit"); ok { - input.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + input.SnapshotRetentionLimit = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("snapshot_window"); ok { @@ -414,11 +416,9 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("log_delivery_configuration"); ok { - input.LogDeliveryConfigurations = []*elasticache.LogDeliveryConfigurationRequest{} - v := v.(*schema.Set).List() - for _, v := range v { - logDeliveryConfigurationRequest := expandLogDeliveryConfigurations(v.(map[string]interface{})) - input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &logDeliveryConfigurationRequest) + input.LogDeliveryConfigurations = []awstypes.LogDeliveryConfigurationRequest{} + for _, v := range v.(*schema.Set).List() { + input.LogDeliveryConfigurations = 
append(input.LogDeliveryConfigurations, expandLogDeliveryConfigurationRequests(v.(map[string]interface{}))) } } @@ -430,10 +430,9 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int input.NotificationTopicArn = aws.String(v.(string)) } - snaps := d.Get("snapshot_arns").([]interface{}) - if len(snaps) > 0 { - input.SnapshotArns = flex.ExpandStringList(snaps) - log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", snaps) + if v := d.Get("snapshot_arns").([]interface{}); len(v) > 0 { + input.SnapshotArns = flex.ExpandStringValueList(v) + log.Printf("[DEBUG] Restoring Redis cluster from S3 snapshot: %#v", v) } if v, ok := d.GetOk("snapshot_name"); ok { @@ -445,7 +444,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("az_mode"); ok { - input.AZMode = aws.String(v.(string)) + input.AZMode = awstypes.AZMode(v.(string)) } if v, ok := d.GetOk(names.AttrAvailabilityZone); ok { @@ -453,18 +452,18 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int } if v, ok := d.GetOk("preferred_availability_zones"); ok && len(v.([]interface{})) > 0 { - input.PreferredAvailabilityZones = flex.ExpandStringList(v.([]interface{})) + input.PreferredAvailabilityZones = flex.ExpandStringValueList(v.([]interface{})) } if v, ok := d.GetOk("ip_discovery"); ok { - input.IpDiscovery = aws.String(v.(string)) + input.IpDiscovery = awstypes.IpDiscovery(v.(string)) } if v, ok := d.GetOk("network_type"); ok { - input.NetworkType = aws.String(v.(string)) + input.NetworkType = awstypes.NetworkType(v.(string)) } - id, arn, err := createCacheCluster(ctx, conn, input) + id, arn, err := createCacheCluster(ctx, conn, partition, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating ElastiCache Cache Cluster (%s): %s", clusterID, err) @@ -484,7 +483,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int err := createTags(ctx, conn, arn, 
tags) // If default tags only, continue. Otherwise, error. - if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(partition, err) { return append(diags, resourceClusterRead(ctx, d, meta)...) } @@ -498,7 +497,7 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) c, err := findCacheClusterWithNodeInfoByID(ctx, conn, d.Id()) @@ -515,7 +514,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("cluster_id", c.CacheClusterId) if err := setFromCacheCluster(d, c); err != nil { - return sdkdiag.AppendErrorf(diags, "reading ElastiCache Cache Cluster (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } d.Set("log_delivery_configuration", flattenLogDeliveryConfigurations(c.LogDeliveryConfigurations)) @@ -526,7 +525,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter if c.ConfigurationEndpoint != nil { d.Set(names.AttrPort, c.ConfigurationEndpoint.Port) - d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", aws.StringValue(c.ConfigurationEndpoint.Address), aws.Int64Value(c.ConfigurationEndpoint.Port)))) + d.Set("configuration_endpoint", aws.String(fmt.Sprintf("%s:%d", aws.ToString(c.ConfigurationEndpoint.Address), aws.ToInt32(c.ConfigurationEndpoint.Port)))) d.Set("cluster_address", c.ConfigurationEndpoint.Address) } else if len(c.CacheNodes) > 0 { d.Set(names.AttrPort, c.CacheNodes[0].Endpoint.Port) @@ -535,19 +534,19 @@ func resourceClusterRead(ctx context.Context, d 
*schema.ResourceData, meta inter d.Set("replication_group_id", c.ReplicationGroupId) if c.NotificationConfiguration != nil { - if aws.StringValue(c.NotificationConfiguration.TopicStatus) == "active" { + if aws.ToString(c.NotificationConfiguration.TopicStatus) == "active" { d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn) } } d.Set(names.AttrAvailabilityZone, c.PreferredAvailabilityZone) - if aws.StringValue(c.PreferredAvailabilityZone) == "Multiple" { + if aws.ToString(c.PreferredAvailabilityZone) == "Multiple" { d.Set("az_mode", "cross-az") } else { d.Set("az_mode", "single-az") } if err := setCacheNodeData(d, c); err != nil { - return sdkdiag.AppendErrorf(diags, "reading ElastiCache Cache Cluster (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } d.Set(names.AttrARN, c.ARN) @@ -562,7 +561,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &elasticache.ModifyCacheClusterInput{ @@ -573,7 +572,7 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int requestUpdate := false if d.HasChange(names.AttrSecurityGroupIDs) { if attr := d.Get(names.AttrSecurityGroupIDs).(*schema.Set); attr.Len() > 0 { - input.SecurityGroupIds = flex.ExpandStringSet(attr) + input.SecurityGroupIds = flex.ExpandStringValueSet(attr) requestUpdate = true } } @@ -584,29 +583,29 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } if d.HasChange("ip_discovery") { - input.IpDiscovery = aws.String(d.Get("ip_discovery").(string)) + input.IpDiscovery = awstypes.IpDiscovery(d.Get("ip_discovery").(string)) requestUpdate = true } if 
d.HasChange("log_delivery_configuration") { oldLogDeliveryConfig, newLogDeliveryConfig := d.GetChange("log_delivery_configuration") - input.LogDeliveryConfigurations = []*elasticache.LogDeliveryConfigurationRequest{} + input.LogDeliveryConfigurations = []awstypes.LogDeliveryConfigurationRequest{} logTypesToSubmit := make(map[string]bool) currentLogDeliveryConfig := newLogDeliveryConfig.(*schema.Set).List() for _, current := range currentLogDeliveryConfig { - logDeliveryConfigurationRequest := expandLogDeliveryConfigurations(current.(map[string]interface{})) - logTypesToSubmit[*logDeliveryConfigurationRequest.LogType] = true - input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &logDeliveryConfigurationRequest) + logDeliveryConfigurationRequest := expandLogDeliveryConfigurationRequests(current.(map[string]interface{})) + logTypesToSubmit[string(logDeliveryConfigurationRequest.LogType)] = true + input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, logDeliveryConfigurationRequest) } previousLogDeliveryConfig := oldLogDeliveryConfig.(*schema.Set).List() for _, previous := range previousLogDeliveryConfig { - logDeliveryConfigurationRequest := expandEmptyLogDeliveryConfigurations(previous.(map[string]interface{})) + logDeliveryConfigurationRequest := expandEmptyLogDeliveryConfigurationRequest(previous.(map[string]interface{})) // if something was removed, send an empty request - if !logTypesToSubmit[*logDeliveryConfigurationRequest.LogType] { - input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &logDeliveryConfigurationRequest) + if !logTypesToSubmit[string(logDeliveryConfigurationRequest.LogType)] { + input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, logDeliveryConfigurationRequest) } } @@ -652,12 +651,12 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int } if d.HasChange("snapshot_retention_limit") { - input.SnapshotRetentionLimit = 
aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + input.SnapshotRetentionLimit = aws.Int32(int32(d.Get("snapshot_retention_limit").(int))) requestUpdate = true } if d.HasChange("az_mode") { - input.AZMode = aws.String(d.Get("az_mode").(string)) + input.AZMode = awstypes.AZMode(d.Get("az_mode").(string)) requestUpdate = true } @@ -683,27 +682,26 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int if len(v.([]interface{})) != n { return sdkdiag.AppendErrorf(diags, "length of preferred_availability_zones (%d) must match num_cache_nodes (%d)", len(v.([]interface{})), n) } - input.NewAvailabilityZones = flex.ExpandStringList(v.([]interface{})[o:]) + input.NewAvailabilityZones = flex.ExpandStringValueList(v.([]interface{})[o:]) } } - input.NumCacheNodes = aws.Int64(int64(d.Get("num_cache_nodes").(int))) + input.NumCacheNodes = aws.Int32(int32(d.Get("num_cache_nodes").(int))) requestUpdate = true } if requestUpdate { - log.Printf("[DEBUG] Modifying ElastiCache Cluster (%s), opts:\n%s", d.Id(), input) - _, err := conn.ModifyCacheClusterWithContext(ctx, input) + _, err := conn.ModifyCacheCluster(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating ElastiCache cluster (%s), error: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating ElastiCache Cache Cluster (%s): %s", d.Id(), err) } const ( timeout = 80 * time.Minute ) - _, err = waitCacheClusterAvailable(ctx, conn, d.Id(), timeout) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Cache Cluster (%s) to update: %s", d.Id(), err) + if _, err := waitCacheClusterAvailable(ctx, conn, d.Id(), timeout); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Cache Cluster (%s) update: %s", d.Id(), err) } } } @@ -713,35 +711,39 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int func resourceClusterDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) 
diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) var finalSnapshotID = d.Get(names.AttrFinalSnapshotIdentifier).(string) - err := DeleteCacheCluster(ctx, conn, d.Id(), finalSnapshotID) + err := deleteCacheCluster(ctx, conn, d.Id(), finalSnapshotID) + + if errs.IsA[*awstypes.CacheClusterNotFoundFault](err) { + return diags + } + if err != nil { - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheClusterNotFoundFault) { - return diags - } return sdkdiag.AppendErrorf(diags, "deleting ElastiCache Cache Cluster (%s): %s", d.Id(), err) } + const ( timeout = 40 * time.Minute ) _, err = waitCacheClusterDeleted(ctx, conn, d.Id(), timeout) + if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Cache Cluster (%s) to be deleted: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Cache Cluster (%s) delete: %s", d.Id(), err) } return diags } -func createCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.CreateCacheClusterInput) (string, string, error) { - output, err := conn.CreateCacheClusterWithContext(ctx, input) +func createCacheCluster(ctx context.Context, conn *elasticache.Client, partition string, input *elasticache.CreateCacheClusterInput) (string, string, error) { + output, err := conn.CreateCacheCluster(ctx, input) // Some partitions (e.g. ISO) may not support tag-on-create. 
- if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateCacheClusterWithContext(ctx, input) + output, err = conn.CreateCacheCluster(ctx, input) } if err != nil { @@ -751,13 +753,14 @@ func createCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, inpu if output == nil || output.CacheCluster == nil { return "", "", errors.New("missing cluster ID after creation") } + // ElastiCache always retains the id in lower case, so we have to // mimic that or else we won't be able to refresh a resource whose // name contained uppercase characters. - return strings.ToLower(aws.StringValue(output.CacheCluster.CacheClusterId)), aws.StringValue(output.CacheCluster.ARN), nil + return strings.ToLower(aws.ToString(output.CacheCluster.CacheClusterId)), aws.ToString(output.CacheCluster.ARN), nil } -func DeleteCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string, finalSnapshotID string) error { +func deleteCacheCluster(ctx context.Context, conn *elasticache.Client, cacheClusterID string, finalSnapshotID string) error { input := &elasticache.DeleteCacheClusterInput{ CacheClusterId: aws.String(cacheClusterID), } @@ -765,18 +768,18 @@ func DeleteCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, cach input.FinalSnapshotIdentifier = aws.String(finalSnapshotID) } - log.Printf("[DEBUG] Deleting ElastiCache Cache Cluster: %s", input) + log.Printf("[DEBUG] Deleting ElastiCache Cache Cluster: %s", cacheClusterID) err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { - _, err := conn.DeleteCacheClusterWithContext(ctx, input) + _, err := conn.DeleteCacheCluster(ctx, input) if err != nil { - if tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidCacheClusterStateFault, "serving as primary") { + if 
errs.IsAErrorMessageContains[*awstypes.InvalidCacheClusterStateFault](err, "serving as primary") { return retry.NonRetryableError(err) } - if tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidCacheClusterStateFault, "only member of a replication group") { + if errs.IsAErrorMessageContains[*awstypes.InvalidCacheClusterStateFault](err, "only member of a replication group") { return retry.NonRetryableError(err) } // The cluster may be just snapshotting, so we retry until it's ready for deletion - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeInvalidCacheClusterStateFault) { + if errs.IsA[*awstypes.InvalidCacheClusterStateFault](err) { return retry.RetryableError(err) } return retry.NonRetryableError(err) @@ -784,70 +787,69 @@ func DeleteCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, cach return nil }) if tfresource.TimedOut(err) { - _, err = conn.DeleteCacheClusterWithContext(ctx, input) + _, err = conn.DeleteCacheCluster(ctx, input) } return err } -func findCacheClusterByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.CacheCluster, error) { +func findCacheClusterByID(ctx context.Context, conn *elasticache.Client, id string) (*awstypes.CacheCluster, error) { input := &elasticache.DescribeCacheClustersInput{ CacheClusterId: aws.String(id), } - return findCacheCluster(ctx, conn, input, tfslices.PredicateTrue[*elasticache.CacheCluster]()) + return findCacheCluster(ctx, conn, input, tfslices.PredicateTrue[*awstypes.CacheCluster]()) } -func findCacheClusterWithNodeInfoByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.CacheCluster, error) { + +func findCacheClusterWithNodeInfoByID(ctx context.Context, conn *elasticache.Client, id string) (*awstypes.CacheCluster, error) { input := &elasticache.DescribeCacheClustersInput{ CacheClusterId: aws.String(id), ShowCacheNodeInfo: aws.Bool(true), } - return findCacheCluster(ctx, conn, input, 
tfslices.PredicateTrue[*elasticache.CacheCluster]()) + return findCacheCluster(ctx, conn, input, tfslices.PredicateTrue[*awstypes.CacheCluster]()) } -func findCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheClustersInput, filter tfslices.Predicate[*elasticache.CacheCluster]) (*elasticache.CacheCluster, error) { +func findCacheCluster(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeCacheClustersInput, filter tfslices.Predicate[*awstypes.CacheCluster]) (*awstypes.CacheCluster, error) { output, err := findCacheClusters(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findCacheClusters(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheClustersInput, filter tfslices.Predicate[*elasticache.CacheCluster]) ([]*elasticache.CacheCluster, error) { - var output []*elasticache.CacheCluster +func findCacheClusters(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeCacheClustersInput, filter tfslices.Predicate[*awstypes.CacheCluster]) ([]awstypes.CacheCluster, error) { + var output []awstypes.CacheCluster - err := conn.DescribeCacheClustersPagesWithContext(ctx, input, func(page *elasticache.DescribeCacheClustersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticache.NewDescribeCacheClustersPaginator(conn, input) - for _, v := range page.CacheClusters { - if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.CacheClusterNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheClusterNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if 
err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.CacheClusters { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func statusCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string) retry.StateRefreshFunc { +func statusCacheCluster(ctx context.Context, conn *elasticache.Client, cacheClusterID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findCacheClusterByID(ctx, conn, cacheClusterID) @@ -859,7 +861,7 @@ func statusCacheCluster(ctx context.Context, conn *elasticache.ElastiCache, cach return nil, "", err } - return output, aws.StringValue(output.CacheClusterStatus), nil + return output, aws.ToString(output.CacheClusterStatus), nil } } @@ -875,7 +877,7 @@ const ( cacheClusterStatusSnapshotting = "snapshotting" ) -func waitCacheClusterAvailable(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) (*elasticache.CacheCluster, error) { //nolint:unparam +func waitCacheClusterAvailable(ctx context.Context, conn *elasticache.Client, cacheClusterID string, timeout time.Duration) (*awstypes.CacheCluster, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{ cacheClusterStatusCreating, @@ -892,14 +894,14 @@ func waitCacheClusterAvailable(ctx context.Context, conn *elasticache.ElastiCach outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.CacheCluster); ok { + if output, ok := outputRaw.(*awstypes.CacheCluster); ok { return output, err } return nil, err } -func waitCacheClusterDeleted(ctx context.Context, conn *elasticache.ElastiCache, cacheClusterID string, timeout time.Duration) (*elasticache.CacheCluster, error) { +func waitCacheClusterDeleted(ctx context.Context, conn *elasticache.Client, cacheClusterID string, timeout time.Duration) (*awstypes.CacheCluster, error) { stateConf := &retry.StateChangeConf{ 
Pending: []string{ cacheClusterStatusCreating, @@ -919,25 +921,25 @@ func waitCacheClusterDeleted(ctx context.Context, conn *elasticache.ElastiCache, outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.CacheCluster); ok { + if output, ok := outputRaw.(*awstypes.CacheCluster); ok { return output, err } return nil, err } -func getCacheNodesToRemove(oldNumberOfNodes int, cacheNodesToRemove int) []*string { - nodesIdsToRemove := []*string{} +func getCacheNodesToRemove(oldNumberOfNodes int, cacheNodesToRemove int) []string { + nodesIdsToRemove := []string{} for i := oldNumberOfNodes; i > oldNumberOfNodes-cacheNodesToRemove && i > 0; i-- { s := fmt.Sprintf("%04d", i) - nodesIdsToRemove = append(nodesIdsToRemove, &s) + nodesIdsToRemove = append(nodesIdsToRemove, s) } return nodesIdsToRemove } -func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error { - sortedCacheNodes := make([]*elasticache.CacheNode, len(c.CacheNodes)) +func setCacheNodeData(d *schema.ResourceData, c *awstypes.CacheCluster) error { + sortedCacheNodes := make([]awstypes.CacheNode, len(c.CacheNodes)) copy(sortedCacheNodes, c.CacheNodes) sort.Sort(byCacheNodeId(sortedCacheNodes)) @@ -945,41 +947,41 @@ func setCacheNodeData(d *schema.ResourceData, c *elasticache.CacheCluster) error for _, node := range sortedCacheNodes { if node.CacheNodeId == nil || node.Endpoint == nil || node.Endpoint.Address == nil || node.Endpoint.Port == nil || node.CustomerAvailabilityZone == nil { - return fmt.Errorf("Unexpected nil pointer in: %s", node) + return fmt.Errorf("Unexpected nil pointer in: %+v", node) } cacheNodeData = append(cacheNodeData, map[string]interface{}{ - names.AttrID: aws.StringValue(node.CacheNodeId), - names.AttrAddress: aws.StringValue(node.Endpoint.Address), - names.AttrPort: aws.Int64Value(node.Endpoint.Port), - names.AttrAvailabilityZone: aws.StringValue(node.CustomerAvailabilityZone), - "outpost_arn": 
aws.StringValue(node.CustomerOutpostArn), + names.AttrID: aws.ToString(node.CacheNodeId), + names.AttrAddress: aws.ToString(node.Endpoint.Address), + names.AttrPort: aws.ToInt32(node.Endpoint.Port), + names.AttrAvailabilityZone: aws.ToString(node.CustomerAvailabilityZone), + "outpost_arn": aws.ToString(node.CustomerOutpostArn), }) } return d.Set("cache_nodes", cacheNodeData) } -type byCacheNodeId []*elasticache.CacheNode +type byCacheNodeId []awstypes.CacheNode func (b byCacheNodeId) Len() int { return len(b) } func (b byCacheNodeId) Swap(i, j int) { b[i], b[j] = b[j], b[i] } func (b byCacheNodeId) Less(i, j int) bool { return b[i].CacheNodeId != nil && b[j].CacheNodeId != nil && - aws.StringValue(b[i].CacheNodeId) < aws.StringValue(b[j].CacheNodeId) + aws.ToString(b[i].CacheNodeId) < aws.ToString(b[j].CacheNodeId) } -func setFromCacheCluster(d *schema.ResourceData, c *elasticache.CacheCluster) error { +func setFromCacheCluster(d *schema.ResourceData, c *awstypes.CacheCluster) error { d.Set("node_type", c.CacheNodeType) d.Set(names.AttrEngine, c.Engine) - if aws.StringValue(c.Engine) == engineRedis { + if aws.ToString(c.Engine) == engineRedis { if err := setEngineVersionRedis(d, c.EngineVersion); err != nil { return err // nosemgrep:ci.bare-error-returns } } else { setEngineVersionMemcached(d, c.EngineVersion) } - d.Set(names.AttrAutoMinorVersionUpgrade, strconv.FormatBool(aws.BoolValue(c.AutoMinorVersionUpgrade))) + d.Set(names.AttrAutoMinorVersionUpgrade, strconv.FormatBool(aws.ToBool(c.AutoMinorVersionUpgrade))) d.Set("subnet_group_name", c.CacheSubnetGroupName) if err := d.Set(names.AttrSecurityGroupIDs, flattenSecurityGroupIDs(c.SecurityGroups)); err != nil { diff --git a/internal/service/elasticache/cluster_data_source.go b/internal/service/elasticache/cluster_data_source.go index b95f8540ce6..ba306d66535 100644 --- a/internal/service/elasticache/cluster_data_source.go +++ b/internal/service/elasticache/cluster_data_source.go @@ -9,7 +9,7 @@ import ( "log" 
"strings" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -174,8 +174,9 @@ func dataSourceCluster() *schema.Resource { func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + partition := meta.(*conns.AWSClient).Partition clusterID := d.Get("cluster_id").(string) cluster, err := findCacheClusterWithNodeInfoByID(ctx, conn, clusterID) @@ -184,11 +185,11 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache Cluster", err)) } - d.SetId(aws.StringValue(cluster.CacheClusterId)) + d.SetId(aws.ToString(cluster.CacheClusterId)) d.Set(names.AttrARN, cluster.ARN) d.Set(names.AttrAvailabilityZone, cluster.PreferredAvailabilityZone) if cluster.ConfigurationEndpoint != nil { - clusterAddress, port := aws.StringValue(cluster.ConfigurationEndpoint.Address), aws.Int64Value(cluster.ConfigurationEndpoint.Port) + clusterAddress, port := aws.ToString(cluster.ConfigurationEndpoint.Address), aws.ToInt32(cluster.ConfigurationEndpoint.Port) d.Set("cluster_address", clusterAddress) d.Set("configuration_endpoint", fmt.Sprintf("%s:%d", clusterAddress, port)) d.Set(names.AttrPort, port) @@ -202,7 +203,7 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("network_type", cluster.NetworkType) d.Set("node_type", cluster.CacheNodeType) if cluster.NotificationConfiguration != nil { - if aws.StringValue(cluster.NotificationConfiguration.TopicStatus) == "active" { + if 
aws.ToString(cluster.NotificationConfiguration.TopicStatus) == "active" { d.Set("notification_topic_arn", cluster.NotificationConfiguration.TopicArn) } } @@ -218,12 +219,12 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("subnet_group_name", cluster.CacheSubnetGroupName) if err := setCacheNodeData(d, cluster); err != nil { - return sdkdiag.AppendErrorf(diags, "setting cache_nodes: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - tags, err := listTags(ctx, conn, aws.StringValue(cluster.ARN)) + tags, err := listTags(ctx, conn, aws.ToString(cluster.ARN)) - if err != nil && !errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if err != nil && !errs.IsUnsupportedOperationInPartitionError(partition, err) { return sdkdiag.AppendErrorf(diags, "listing tags for ElastiCache Cluster (%s): %s", d.Id(), err) } diff --git a/internal/service/elasticache/cluster_data_source_test.go b/internal/service/elasticache/cluster_data_source_test.go index eaaf6552172..5fd2e13eae9 100644 --- a/internal/service/elasticache/cluster_data_source_test.go +++ b/internal/service/elasticache/cluster_data_source_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -63,7 +63,7 @@ func TestAccElastiCacheClusterDataSource_Engine_Redis_LogDeliveryConfigurations( ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { - Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText), + Config: 
testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, awstypes.DestinationTypeKinesisFirehose, awstypes.LogFormatJson, true, awstypes.DestinationTypeCloudWatchLogs, awstypes.LogFormatText), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, names.AttrEngine, "redis"), resource.TestCheckResourceAttr(dataSourceName, "log_delivery_configuration.#", acctest.Ct2), diff --git a/internal/service/elasticache/cluster_test.go b/internal/service/elasticache/cluster_test.go index 330750ca516..adc687e6881 100644 --- a/internal/service/elasticache/cluster_test.go +++ b/internal/service/elasticache/cluster_test.go @@ -12,8 +12,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -40,7 +40,7 @@ func TestAccElastiCacheCluster_Engine_memcached(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -79,7 +79,7 @@ func TestAccElastiCacheCluster_Engine_redis(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -123,7 +123,7 @@ func TestAccElastiCacheCluster_disappears(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -151,7 +151,7 @@ func TestAccElastiCacheCluster_Engine_redis_v5(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -209,7 +209,7 @@ func TestAccElastiCacheCluster_PortRedis_default(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ @@ -236,7 +236,7 @@ func TestAccElastiCacheCluster_ParameterGroupName_default(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -274,7 +274,7 @@ func TestAccElastiCacheCluster_ipDiscovery(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -310,7 +310,7 @@ func TestAccElastiCacheCluster_port(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster port := 11212 rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -350,7 +350,7 @@ func TestAccElastiCacheCluster_snapshotsWithUpdates(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ @@ -385,7 +385,7 @@ func 
TestAccElastiCacheCluster_NumCacheNodes_decrease(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -419,7 +419,7 @@ func TestAccElastiCacheCluster_NumCacheNodes_increase(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -453,7 +453,7 @@ func TestAccElastiCacheCluster_NumCacheNodes_increaseWithPreferredAvailabilityZo t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -489,8 +489,8 @@ func TestAccElastiCacheCluster_vpc(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var csg elasticache.CacheSubnetGroup - var ec elasticache.CacheCluster + var csg awstypes.CacheSubnetGroup + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ @@ -517,8 +517,8 @@ func TestAccElastiCacheCluster_multiAZInVPC(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var csg elasticache.CacheSubnetGroup - var ec elasticache.CacheCluster + var csg awstypes.CacheSubnetGroup + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ @@ -545,7 +545,7 @@ func TestAccElastiCacheCluster_AZMode_memcached(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster + var cluster awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_elasticache_cluster.test" @@ -584,7 +584,7 @@ func TestAccElastiCacheCluster_AZMode_redis(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster + var cluster awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -619,7 +619,7 @@ func TestAccElastiCacheCluster_EngineVersion_memcached(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var pre, mid, post elasticache.CacheCluster + var pre, mid, post awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -665,7 +665,7 @@ func TestAccElastiCacheCluster_EngineVersion_redis(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var v1, v2, v3, v4, v5, v6 elasticache.CacheCluster + var v1, v2, v3, v4, v5, v6 awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -738,7 +738,7 @@ func TestAccElastiCacheCluster_NodeTypeResize_memcached(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var pre, post elasticache.CacheCluster + var pre, post awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -773,7 +773,7 @@ func TestAccElastiCacheCluster_NodeTypeResize_redis(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var pre, post elasticache.CacheCluster + var pre, post awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -826,8 +826,8 @@ func TestAccElastiCacheCluster_ReplicationGroupID_availabilityZone(t *testing.T) t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster - var replicationGroup elasticache.ReplicationGroup + var cluster awstypes.CacheCluster + var 
replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) clusterResourceName := "aws_elasticache_cluster.test" replicationGroupResourceName := "aws_elasticache_replication_group.test" @@ -856,8 +856,8 @@ func TestAccElastiCacheCluster_ReplicationGroupID_transitEncryption(t *testing.T t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster - var replicationGroup elasticache.ReplicationGroup + var cluster awstypes.CacheCluster + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) clusterResourceName := "aws_elasticache_cluster.test" replicationGroupResourceName := "aws_elasticache_replication_group.test" @@ -887,8 +887,8 @@ func TestAccElastiCacheCluster_ReplicationGroupID_singleReplica(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster - var replicationGroup elasticache.ReplicationGroup + var cluster awstypes.CacheCluster + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) clusterResourceName := "aws_elasticache_cluster.test.0" replicationGroupResourceName := "aws_elasticache_replication_group.test" @@ -920,8 +920,8 @@ func TestAccElastiCacheCluster_ReplicationGroupID_multipleReplica(t *testing.T) t.Skip("skipping long-running test in short mode") } - var cluster1, cluster2 elasticache.CacheCluster - var replicationGroup elasticache.ReplicationGroup + var cluster1, cluster2 awstypes.CacheCluster + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) clusterResourceName1 := "aws_elasticache_cluster.test.0" clusterResourceName2 := "aws_elasticache_cluster.test.1" @@ -979,7 +979,7 @@ func TestAccElastiCacheCluster_Redis_finalSnapshot(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster + var cluster 
awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1006,7 +1006,7 @@ func TestAccElastiCacheCluster_Redis_autoMinorVersionUpgrade(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster + var cluster awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1048,7 +1048,7 @@ func TestAccElastiCacheCluster_Engine_Redis_LogDeliveryConfigurations(t *testing t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1059,7 +1059,7 @@ func TestAccElastiCacheCluster_Engine_Redis_LogDeliveryConfigurations(t *testing CheckDestroy: testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText), + Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, awstypes.DestinationTypeCloudWatchLogs, awstypes.LogFormatText, true, awstypes.DestinationTypeCloudWatchLogs, awstypes.LogFormatText), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &ec), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -1080,7 +1080,7 @@ func TestAccElastiCacheCluster_Engine_Redis_LogDeliveryConfigurations(t *testing ImportStateVerifyIgnore: []string{names.AttrApplyImmediately}, }, { - Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson, true, elasticache.DestinationTypeKinesisFirehose, 
elasticache.LogFormatJson), + Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, awstypes.DestinationTypeKinesisFirehose, awstypes.LogFormatJson, true, awstypes.DestinationTypeKinesisFirehose, awstypes.LogFormatJson), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &ec), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -1101,7 +1101,7 @@ func TestAccElastiCacheCluster_Engine_Redis_LogDeliveryConfigurations(t *testing ImportStateVerifyIgnore: []string{names.AttrApplyImmediately}, }, { - Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson), + Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, awstypes.DestinationTypeCloudWatchLogs, awstypes.LogFormatText, true, awstypes.DestinationTypeKinesisFirehose, awstypes.LogFormatJson), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &ec), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -1116,7 +1116,7 @@ func TestAccElastiCacheCluster_Engine_Redis_LogDeliveryConfigurations(t *testing ), }, { - Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText), + Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, awstypes.DestinationTypeKinesisFirehose, awstypes.LogFormatJson, true, awstypes.DestinationTypeCloudWatchLogs, awstypes.LogFormatText), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &ec), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -1146,7 +1146,7 @@ func 
TestAccElastiCacheCluster_Engine_Redis_LogDeliveryConfigurations(t *testing ), }, { - Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson, false, "", ""), + Config: testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, awstypes.DestinationTypeKinesisFirehose, awstypes.LogFormatJson, false, "", ""), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &ec), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -1176,7 +1176,7 @@ func TestAccElastiCacheCluster_tags(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster + var cluster awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1234,7 +1234,7 @@ func TestAccElastiCacheCluster_tagWithOtherModification(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster + var cluster awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1275,7 +1275,7 @@ func TestAccElastiCacheCluster_TransitEncryption(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } - var cluster elasticache.CacheCluster + var cluster awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1312,7 +1312,7 @@ func TestAccElastiCacheCluster_outpost_memcached(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1352,7 +1352,7 @@ func TestAccElastiCacheCluster_outpost_redis(t *testing.T) { t.Skip("skipping 
long-running test in short mode") } - var ec elasticache.CacheCluster + var ec awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1392,7 +1392,7 @@ func TestAccElastiCacheCluster_outpostID_memcached(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var pre, post elasticache.CacheCluster + var pre, post awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1425,7 +1425,7 @@ func TestAccElastiCacheCluster_outpostID_redis(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var pre, post elasticache.CacheCluster + var pre, post awstypes.CacheCluster rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_cluster.test" @@ -1452,7 +1452,7 @@ func TestAccElastiCacheCluster_outpostID_redis(t *testing.T) { }) } -func testAccCheckClusterAttributes(v *elasticache.CacheCluster) resource.TestCheckFunc { +func testAccCheckClusterAttributes(v *awstypes.CacheCluster) resource.TestCheckFunc { return func(s *terraform.State) error { if v.NotificationConfiguration == nil { return fmt.Errorf("Expected NotificationConfiguration for ElastiCache Cluster (%s)", *v.CacheClusterId) @@ -1466,13 +1466,13 @@ func testAccCheckClusterAttributes(v *elasticache.CacheCluster) resource.TestChe } } -func testAccCheckClusterReplicationGroupIDAttribute(cluster *elasticache.CacheCluster, replicationGroup *elasticache.ReplicationGroup) resource.TestCheckFunc { +func testAccCheckClusterReplicationGroupIDAttribute(cluster *awstypes.CacheCluster, replicationGroup *awstypes.ReplicationGroup) resource.TestCheckFunc { return func(s *terraform.State) error { if cluster.ReplicationGroupId == nil { return errors.New("expected cluster ReplicationGroupId to be set") } - if aws.StringValue(cluster.ReplicationGroupId) != aws.StringValue(replicationGroup.ReplicationGroupId) { + if 
aws.ToString(cluster.ReplicationGroupId) != aws.ToString(replicationGroup.ReplicationGroupId) { return errors.New("expected cluster ReplicationGroupId to equal replication group ID") } @@ -1480,9 +1480,9 @@ func testAccCheckClusterReplicationGroupIDAttribute(cluster *elasticache.CacheCl } } -func testAccCheckClusterNotRecreated(i, j *elasticache.CacheCluster) resource.TestCheckFunc { +func testAccCheckClusterNotRecreated(i, j *awstypes.CacheCluster) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.TimeValue(i.CacheClusterCreateTime).Equal(aws.TimeValue(j.CacheClusterCreateTime)) { + if !aws.ToTime(i.CacheClusterCreateTime).Equal(aws.ToTime(j.CacheClusterCreateTime)) { return errors.New("ElastiCache Cluster was recreated") } @@ -1490,9 +1490,9 @@ func testAccCheckClusterNotRecreated(i, j *elasticache.CacheCluster) resource.Te } } -func testAccCheckClusterRecreated(i, j *elasticache.CacheCluster) resource.TestCheckFunc { +func testAccCheckClusterRecreated(i, j *awstypes.CacheCluster) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.TimeValue(i.CacheClusterCreateTime).Equal(aws.TimeValue(j.CacheClusterCreateTime)) { + if aws.ToTime(i.CacheClusterCreateTime).Equal(aws.ToTime(j.CacheClusterCreateTime)) { return errors.New("ElastiCache Cluster was not recreated") } @@ -1502,7 +1502,7 @@ func testAccCheckClusterRecreated(i, j *elasticache.CacheCluster) resource.TestC func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elasticache_cluster" { @@ -1525,7 +1525,7 @@ func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckClusterExists(ctx context.Context, n string, v *elasticache.CacheCluster) 
resource.TestCheckFunc { +func testAccCheckClusterExists(ctx context.Context, n string, v *awstypes.CacheCluster) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -1536,7 +1536,7 @@ func testAccCheckClusterExists(ctx context.Context, n string, v *elasticache.Cac return fmt.Errorf("No ElastiCache Cluster ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) output, err := tfelasticache.FindCacheClusterByID(ctx, conn, rs.Primary.ID) @@ -2088,7 +2088,7 @@ resource "aws_elasticache_cluster" "test" { `, rName, enable) } -func testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName string, slowLogDeliveryEnabled bool, slowDeliveryDestination string, slowDeliveryFormat string, engineLogDeliveryEnabled bool, engineDeliveryDestination string, engineLogDeliveryFormat string) string { +func testAccClusterConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName string, slowLogDeliveryEnabled bool, slowDeliveryDestination awstypes.DestinationType, slowDeliveryFormat awstypes.LogFormat, engineLogDeliveryEnabled bool, engineDeliveryDestination awstypes.DestinationType, engineLogDeliveryFormat awstypes.LogFormat) string { return fmt.Sprintf(` data "aws_iam_policy_document" "p" { statement { diff --git a/internal/service/elasticache/service.go b/internal/service/elasticache/consts.go similarity index 100% rename from internal/service/elasticache/service.go rename to internal/service/elasticache/consts.go diff --git a/internal/service/elasticache/diff.go b/internal/service/elasticache/diff.go index 3a629dc6a25..5cc20eaa812 100644 --- a/internal/service/elasticache/diff.go +++ b/internal/service/elasticache/diff.go @@ -7,20 +7,19 @@ import ( "context" "errors" - "github.com/aws/aws-sdk-go/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/names" ) // customizeDiffValidateClusterAZMode validates that `num_cache_nodes` is greater than 1 when `az_mode` is "cross-az" func customizeDiffValidateClusterAZMode(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - if v, ok := diff.GetOk("az_mode"); !ok || v.(string) != elasticache.AZModeCrossAz { + if v, ok := diff.GetOk("az_mode"); !ok || awstypes.AZMode(v.(string)) != awstypes.AZModeCrossAz { return nil } if v, ok := diff.GetOk("num_cache_nodes"); !ok || v.(int) != 1 { return nil } - return errors.New(`az_mode "cross-az" is not supported with num_cache_nodes = 1`) } @@ -29,7 +28,6 @@ func customizeDiffValidateClusterNumCacheNodes(_ context.Context, diff *schema.R if v, ok := diff.GetOk(names.AttrEngine); !ok || v.(string) == engineMemcached { return nil } - if v, ok := diff.GetOk("num_cache_nodes"); !ok || v.(int) == 1 { return nil } diff --git a/internal/service/elasticache/engine_version.go b/internal/service/elasticache/engine_version.go index 71b0bd817b0..448069ed2f4 100644 --- a/internal/service/elasticache/engine_version.go +++ b/internal/service/elasticache/engine_version.go @@ -11,7 +11,7 @@ import ( "regexp" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" gversion "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/names" @@ -167,7 +167,7 @@ func setEngineVersionMemcached(d *schema.ResourceData, version *string) { } func setEngineVersionRedis(d *schema.ResourceData, version *string) error { - engineVersion, err := gversion.NewVersion(aws.StringValue(version)) + engineVersion, err := gversion.NewVersion(aws.ToString(version)) if err != nil { return fmt.Errorf("reading engine version: %w", err) } @@ -187,11 +187,11 @@ func setEngineVersionRedis(d *schema.ResourceData, version *string) 
error { return nil } -type VersionDiff [3]int +type versionDiff [3]int // diffVersion returns a diff of the versions, component by component. // Only reports the first diff, since subsequent segments are unimportant for us. -func diffVersion(n, o *gversion.Version) (result VersionDiff) { +func diffVersion(n, o *gversion.Version) (result versionDiff) { if n.String() == o.String() { return } diff --git a/internal/service/elasticache/exports_test.go b/internal/service/elasticache/exports_test.go index f9fd80061e5..c4d719c6f15 100644 --- a/internal/service/elasticache/exports_test.go +++ b/internal/service/elasticache/exports_test.go @@ -30,14 +30,21 @@ var ( WaitCacheClusterDeleted = waitCacheClusterDeleted WaitReplicationGroupAvailable = waitReplicationGroupAvailable + DeleteCacheCluster = deleteCacheCluster DiffVersion = diffVersion + EmptyDescription = emptyDescription EngineMemcached = engineMemcached EngineRedis = engineRedis EngineVersionForceNewOnDowngrade = engineVersionForceNewOnDowngrade EngineVersionIsDowngrade = engineVersionIsDowngrade + GlobalReplicationGroupRegionPrefixFormat = globalReplicationGroupRegionPrefixFormat NormalizeEngineVersion = normalizeEngineVersion ParamGroupNameRequiresMajorVersionUpgrade = paramGroupNameRequiresMajorVersionUpgrade ValidateClusterEngineVersion = validateClusterEngineVersion ValidMemcachedVersionString = validMemcachedVersionString ValidRedisVersionString = validRedisVersionString ) + +type ( + VersionDiff = versionDiff +) diff --git a/internal/service/elasticache/flex.go b/internal/service/elasticache/flex.go index 2fb70a3936d..d7d241ff5a2 100644 --- a/internal/service/elasticache/flex.go +++ b/internal/service/elasticache/flex.go @@ -4,74 +4,72 @@ package elasticache import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" + tfslices 
"github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/names" ) -func flattenSecurityGroupIDs(securityGroups []*elasticache.SecurityGroupMembership) []string { - result := make([]string, 0, len(securityGroups)) - for _, sg := range securityGroups { - if sg.SecurityGroupId != nil { - result = append(result, *sg.SecurityGroupId) - } - } - return result +func flattenSecurityGroupIDs(apiObjects []awstypes.SecurityGroupMembership) []string { + return tfslices.ApplyToAll(apiObjects, func(v awstypes.SecurityGroupMembership) string { + return aws.ToString(v.SecurityGroupId) + }) } -func flattenLogDeliveryConfigurations(logDeliveryConfiguration []*elasticache.LogDeliveryConfiguration) []map[string]interface{} { - if len(logDeliveryConfiguration) == 0 { +func flattenLogDeliveryConfigurations(apiObjects []awstypes.LogDeliveryConfiguration) []interface{} { + if len(apiObjects) == 0 { return nil } - var logDeliveryConfigurations []map[string]interface{} - for _, v := range logDeliveryConfiguration { - logDeliveryConfig := make(map[string]interface{}) + var tfList []interface{} + + for _, apiObject := range apiObjects { + tfMap := make(map[string]interface{}) - switch aws.StringValue(v.DestinationType) { - case elasticache.DestinationTypeKinesisFirehose: - logDeliveryConfig[names.AttrDestination] = aws.StringValue(v.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) - case elasticache.DestinationTypeCloudwatchLogs: - logDeliveryConfig[names.AttrDestination] = aws.StringValue(v.DestinationDetails.CloudWatchLogsDetails.LogGroup) + switch apiObject.DestinationType { + case awstypes.DestinationTypeKinesisFirehose: + tfMap[names.AttrDestination] = aws.ToString(apiObject.DestinationDetails.KinesisFirehoseDetails.DeliveryStream) + case awstypes.DestinationTypeCloudWatchLogs: + tfMap[names.AttrDestination] = aws.ToString(apiObject.DestinationDetails.CloudWatchLogsDetails.LogGroup) } + tfMap["destination_type"] = 
apiObject.DestinationType + tfMap["log_format"] = apiObject.LogFormat + tfMap["log_type"] = apiObject.LogType - logDeliveryConfig["destination_type"] = aws.StringValue(v.DestinationType) - logDeliveryConfig["log_format"] = aws.StringValue(v.LogFormat) - logDeliveryConfig["log_type"] = aws.StringValue(v.LogType) - logDeliveryConfigurations = append(logDeliveryConfigurations, logDeliveryConfig) + tfList = append(tfList, tfMap) } - return logDeliveryConfigurations + return tfList } -func expandEmptyLogDeliveryConfigurations(v map[string]interface{}) elasticache.LogDeliveryConfigurationRequest { - logDeliveryConfigurationRequest := elasticache.LogDeliveryConfigurationRequest{} - logDeliveryConfigurationRequest.SetEnabled(false) - logDeliveryConfigurationRequest.SetLogType(v["log_type"].(string)) +func expandEmptyLogDeliveryConfigurationRequest(tfMap map[string]interface{}) awstypes.LogDeliveryConfigurationRequest { + apiObject := awstypes.LogDeliveryConfigurationRequest{} - return logDeliveryConfigurationRequest -} + apiObject.Enabled = aws.Bool(false) + apiObject.LogType = awstypes.LogType(tfMap["log_type"].(string)) -func expandLogDeliveryConfigurations(v map[string]interface{}) elasticache.LogDeliveryConfigurationRequest { - logDeliveryConfigurationRequest := elasticache.LogDeliveryConfigurationRequest{} + return apiObject +} - logDeliveryConfigurationRequest.LogType = aws.String(v["log_type"].(string)) - logDeliveryConfigurationRequest.DestinationType = aws.String(v["destination_type"].(string)) - logDeliveryConfigurationRequest.LogFormat = aws.String(v["log_format"].(string)) - destinationDetails := elasticache.DestinationDetails{} +func expandLogDeliveryConfigurationRequests(v map[string]interface{}) awstypes.LogDeliveryConfigurationRequest { + apiObject := awstypes.LogDeliveryConfigurationRequest{} - switch v["destination_type"].(string) { - case elasticache.DestinationTypeCloudwatchLogs: - destinationDetails.CloudWatchLogsDetails = 
&elasticache.CloudWatchLogsDestinationDetails{ + destinationType := awstypes.DestinationType(v["destination_type"].(string)) + apiObject.DestinationType = destinationType + destinationDetails := &awstypes.DestinationDetails{} + switch destinationType { + case awstypes.DestinationTypeCloudWatchLogs: + destinationDetails.CloudWatchLogsDetails = &awstypes.CloudWatchLogsDestinationDetails{ LogGroup: aws.String(v[names.AttrDestination].(string)), } - case elasticache.DestinationTypeKinesisFirehose: - destinationDetails.KinesisFirehoseDetails = &elasticache.KinesisFirehoseDestinationDetails{ + case awstypes.DestinationTypeKinesisFirehose: + destinationDetails.KinesisFirehoseDetails = &awstypes.KinesisFirehoseDestinationDetails{ DeliveryStream: aws.String(v[names.AttrDestination].(string)), } } + apiObject.DestinationDetails = destinationDetails + apiObject.LogType = awstypes.LogType(v["log_type"].(string)) + apiObject.LogFormat = awstypes.LogFormat(v["log_format"].(string)) - logDeliveryConfigurationRequest.DestinationDetails = &destinationDetails - - return logDeliveryConfigurationRequest + return apiObject } diff --git a/internal/service/elasticache/generate.go b/internal/service/elasticache/generate.go index 0436db54324..9ac233d5186 100644 --- a/internal/service/elasticache/generate.go +++ b/internal/service/elasticache/generate.go @@ -1,8 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsInIDElem=ResourceName -ListTagsOutTagsElem=TagList -ServiceTagsSlice -TagOp=AddTagsToResource -TagInIDElem=ResourceName -UntagOp=RemoveTagsFromResource -UpdateTags -CreateTags -RetryTagsListTagsType=TagListMessage -RetryTagsErrorCodes=elasticache.ErrCodeInvalidReplicationGroupStateFault "-RetryTagsErrorMessages=not in available state" -RetryTagsTimeout=15m -//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -TagsFunc=TagsV2 -KeyValueTagsFunc=keyValueTagsV2 -GetTagsInFunc=getTagsInV2 -SetTagsOutFunc=setTagsOutV2 -SkipAWSServiceImp -KVTValues -ServiceTagsSlice -- tagsv2_gen.go +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -CreateTags -ListTags -ListTagsInIDElem=ResourceName -ListTagsOutTagsElem=TagList -ServiceTagsSlice -TagOp=AddTagsToResource -TagInIDElem=ResourceName -UntagOp=RemoveTagsFromResource -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/elasticache/global_replication_group.go b/internal/service/elasticache/global_replication_group.go index fe284d944d5..7043cc75a64 100644 --- a/internal/service/elasticache/global_replication_group.go +++ b/internal/service/elasticache/global_replication_group.go @@ -14,9 +14,9 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" gversion "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -32,11 +33,11 @@ import ( ) const ( - EmptyDescription = " " + emptyDescription = " " ) const ( - GlobalReplicationGroupRegionPrefixFormat = "[[:alpha:]]{5}-" + globalReplicationGroupRegionPrefixFormat = "[[:alpha:]]{5}-" ) const ( @@ -54,7 +55,7 @@ func resourceGlobalReplicationGroup() *schema.Resource { Importer: &schema.ResourceImporter{ StateContext: func(ctx context.Context, d *schema.ResourceData, meta any) ([]*schema.ResourceData, error) { - re := regexache.MustCompile("^" + GlobalReplicationGroupRegionPrefixFormat) + re := regexache.MustCompile("^" + globalReplicationGroupRegionPrefixFormat) d.Set("global_replication_group_id_suffix", re.ReplaceAllLiteralString(d.Id(), "")) 
return []*schema.ResourceData{d}, nil @@ -141,7 +142,7 @@ func resourceGlobalReplicationGroup() *schema.Resource { Type: schema.TypeString, Optional: true, DiffSuppressFunc: func(_, old, new string, _ *schema.ResourceData) bool { - if (old == EmptyDescription && new == "") || (old == "" && new == EmptyDescription) { + if (old == emptyDescription && new == "") || (old == "" && new == emptyDescription) { return true } return false @@ -149,7 +150,7 @@ func resourceGlobalReplicationGroup() *schema.Resource { StateFunc: func(v any) string { s := v.(string) if s == "" { - return EmptyDescription + return emptyDescription } return s }, @@ -268,7 +269,7 @@ func paramGroupNameRequiresMajorVersionUpgrade(diff sdkv2.ResourceDiffer) error func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) id := d.Get("global_replication_group_id_suffix").(string) input := &elasticache.CreateGlobalReplicationGroupInput{ @@ -280,13 +281,13 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc input.GlobalReplicationGroupDescription = aws.String(v.(string)) } - output, err := conn.CreateGlobalReplicationGroupWithContext(ctx, input) + output, err := conn.CreateGlobalReplicationGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating ElastiCache Global Replication Group (%s): %s", id, err) } - d.SetId(aws.StringValue(output.GlobalReplicationGroup.GlobalReplicationGroupId)) + d.SetId(aws.ToString(output.GlobalReplicationGroup.GlobalReplicationGroupId)) globalReplicationGroup, err := waitGlobalReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) @@ -305,7 +306,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc } if v, ok := d.GetOk("cache_node_type"); ok { - if v.(string) == 
aws.StringValue(globalReplicationGroup.CacheNodeType) { + if v.(string) == aws.ToString(globalReplicationGroup.CacheNodeType) { log.Printf("[DEBUG] Not updating ElastiCache Global Replication Group (%s) node type: no change from %q", d.Id(), v) } else { if err := updateGlobalReplicationGroup(ctx, conn, d.Id(), globalReplicationGroupNodeTypeUpdater(v.(string)), "node type", d.Timeout(schema.TimeoutCreate)); err != nil { @@ -317,7 +318,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc if v, ok := d.GetOk(names.AttrEngineVersion); ok { requestedVersion, _ := normalizeEngineVersion(v.(string)) - engineVersion, err := gversion.NewVersion(aws.StringValue(globalReplicationGroup.EngineVersion)) + engineVersion, err := gversion.NewVersion(aws.ToString(globalReplicationGroup.EngineVersion)) if err != nil { return sdkdiag.AppendErrorf(diags, "updating ElastiCache Global Replication Group (%s) engine version on creation: error reading engine version: %s", d.Id(), err) } @@ -353,8 +354,8 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc return sdkdiag.AppendFromErr(diags, err) } } else if newNodeGroupCount < oldNodeGroupCount { - ids := tfslices.ApplyToAll(globalReplicationGroup.GlobalNodeGroups, func(v *elasticache.GlobalNodeGroup) string { - return aws.StringValue(v.GlobalNodeGroupId) + ids := tfslices.ApplyToAll(globalReplicationGroup.GlobalNodeGroups, func(v awstypes.GlobalNodeGroup) string { + return aws.ToString(v.GlobalNodeGroupId) }) if err := decreaseGlobalReplicationGroupNodeGroupCount(ctx, conn, d.Id(), newNodeGroupCount, ids, d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendFromErr(diags, err) @@ -368,7 +369,7 @@ func resourceGlobalReplicationGroupCreate(ctx context.Context, d *schema.Resourc func resourceGlobalReplicationGroupRead(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := 
meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) globalReplicationGroup, err := findGlobalReplicationGroupByID(ctx, conn, d.Id()) @@ -382,7 +383,7 @@ func resourceGlobalReplicationGroupRead(ctx context.Context, d *schema.ResourceD return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): %s", d.Id(), err) } - if status := aws.StringValue(globalReplicationGroup.Status); !d.IsNewResource() && (status == globalReplicationGroupStatusDeleting || status == globalReplicationGroupStatusDeleted) { + if status := aws.ToString(globalReplicationGroup.Status); !d.IsNewResource() && (status == globalReplicationGroupStatusDeleting || status == globalReplicationGroupStatusDeleted) { log.Printf("[WARN] ElastiCache Global Replication Group (%s) in deleted state (%s), removing from state", d.Id(), status) d.SetId("") return diags @@ -415,7 +416,7 @@ func resourceGlobalReplicationGroupRead(ctx context.Context, d *schema.ResourceD func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) // Only one field can be changed per request if d.HasChange("cache_node_type") { @@ -481,7 +482,7 @@ func resourceGlobalReplicationGroupUpdate(ctx context.Context, d *schema.Resourc func resourceGlobalReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, meta any) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) // Using Update timeout because the Global Replication Group could be in the middle of an update operation. 
if err := deleteGlobalReplicationGroup(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), d.Timeout(schema.TimeoutDelete)); err != nil { @@ -524,14 +525,14 @@ func globalReplicationGroupNodeTypeUpdater(nodeType string) globalReplicationGro } } -func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, f globalReplicationGroupUpdater, propertyName string, timeout time.Duration) error { +func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.Client, id string, f globalReplicationGroupUpdater, propertyName string, timeout time.Duration) error { input := &elasticache.ModifyGlobalReplicationGroupInput{ ApplyImmediately: aws.Bool(true), GlobalReplicationGroupId: aws.String(id), } f(input) - if _, err := conn.ModifyGlobalReplicationGroupWithContext(ctx, input); err != nil { + if _, err := conn.ModifyGlobalReplicationGroup(ctx, input); err != nil { return fmt.Errorf("updating ElastiCache Global Replication Group (%s) %s: %w", id, propertyName, err) } @@ -542,14 +543,14 @@ func updateGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiC return nil } -func increaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *elasticache.ElastiCache, id string, newNodeGroupCount int, timeout time.Duration) error { +func increaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *elasticache.Client, id string, newNodeGroupCount int, timeout time.Duration) error { input := &elasticache.IncreaseNodeGroupsInGlobalReplicationGroupInput{ ApplyImmediately: aws.Bool(true), GlobalReplicationGroupId: aws.String(id), - NodeGroupCount: aws.Int64(int64(newNodeGroupCount)), + NodeGroupCount: aws.Int32(int32(newNodeGroupCount)), } - _, err := conn.IncreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + _, err := conn.IncreaseNodeGroupsInGlobalReplicationGroup(ctx, input) if err != nil { return fmt.Errorf("increasing ElastiCache Global Replication Group (%s) node group count (%d): %w", id, 
newNodeGroupCount, err) @@ -562,7 +563,7 @@ func increaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *ela return nil } -func decreaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *elasticache.ElastiCache, id string, newNodeGroupCount int, nodeGroupIDs []string, timeout time.Duration) error { +func decreaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *elasticache.Client, id string, newNodeGroupCount int, nodeGroupIDs []string, timeout time.Duration) error { slices.SortFunc(nodeGroupIDs, func(a, b string) int { if globalReplicationGroupNodeNumber(a) < globalReplicationGroupNodeNumber(b) { return -1 @@ -576,12 +577,12 @@ func decreaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *ela input := &elasticache.DecreaseNodeGroupsInGlobalReplicationGroupInput{ ApplyImmediately: aws.Bool(true), - GlobalNodeGroupsToRetain: aws.StringSlice(nodeGroupIDs), + GlobalNodeGroupsToRetain: nodeGroupIDs, GlobalReplicationGroupId: aws.String(id), - NodeGroupCount: aws.Int64(int64(newNodeGroupCount)), + NodeGroupCount: aws.Int32(int32(newNodeGroupCount)), } - _, err := conn.DecreaseNodeGroupsInGlobalReplicationGroupWithContext(ctx, input) + _, err := conn.DecreaseNodeGroupsInGlobalReplicationGroup(ctx, input) if err != nil { return fmt.Errorf("decreasing ElastiCache Global Replication Group (%s) node group count (%d): %w", id, newNodeGroupCount, err) @@ -594,17 +595,17 @@ func decreaseGlobalReplicationGroupNodeGroupCount(ctx context.Context, conn *ela return nil } -func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, id string, readyTimeout, deleteTimeout time.Duration) error { +func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.Client, id string, readyTimeout, deleteTimeout time.Duration) error { input := &elasticache.DeleteGlobalReplicationGroupInput{ GlobalReplicationGroupId: aws.String(id), RetainPrimaryReplicationGroup: aws.Bool(true), } - _, err := 
tfresource.RetryWhenAWSErrCodeEquals(ctx, readyTimeout, func() (interface{}, error) { - return conn.DeleteGlobalReplicationGroupWithContext(ctx, input) - }, elasticache.ErrCodeInvalidGlobalReplicationGroupStateFault) + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidGlobalReplicationGroupStateFault](ctx, readyTimeout, func() (interface{}, error) { + return conn.DeleteGlobalReplicationGroup(ctx, input) + }) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) { + if errs.IsA[*awstypes.GlobalReplicationGroupNotFoundFault](err) { return nil } @@ -619,57 +620,55 @@ func deleteGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiC return nil } -func findGlobalReplicationGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.GlobalReplicationGroup, error) { +func findGlobalReplicationGroupByID(ctx context.Context, conn *elasticache.Client, id string) (*awstypes.GlobalReplicationGroup, error) { input := &elasticache.DescribeGlobalReplicationGroupsInput{ GlobalReplicationGroupId: aws.String(id), ShowMemberInfo: aws.Bool(true), } - return findGlobalReplicationGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.GlobalReplicationGroup]()) + return findGlobalReplicationGroup(ctx, conn, input, tfslices.PredicateTrue[*awstypes.GlobalReplicationGroup]()) } -func findGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeGlobalReplicationGroupsInput, filter tfslices.Predicate[*elasticache.GlobalReplicationGroup]) (*elasticache.GlobalReplicationGroup, error) { +func findGlobalReplicationGroup(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeGlobalReplicationGroupsInput, filter tfslices.Predicate[*awstypes.GlobalReplicationGroup]) (*awstypes.GlobalReplicationGroup, error) { output, err := findGlobalReplicationGroups(ctx, conn, input, filter) if err != nil { return nil, err } - return 
tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findGlobalReplicationGroups(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeGlobalReplicationGroupsInput, filter tfslices.Predicate[*elasticache.GlobalReplicationGroup]) ([]*elasticache.GlobalReplicationGroup, error) { - var output []*elasticache.GlobalReplicationGroup +func findGlobalReplicationGroups(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeGlobalReplicationGroupsInput, filter tfslices.Predicate[*awstypes.GlobalReplicationGroup]) ([]awstypes.GlobalReplicationGroup, error) { + var output []awstypes.GlobalReplicationGroup - err := conn.DescribeGlobalReplicationGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeGlobalReplicationGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticache.NewDescribeGlobalReplicationGroupsPaginator(conn, input) - for _, v := range page.GlobalReplicationGroups { - if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.GlobalReplicationGroupNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.GlobalReplicationGroups { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func statusGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string) retry.StateRefreshFunc { +func statusGlobalReplicationGroup(ctx context.Context, conn *elasticache.Client, globalReplicationGroupID string) retry.StateRefreshFunc { return func() 
(interface{}, string, error) { output, err := findGlobalReplicationGroupByID(ctx, conn, globalReplicationGroupID) @@ -681,7 +680,7 @@ func statusGlobalReplicationGroup(ctx context.Context, conn *elasticache.ElastiC return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } @@ -700,7 +699,7 @@ const ( globalReplicationGroupStatusPrimaryOnly = "primary-only" ) -func waitGlobalReplicationGroupAvailable(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroup, error) { +func waitGlobalReplicationGroupAvailable(ctx context.Context, conn *elasticache.Client, globalReplicationGroupID string, timeout time.Duration) (*awstypes.GlobalReplicationGroup, error) { stateConf := &retry.StateChangeConf{ Pending: []string{globalReplicationGroupStatusCreating, globalReplicationGroupStatusModifying}, Target: []string{globalReplicationGroupStatusAvailable, globalReplicationGroupStatusPrimaryOnly}, @@ -712,14 +711,14 @@ func waitGlobalReplicationGroupAvailable(ctx context.Context, conn *elasticache. 
outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.GlobalReplicationGroup); ok { + if output, ok := outputRaw.(*awstypes.GlobalReplicationGroup); ok { return output, err } return nil, err } -func waitGlobalReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroup, error) { +func waitGlobalReplicationGroupDeleted(ctx context.Context, conn *elasticache.Client, globalReplicationGroupID string, timeout time.Duration) (*awstypes.GlobalReplicationGroup, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ globalReplicationGroupStatusAvailable, @@ -736,14 +735,14 @@ func waitGlobalReplicationGroupDeleted(ctx context.Context, conn *elasticache.El outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.GlobalReplicationGroup); ok { + if output, ok := outputRaw.(*awstypes.GlobalReplicationGroup); ok { return output, err } return nil, err } -func findGlobalReplicationGroupMemberByID(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, replicationGroupID string) (*elasticache.GlobalReplicationGroupMember, error) { +func findGlobalReplicationGroupMemberByID(ctx context.Context, conn *elasticache.Client, globalReplicationGroupID, replicationGroupID string) (*awstypes.GlobalReplicationGroupMember, error) { globalReplicationGroup, err := findGlobalReplicationGroupByID(ctx, conn, globalReplicationGroupID) if err != nil { @@ -755,8 +754,8 @@ func findGlobalReplicationGroupMemberByID(ctx context.Context, conn *elasticache } for _, v := range globalReplicationGroup.Members { - if aws.StringValue(v.ReplicationGroupId) == replicationGroupID { - return v, nil + if aws.ToString(v.ReplicationGroupId) == replicationGroupID { + return &v, nil } } @@ -765,7 +764,7 @@ func findGlobalReplicationGroupMemberByID(ctx context.Context, conn *elasticache } } -func 
statusGlobalReplicationGroupMember(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, replicationGroupID string) retry.StateRefreshFunc { +func statusGlobalReplicationGroupMember(ctx context.Context, conn *elasticache.Client, globalReplicationGroupID, replicationGroupID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findGlobalReplicationGroupMemberByID(ctx, conn, globalReplicationGroupID, replicationGroupID) @@ -777,7 +776,7 @@ func statusGlobalReplicationGroupMember(ctx context.Context, conn *elasticache.E return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } @@ -785,7 +784,7 @@ const ( globalReplicationGroupMemberStatusAssociated = "associated" ) -func waitGlobalReplicationGroupMemberDetached(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, replicationGroupID string, timeout time.Duration) (*elasticache.GlobalReplicationGroupMember, error) { +func waitGlobalReplicationGroupMemberDetached(ctx context.Context, conn *elasticache.Client, globalReplicationGroupID, replicationGroupID string, timeout time.Duration) (*awstypes.GlobalReplicationGroupMember, error) { stateConf := &retry.StateChangeConf{ Pending: []string{globalReplicationGroupMemberStatusAssociated}, Target: []string{}, @@ -797,23 +796,23 @@ func waitGlobalReplicationGroupMemberDetached(ctx context.Context, conn *elastic outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.GlobalReplicationGroupMember); ok { + if output, ok := outputRaw.(*awstypes.GlobalReplicationGroupMember); ok { return output, err } return nil, err } -func flattenGlobalReplicationGroupAutomaticFailoverEnabled(members []*elasticache.GlobalReplicationGroupMember) bool { +func flattenGlobalReplicationGroupAutomaticFailoverEnabled(members []awstypes.GlobalReplicationGroupMember) bool { if len(members) == 0 { return false } 
member := members[0] - return aws.StringValue(member.AutomaticFailover) == elasticache.AutomaticFailoverStatusEnabled + return member.AutomaticFailover == awstypes.AutomaticFailoverStatusEnabled } -func flattenGlobalNodeGroups(nodeGroups []*elasticache.GlobalNodeGroup) []any { +func flattenGlobalNodeGroups(nodeGroups []awstypes.GlobalNodeGroup) []any { if len(nodeGroups) == 0 { return nil } @@ -821,38 +820,30 @@ func flattenGlobalNodeGroups(nodeGroups []*elasticache.GlobalNodeGroup) []any { var l []any for _, nodeGroup := range nodeGroups { - if nodeGroup == nil { - continue - } - l = append(l, flattenGlobalNodeGroup(nodeGroup)) } return l } -func flattenGlobalNodeGroup(nodeGroup *elasticache.GlobalNodeGroup) map[string]any { - if nodeGroup == nil { - return nil - } - +func flattenGlobalNodeGroup(nodeGroup awstypes.GlobalNodeGroup) map[string]any { m := map[string]interface{}{} if v := nodeGroup.GlobalNodeGroupId; v != nil { - m["global_node_group_id"] = aws.StringValue(v) + m["global_node_group_id"] = aws.ToString(v) } if v := nodeGroup.Slots; v != nil { - m["slots"] = aws.StringValue(v) + m["slots"] = aws.ToString(v) } return m } -func flattenGlobalReplicationGroupPrimaryGroupID(members []*elasticache.GlobalReplicationGroupMember) string { +func flattenGlobalReplicationGroupPrimaryGroupID(members []awstypes.GlobalReplicationGroupMember) string { for _, member := range members { - if aws.StringValue(member.Role) == globalReplicationGroupMemberRolePrimary { - return aws.StringValue(member.ReplicationGroupId) + if aws.ToString(member.Role) == globalReplicationGroupMemberRolePrimary { + return aws.ToString(member.ReplicationGroupId) } } return "" diff --git a/internal/service/elasticache/global_replication_group_test.go b/internal/service/elasticache/global_replication_group_test.go index e5cdce7df59..399bd8b19f9 100644 --- a/internal/service/elasticache/global_replication_group_test.go +++ b/internal/service/elasticache/global_replication_group_test.go @@ -10,14 
+10,15 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfelasticache "github.com/hashicorp/terraform-provider-aws/internal/service/elasticache" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -29,8 +30,8 @@ func TestAccElastiCacheGlobalReplicationGroup_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var primaryReplicationGroup elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var primaryReplicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -83,7 +84,7 @@ func TestAccElastiCacheGlobalReplicationGroup_disappears(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -112,7 +113,7 @@ func 
TestAccElastiCacheGlobalReplicationGroup_description(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) description1 := sdkacctest.RandString(10) @@ -154,7 +155,7 @@ func TestAccElastiCacheGlobalReplicationGroup_nodeType_createNoChange(t *testing t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) nodeType := "cache.m5.large" @@ -188,7 +189,7 @@ func TestAccElastiCacheGlobalReplicationGroup_nodeType_createWithChange(t *testi t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) nodeType := "cache.m5.large" @@ -223,7 +224,7 @@ func TestAccElastiCacheGlobalReplicationGroup_nodeType_setNoChange(t *testing.T) t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) nodeType := "cache.m5.large" @@ -265,7 +266,7 @@ func TestAccElastiCacheGlobalReplicationGroup_nodeType_update(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + 
var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) nodeType := "cache.m5.large" @@ -307,7 +308,7 @@ func TestAccElastiCacheGlobalReplicationGroup_automaticFailover_createNoChange(t t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -340,7 +341,7 @@ func TestAccElastiCacheGlobalReplicationGroup_automaticFailover_createWithChange t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -373,7 +374,7 @@ func TestAccElastiCacheGlobalReplicationGroup_automaticFailover_setNoChange(t *t t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -414,7 +415,7 @@ func TestAccElastiCacheGlobalReplicationGroup_automaticFailover_update(t *testin t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -454,7 +455,7 @@ func TestAccElastiCacheGlobalReplicationGroup_multipleSecondaries(t *testing.T) t.Skip("skipping long-running test in short mode") } - var globalReplcationGroup elasticache.GlobalReplicationGroup + var globalReplcationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -483,7 +484,7 @@ func TestAccElastiCacheGlobalReplicationGroup_ReplaceSecondary_differentRegion(t t.Skip("skipping long-running test in short mode") } - var globalReplcationGroup elasticache.GlobalReplicationGroup + var globalReplcationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -518,8 +519,8 @@ func TestAccElastiCacheGlobalReplicationGroup_clusterMode_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var primaryReplicationGroup elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var primaryReplicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -558,8 +559,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnCreate_NoChange( t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var primaryReplicationGroup elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var primaryReplicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -598,8 +599,8 @@ func 
TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnCreate_Increase( t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var primaryReplicationGroup elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var primaryReplicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -647,8 +648,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnCreate_Decrease( t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var primaryReplicationGroup elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var primaryReplicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -691,8 +692,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnUpdate_Increase( t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var primaryReplicationGroup elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var primaryReplicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -757,8 +758,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetNumNodeGroupsOnUpdate_Decrease( t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var primaryReplicationGroup elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var primaryReplicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -818,7 +819,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_NoChange_ t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup 
elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -851,7 +852,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_NoChange_ t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -885,7 +886,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_NoChange_ t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -918,8 +919,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_MinorUpgr t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var rg elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -956,8 +957,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_MinorUpgr t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var rg 
elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -989,8 +990,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_MajorUpgr t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var rg elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1028,8 +1029,8 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnCreate_MajorUpgr t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup - var rg elasticache.ReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1129,7 +1130,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MinorUpgr t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -1166,7 +1167,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MinorUpgr t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup 
awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -1200,7 +1201,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MinorDown t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -1247,7 +1248,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MajorUpgr t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -1289,7 +1290,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetEngineVersionOnUpdate_MajorUpgr t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_global_replication_group.test" @@ -1331,7 +1332,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetParameterGroupOnUpdate_NoVersio t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1366,7 +1367,7 @@ func TestAccElastiCacheGlobalReplicationGroup_SetParameterGroupOnUpdate_MinorUpg t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1401,7 +1402,7 @@ func TestAccElastiCacheGlobalReplicationGroup_UpdateParameterGroupName(t *testin t.Skip("skipping long-running test in short mode") } - var globalReplicationGroup elasticache.GlobalReplicationGroup + var globalReplicationGroup awstypes.GlobalReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) primaryReplicationGroupId := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1431,7 +1432,7 @@ func TestAccElastiCacheGlobalReplicationGroup_UpdateParameterGroupName(t *testin }) } -func testAccCheckGlobalReplicationGroupExists(ctx context.Context, resourceName string, v *elasticache.GlobalReplicationGroup) resource.TestCheckFunc { +func testAccCheckGlobalReplicationGroupExists(ctx context.Context, resourceName string, v *awstypes.GlobalReplicationGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[resourceName] if !ok { @@ -1442,14 +1443,14 @@ func testAccCheckGlobalReplicationGroupExists(ctx context.Context, resourceName return fmt.Errorf("No ElastiCache Global Replication Group ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) grg, err := tfelasticache.FindGlobalReplicationGroupByID(ctx, conn, rs.Primary.ID) if err != nil { return fmt.Errorf("retrieving ElastiCache Global Replication Group (%s): %w", 
rs.Primary.ID, err) } - if aws.StringValue(grg.Status) == "deleting" || aws.StringValue(grg.Status) == "deleted" { - return fmt.Errorf("ElastiCache Global Replication Group (%s) exists, but is in a non-available state: %s", rs.Primary.ID, aws.StringValue(grg.Status)) + if aws.ToString(grg.Status) == "deleting" || aws.ToString(grg.Status) == "deleted" { + return fmt.Errorf("ElastiCache Global Replication Group (%s) exists, but is in a non-available state: %s", rs.Primary.ID, aws.ToString(grg.Status)) } *v = *grg @@ -1460,7 +1461,7 @@ func testAccCheckGlobalReplicationGroupExists(ctx context.Context, resourceName func testAccCheckGlobalReplicationGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elasticache_global_replication_group" { @@ -1482,13 +1483,13 @@ func testAccCheckGlobalReplicationGroupDestroy(ctx context.Context) resource.Tes } func testAccPreCheckGlobalReplicationGroup(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) input := &elasticache.DescribeGlobalReplicationGroupsInput{} - _, err := conn.DescribeGlobalReplicationGroupsWithContext(ctx, input) + _, err := conn.DescribeGlobalReplicationGroups(ctx, input) if acctest.PreCheckSkipError(err) || - tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidParameterValueException, "Access Denied to API Version: APIGlobalDatastore") { + errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "Access Denied to API Version: APIGlobalDatastore") { t.Skipf("skipping acceptance testing: %s", err) } @@ -1497,18 +1498,18 @@ func testAccPreCheckGlobalReplicationGroup(ctx context.Context, t *testing.T) 
{ } } -func testAccMatchReplicationGroupActualVersion(ctx context.Context, j *elasticache.ReplicationGroup, r *regexp.Regexp) resource.TestCheckFunc { +func testAccMatchReplicationGroupActualVersion(ctx context.Context, j *awstypes.ReplicationGroup, r *regexp.Regexp) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) cacheCluster := j.NodeGroups[0].NodeGroupMembers[0] - cluster, err := tfelasticache.FindCacheClusterByID(ctx, conn, aws.StringValue(cacheCluster.CacheClusterId)) + cluster, err := tfelasticache.FindCacheClusterByID(ctx, conn, aws.ToString(cacheCluster.CacheClusterId)) if err != nil { return err } - if !r.MatchString(aws.StringValue(cluster.EngineVersion)) { - return fmt.Errorf("Actual engine version didn't match %q, got %q", r.String(), aws.StringValue(cluster.EngineVersion)) + if !r.MatchString(aws.ToString(cluster.EngineVersion)) { + return fmt.Errorf("Actual engine version didn't match %q, got %q", r.String(), aws.ToString(cluster.EngineVersion)) } return nil } diff --git a/internal/service/elasticache/parameter_group.go b/internal/service/elasticache/parameter_group.go index 28dbb026be6..c4ae8478271 100644 --- a/internal/service/elasticache/parameter_group.go +++ b/internal/service/elasticache/parameter_group.go @@ -10,9 +10,9 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -91,7 +91,8 @@ func resourceParameterGroup() 
*schema.Resource { func resourceParameterGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) + partition := meta.(*conns.AWSClient).Partition name := d.Get(names.AttrName).(string) input := &elasticache.CreateCacheParameterGroupInput{ @@ -101,20 +102,20 @@ func resourceParameterGroupCreate(ctx context.Context, d *schema.ResourceData, m Tags: getTagsIn(ctx), } - output, err := conn.CreateCacheParameterGroupWithContext(ctx, input) + output, err := conn.CreateCacheParameterGroup(ctx, input) - if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { log.Printf("[WARN] failed creating ElastiCache Parameter Group with tags: %s. Trying create without tags.", err) input.Tags = nil - output, err = conn.CreateCacheParameterGroupWithContext(ctx, input) + output, err = conn.CreateCacheParameterGroup(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating ElastiCache Parameter Group (%s): %s", name, err) } - d.SetId(aws.StringValue(output.CacheParameterGroup.CacheParameterGroupName)) + d.SetId(aws.ToString(output.CacheParameterGroup.CacheParameterGroupName)) d.Set(names.AttrARN, output.CacheParameterGroup.ARN) return append(diags, resourceParameterGroupUpdate(ctx, d, meta)...) 
@@ -122,7 +123,7 @@ func resourceParameterGroupCreate(ctx context.Context, d *schema.ResourceData, m func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) parameterGroup, err := findCacheParameterGroupByName(ctx, conn, d.Id()) @@ -147,7 +148,7 @@ func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, met Source: aws.String("user"), } - output, err := conn.DescribeCacheParametersWithContext(ctx, input) + output, err := conn.DescribeCacheParameters(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading ElastiCache Parameter Group (%s) parameters: %s", d.Id(), err) @@ -160,7 +161,7 @@ func resourceParameterGroupRead(ctx context.Context, d *schema.ResourceData, met func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) if d.HasChange(names.AttrParameter) { o, n := d.GetChange(names.AttrParameter) @@ -171,7 +172,7 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m const maxParams = 20 for len(toRemove) > 0 { - var paramsToModify []*elasticache.ParameterNameValue + var paramsToModify []*awstypes.ParameterNameValue if len(toRemove) <= maxParams { paramsToModify, toRemove = toRemove[:], nil } else { @@ -194,9 +195,9 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m // above, which may become out of date, here we add logic to // workaround this API behavior - if tfresource.TimedOut(err) || tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidParameterValueException, "Parameter reserved-memory doesn't exist") { + if tfresource.TimedOut(err) || 
errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "Parameter reserved-memory doesn't exist") { for i, paramToModify := range paramsToModify { - if aws.StringValue(paramToModify.ParameterName) != "reserved-memory" { + if aws.ToString(paramToModify.ParameterName) != "reserved-memory" { continue } @@ -211,7 +212,7 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m tryReservedMemoryPercentageWorkaround := true for _, configuredParameter := range toAdd { - if aws.StringValue(configuredParameter.ParameterName) == "reserved-memory-percent" { + if aws.ToString(configuredParameter.ParameterName) == "reserved-memory-percent" { tryReservedMemoryPercentageWorkaround = false break } @@ -228,7 +229,7 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m break } - workaroundParams := []*elasticache.ParameterNameValue{ + workaroundParams := []*awstypes.ParameterNameValue{ { ParameterName: aws.String("reserved-memory-percent"), ParameterValue: aws.String("0"), @@ -260,7 +261,7 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m } for len(toAdd) > 0 { - var paramsToModify []*elasticache.ParameterNameValue + var paramsToModify []*awstypes.ParameterNameValue if len(toAdd) <= maxParams { paramsToModify, toAdd = toAdd[:], nil } else { @@ -279,7 +280,7 @@ func resourceParameterGroupUpdate(ctx context.Context, d *schema.ResourceData, m func resourceParameterGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) log.Printf("[INFO] Deleting ElastiCache Parameter Group: %s", d.Id()) if err := deleteParameterGroup(ctx, conn, d.Id()); err != nil { @@ -289,17 +290,17 @@ func resourceParameterGroupDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func deleteParameterGroup(ctx 
context.Context, conn *elasticache.ElastiCache, name string) error { +func deleteParameterGroup(ctx context.Context, conn *elasticache.Client, name string) error { const ( timeout = 3 * time.Minute ) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { - return conn.DeleteCacheParameterGroupWithContext(ctx, &elasticache.DeleteCacheParameterGroupInput{ + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidCacheParameterGroupStateFault](ctx, timeout, func() (interface{}, error) { + return conn.DeleteCacheParameterGroup(ctx, &elasticache.DeleteCacheParameterGroupInput{ CacheParameterGroupName: aws.String(name), }) - }, elasticache.ErrCodeInvalidCacheParameterGroupStateFault) + }) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault) { + if errs.IsA[*awstypes.CacheParameterGroupNotFoundFault](err) { return nil } @@ -314,7 +315,7 @@ var ( parameterHash = sdkv2.SimpleSchemaSetFunc(names.AttrName, names.AttrValue) ) -func parameterChanges(o, n interface{}) (remove, addOrUpdate []*elasticache.ParameterNameValue) { +func parameterChanges(o, n interface{}) (remove, addOrUpdate []*awstypes.ParameterNameValue) { if o == nil { o = new(schema.Set) } @@ -324,19 +325,19 @@ func parameterChanges(o, n interface{}) (remove, addOrUpdate []*elasticache.Para os := o.(*schema.Set) ns := n.(*schema.Set) - om := make(map[string]*elasticache.ParameterNameValue, os.Len()) + om := make(map[string]*awstypes.ParameterNameValue, os.Len()) for _, raw := range os.List() { param := raw.(map[string]interface{}) om[param[names.AttrName].(string)] = expandParameter(param) } - nm := make(map[string]*elasticache.ParameterNameValue, len(addOrUpdate)) + nm := make(map[string]*awstypes.ParameterNameValue, len(addOrUpdate)) for _, raw := range ns.List() { param := raw.(map[string]interface{}) nm[param[names.AttrName].(string)] = expandParameter(param) } // Remove: key is in old, but not in new - remove = 
make([]*elasticache.ParameterNameValue, 0, os.Len()) + remove = make([]*awstypes.ParameterNameValue, 0, os.Len()) for k := range om { if _, ok := nm[k]; !ok { remove = append(remove, om[k]) @@ -344,10 +345,10 @@ func parameterChanges(o, n interface{}) (remove, addOrUpdate []*elasticache.Para } // Add or Update: key is in new, but not in old or has changed value - addOrUpdate = make([]*elasticache.ParameterNameValue, 0, ns.Len()) + addOrUpdate = make([]*awstypes.ParameterNameValue, 0, ns.Len()) for k, nv := range nm { ov, ok := om[k] - if !ok || ok && (aws.StringValue(nv.ParameterValue) != aws.StringValue(ov.ParameterValue)) { + if !ok || ok && (aws.ToString(nv.ParameterValue) != aws.ToString(ov.ParameterValue)) { addOrUpdate = append(addOrUpdate, nm[k]) } } @@ -355,15 +356,15 @@ func parameterChanges(o, n interface{}) (remove, addOrUpdate []*elasticache.Para return remove, addOrUpdate } -func resourceResetParameterGroup(ctx context.Context, conn *elasticache.ElastiCache, name string, parameters []*elasticache.ParameterNameValue) error { +func resourceResetParameterGroup(ctx context.Context, conn *elasticache.Client, name string, parameters []*awstypes.ParameterNameValue) error { input := elasticache.ResetCacheParameterGroupInput{ CacheParameterGroupName: aws.String(name), - ParameterNameValues: parameters, + ParameterNameValues: tfslices.Values(parameters), } return retry.RetryContext(ctx, 30*time.Second, func() *retry.RetryError { - _, err := conn.ResetCacheParameterGroupWithContext(ctx, &input) + _, err := conn.ResetCacheParameterGroup(ctx, &input) if err != nil { - if tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidCacheParameterGroupStateFault, " has pending changes") { + if errs.IsAErrorMessageContains[*awstypes.InvalidCacheParameterGroupStateFault](err, " has pending changes") { return retry.RetryableError(err) } return retry.NonRetryableError(err) @@ -372,79 +373,77 @@ func resourceResetParameterGroup(ctx context.Context, conn 
*elasticache.ElastiCa }) } -func resourceModifyParameterGroup(ctx context.Context, conn *elasticache.ElastiCache, name string, parameters []*elasticache.ParameterNameValue) error { +func resourceModifyParameterGroup(ctx context.Context, conn *elasticache.Client, name string, parameters []*awstypes.ParameterNameValue) error { input := elasticache.ModifyCacheParameterGroupInput{ CacheParameterGroupName: aws.String(name), - ParameterNameValues: parameters, + ParameterNameValues: tfslices.Values(parameters), } - _, err := conn.ModifyCacheParameterGroupWithContext(ctx, &input) + _, err := conn.ModifyCacheParameterGroup(ctx, &input) return err } -func findCacheParameterGroupByName(ctx context.Context, conn *elasticache.ElastiCache, name string) (*elasticache.CacheParameterGroup, error) { +func findCacheParameterGroupByName(ctx context.Context, conn *elasticache.Client, name string) (*awstypes.CacheParameterGroup, error) { input := &elasticache.DescribeCacheParameterGroupsInput{ CacheParameterGroupName: aws.String(name), } - return findCacheParameterGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.CacheParameterGroup]()) + return findCacheParameterGroup(ctx, conn, input, tfslices.PredicateTrue[*awstypes.CacheParameterGroup]()) } -func findCacheParameterGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheParameterGroupsInput, filter tfslices.Predicate[*elasticache.CacheParameterGroup]) (*elasticache.CacheParameterGroup, error) { +func findCacheParameterGroup(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeCacheParameterGroupsInput, filter tfslices.Predicate[*awstypes.CacheParameterGroup]) (*awstypes.CacheParameterGroup, error) { output, err := findCacheParameterGroups(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findCacheParameterGroups(ctx context.Context, conn 
*elasticache.ElastiCache, input *elasticache.DescribeCacheParameterGroupsInput, filter tfslices.Predicate[*elasticache.CacheParameterGroup]) ([]*elasticache.CacheParameterGroup, error) { - var output []*elasticache.CacheParameterGroup +func findCacheParameterGroups(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeCacheParameterGroupsInput, filter tfslices.Predicate[*awstypes.CacheParameterGroup]) ([]awstypes.CacheParameterGroup, error) { + var output []awstypes.CacheParameterGroup - err := conn.DescribeCacheParameterGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticache.NewDescribeCacheParameterGroupsPaginator(conn, input) - for _, v := range page.CacheParameterGroups { - if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.CacheParameterGroupNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheParameterGroupNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.CacheParameterGroups { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func expandParameter(tfMap map[string]interface{}) *elasticache.ParameterNameValue { - return &elasticache.ParameterNameValue{ +func expandParameter(tfMap map[string]interface{}) *awstypes.ParameterNameValue { + return &awstypes.ParameterNameValue{ ParameterName: aws.String(tfMap[names.AttrName].(string)), ParameterValue: aws.String(tfMap[names.AttrValue].(string)), } } -func flattenParameters(apiObjects []*elasticache.Parameter) []interface{} { +func flattenParameters(apiObjects 
[]awstypes.Parameter) []interface{} { tfList := make([]interface{}, 0, len(apiObjects)) for _, apiObject := range apiObjects { if apiObject.ParameterValue != nil { tfList = append(tfList, map[string]interface{}{ - names.AttrName: strings.ToLower(aws.StringValue(apiObject.ParameterName)), - names.AttrValue: aws.StringValue(apiObject.ParameterValue), + names.AttrName: strings.ToLower(aws.ToString(apiObject.ParameterName)), + names.AttrValue: aws.ToString(apiObject.ParameterValue), }) } } diff --git a/internal/service/elasticache/parameter_group_test.go b/internal/service/elasticache/parameter_group_test.go index d67114f9498..3ca46ae3cb9 100644 --- a/internal/service/elasticache/parameter_group_test.go +++ b/internal/service/elasticache/parameter_group_test.go @@ -9,8 +9,8 @@ import ( "reflect" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -24,7 +24,7 @@ import ( func TestAccElastiCacheParameterGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var v elasticache.CacheParameterGroup + var v awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -56,7 +56,7 @@ func TestAccElastiCacheParameterGroup_basic(t *testing.T) { func TestAccElastiCacheParameterGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var v elasticache.CacheParameterGroup + var v awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -80,7 +80,7 @@ func TestAccElastiCacheParameterGroup_disappears(t *testing.T) { func 
TestAccElastiCacheParameterGroup_addParameter(t *testing.T) { ctx := acctest.Context(t) - var v elasticache.CacheParameterGroup + var v awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -128,7 +128,7 @@ func TestAccElastiCacheParameterGroup_addParameter(t *testing.T) { // Regression for https://github.com/hashicorp/terraform-provider-aws/issues/116 func TestAccElastiCacheParameterGroup_removeAllParameters(t *testing.T) { ctx := acctest.Context(t) - var v elasticache.CacheParameterGroup + var v awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -168,7 +168,7 @@ func TestAccElastiCacheParameterGroup_removeAllParameters(t *testing.T) { // This covers our custom logic handling for this situation. func TestAccElastiCacheParameterGroup_RemoveReservedMemoryParameter_allParameters(t *testing.T) { ctx := acctest.Context(t) - var cacheParameterGroup1 elasticache.CacheParameterGroup + var cacheParameterGroup1 awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -209,7 +209,7 @@ func TestAccElastiCacheParameterGroup_RemoveReservedMemoryParameter_allParameter // This covers our custom logic handling for this situation. func TestAccElastiCacheParameterGroup_RemoveReservedMemoryParameter_remainingParameters(t *testing.T) { ctx := acctest.Context(t) - var cacheParameterGroup1 elasticache.CacheParameterGroup + var cacheParameterGroup1 awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -258,7 +258,7 @@ func TestAccElastiCacheParameterGroup_RemoveReservedMemoryParameter_remainingPar // This covers our custom logic handling for this situation. 
func TestAccElastiCacheParameterGroup_switchReservedMemoryParameter(t *testing.T) { ctx := acctest.Context(t) - var cacheParameterGroup1 elasticache.CacheParameterGroup + var cacheParameterGroup1 awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -303,7 +303,7 @@ func TestAccElastiCacheParameterGroup_switchReservedMemoryParameter(t *testing.T // This covers our custom logic handling for this situation. func TestAccElastiCacheParameterGroup_updateReservedMemoryParameter(t *testing.T) { ctx := acctest.Context(t) - var cacheParameterGroup1 elasticache.CacheParameterGroup + var cacheParameterGroup1 awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -346,7 +346,7 @@ func TestAccElastiCacheParameterGroup_updateReservedMemoryParameter(t *testing.T func TestAccElastiCacheParameterGroup_uppercaseName(t *testing.T) { ctx := acctest.Context(t) - var v elasticache.CacheParameterGroup + var v awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rInt := sdkacctest.RandInt() rName := fmt.Sprintf("TF-ELASTIPG-%d", rInt) @@ -375,7 +375,7 @@ func TestAccElastiCacheParameterGroup_uppercaseName(t *testing.T) { func TestAccElastiCacheParameterGroup_description(t *testing.T) { ctx := acctest.Context(t) - var v elasticache.CacheParameterGroup + var v awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -403,7 +403,7 @@ func TestAccElastiCacheParameterGroup_description(t *testing.T) { func TestAccElastiCacheParameterGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var cacheParameterGroup1 elasticache.CacheParameterGroup + var cacheParameterGroup1 awstypes.CacheParameterGroup resourceName := "aws_elasticache_parameter_group.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -448,7 +448,7 @@ func TestAccElastiCacheParameterGroup_tags(t *testing.T) { func testAccCheckParameterGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elasticache_parameter_group" { @@ -472,7 +472,7 @@ func testAccCheckParameterGroupDestroy(ctx context.Context) resource.TestCheckFu } } -func testAccCheckParameterGroupExists(ctx context.Context, n string, v *elasticache.CacheParameterGroup) resource.TestCheckFunc { +func testAccCheckParameterGroupExists(ctx context.Context, n string, v *awstypes.CacheParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -483,7 +483,7 @@ func testAccCheckParameterGroupExists(ctx context.Context, n string, v *elastica return fmt.Errorf("No ElastiCache Parameter Group ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) output, err := tfelasticache.FindCacheParameterGroupByName(ctx, conn, rs.Primary.ID) @@ -497,7 +497,7 @@ func testAccCheckParameterGroupExists(ctx context.Context, n string, v *elastica } } -func testAccCheckParameterGroupAttributes(v *elasticache.CacheParameterGroup, rName string) resource.TestCheckFunc { +func testAccCheckParameterGroupAttributes(v *awstypes.CacheParameterGroup, rName string) resource.TestCheckFunc { return func(s *terraform.State) error { if *v.CacheParameterGroupName != rName { return fmt.Errorf("bad name: %#v", v.CacheParameterGroupName) @@ -597,15 +597,15 @@ func TestParameterChanges(t *testing.T) { Name string Old *schema.Set New *schema.Set - ExpectedRemove []*elasticache.ParameterNameValue - 
ExpectedAddOrUpdate []*elasticache.ParameterNameValue + ExpectedRemove []*awstypes.ParameterNameValue + ExpectedAddOrUpdate []*awstypes.ParameterNameValue }{ { Name: "Empty", Old: new(schema.Set), New: new(schema.Set), - ExpectedRemove: []*elasticache.ParameterNameValue{}, - ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{}, + ExpectedRemove: []*awstypes.ParameterNameValue{}, + ExpectedAddOrUpdate: []*awstypes.ParameterNameValue{}, }, { Name: "Remove all", @@ -616,13 +616,13 @@ func TestParameterChanges(t *testing.T) { }, }), New: new(schema.Set), - ExpectedRemove: []*elasticache.ParameterNameValue{ + ExpectedRemove: []*awstypes.ParameterNameValue{ { ParameterName: aws.String("reserved-memory"), ParameterValue: aws.String(acctest.Ct0), }, }, - ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{}, + ExpectedAddOrUpdate: []*awstypes.ParameterNameValue{}, }, { Name: "No change", @@ -638,8 +638,8 @@ func TestParameterChanges(t *testing.T) { names.AttrValue: acctest.Ct0, }, }), - ExpectedRemove: []*elasticache.ParameterNameValue{}, - ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{}, + ExpectedRemove: []*awstypes.ParameterNameValue{}, + ExpectedAddOrUpdate: []*awstypes.ParameterNameValue{}, }, { Name: "Remove partial", @@ -659,13 +659,13 @@ func TestParameterChanges(t *testing.T) { names.AttrValue: "yes", }, }), - ExpectedRemove: []*elasticache.ParameterNameValue{ + ExpectedRemove: []*awstypes.ParameterNameValue{ { ParameterName: aws.String("reserved-memory"), ParameterValue: aws.String(acctest.Ct0), }, }, - ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{}, + ExpectedAddOrUpdate: []*awstypes.ParameterNameValue{}, }, { Name: "Add to existing", @@ -685,8 +685,8 @@ func TestParameterChanges(t *testing.T) { names.AttrValue: "always", }, }), - ExpectedRemove: []*elasticache.ParameterNameValue{}, - ExpectedAddOrUpdate: []*elasticache.ParameterNameValue{ + ExpectedRemove: []*awstypes.ParameterNameValue{}, + ExpectedAddOrUpdate: 
[]*awstypes.ParameterNameValue{ { ParameterName: aws.String("appendfsync"), ParameterValue: aws.String("always"), diff --git a/internal/service/elasticache/replication_group.go b/internal/service/elasticache/replication_group.go index 8de672a108a..bd0910baae9 100644 --- a/internal/service/elasticache/replication_group.go +++ b/internal/service/elasticache/replication_group.go @@ -12,9 +12,9 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -22,6 +22,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -72,10 +73,10 @@ func resourceReplicationGroup() *schema.Resource { ConflictsWith: []string{"user_group_ids"}, }, "auth_token_update_strategy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(elasticache.AuthTokenUpdateStrategyType_Values(), true), - Default: elasticache.AuthTokenUpdateStrategyTypeRotate, + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.AuthTokenUpdateStrategyType](), + Default: awstypes.AuthTokenUpdateStrategyTypeRotate, }, names.AttrAutoMinorVersionUpgrade: { Type: nullable.TypeNullableBool, @@ -93,10 
+94,10 @@ func resourceReplicationGroup() *schema.Resource { Computed: true, }, "cluster_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(elasticache.ClusterMode_Values(), true), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.ClusterMode](), }, "configuration_endpoint_address": { Type: schema.TypeString, @@ -155,10 +156,10 @@ func resourceReplicationGroup() *schema.Resource { }, }, "ip_discovery": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(elasticache.IpDiscovery_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.IpDiscovery](), }, names.AttrKMSKeyID: { Type: schema.TypeString, @@ -172,23 +173,23 @@ func resourceReplicationGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "destination_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.DestinationType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.DestinationType](), }, names.AttrDestination: { Type: schema.TypeString, Required: true, }, "log_format": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.LogFormat_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogFormat](), }, "log_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.LogType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogType](), }, }, }, @@ -214,11 +215,11 @@ func resourceReplicationGroup() *schema.Resource { Default: false, }, "network_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - 
ValidateFunc: validation.StringInSlice(elasticache.NetworkType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.NetworkType](), }, "node_type": { Type: schema.TypeString, @@ -345,10 +346,10 @@ func resourceReplicationGroup() *schema.Resource { Computed: true, }, "transit_encryption_mode": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(elasticache.TransitEncryptionMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.TransitEncryptionMode](), }, "user_group_ids": { Type: schema.TypeSet, @@ -404,7 +405,8 @@ func resourceReplicationGroup() *schema.Resource { func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) + partition := meta.(*conns.AWSClient).Partition replicationGroupID := d.Get("replication_group_id").(string) input := &elasticache.CreateReplicationGroupInput{ @@ -427,7 +429,7 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("cluster_mode"); ok { - input.ClusterMode = aws.String(v.(string)) + input.ClusterMode = awstypes.ClusterMode(v.(string)) } if v, ok := d.GetOk("data_tiering_enabled"); ok { @@ -456,7 +458,7 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("ip_discovery"); ok { - input.IpDiscovery = aws.String(v.(string)) + input.IpDiscovery = awstypes.IpDiscovery(v.(string)) } if v, ok := d.GetOk(names.AttrKMSKeyID); ok { @@ -470,8 +472,8 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, continue } - apiObject := expandLogDeliveryConfigurations(tfMap) - input.LogDeliveryConfigurations = 
append(input.LogDeliveryConfigurations, &apiObject) + apiObject := expandLogDeliveryConfigurationRequests(tfMap) + input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, apiObject) } } @@ -484,7 +486,7 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("network_type"); ok { - input.NetworkType = aws.String(v.(string)) + input.NetworkType = awstypes.NetworkType(v.(string)) } if v, ok := d.GetOk("notification_topic_arn"); ok { @@ -492,11 +494,11 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("num_cache_clusters"); ok { - input.NumCacheClusters = aws.Int64(int64(v.(int))) + input.NumCacheClusters = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("num_node_groups"); ok && v != 0 { - input.NumNodeGroups = aws.Int64(int64(v.(int))) + input.NumNodeGroups = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk(names.AttrParameterGroupName); ok { @@ -504,15 +506,15 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk(names.AttrPort); ok { - input.Port = aws.Int64(int64(v.(int))) + input.Port = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("preferred_cache_cluster_azs"); ok && len(v.([]interface{})) > 0 { - input.PreferredCacheClusterAZs = flex.ExpandStringList(v.([]interface{})) + input.PreferredCacheClusterAZs = flex.ExpandStringValueList(v.([]interface{})) } if v, ok := d.GetOk("replicas_per_node_group"); ok { - input.ReplicasPerNodeGroup = aws.Int64(int64(v.(int))) + input.ReplicasPerNodeGroup = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("subnet_group_name"); ok { @@ -520,15 +522,15 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk(names.AttrSecurityGroupIDs); ok && v.(*schema.Set).Len() > 0 { - input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } if 
v, ok := d.GetOk("security_group_names"); ok && v.(*schema.Set).Len() > 0 { - input.CacheSecurityGroupNames = flex.ExpandStringSet(v.(*schema.Set)) + input.CacheSecurityGroupNames = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("snapshot_arns"); ok && v.(*schema.Set).Len() > 0 { - input.SnapshotArns = flex.ExpandStringSet(v.(*schema.Set)) + input.SnapshotArns = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("snapshot_name"); ok { @@ -536,7 +538,7 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("snapshot_retention_limit"); ok { - input.SnapshotRetentionLimit = aws.Int64(int64(v.(int))) + input.SnapshotRetentionLimit = aws.Int32(int32(v.(int))) } if v, ok := d.GetOk("snapshot_window"); ok { @@ -548,27 +550,27 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("transit_encryption_mode"); ok { - input.TransitEncryptionMode = aws.String(v.(string)) + input.TransitEncryptionMode = awstypes.TransitEncryptionMode(v.(string)) } if v, ok := d.GetOk("user_group_ids"); ok && v.(*schema.Set).Len() > 0 { - input.UserGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + input.UserGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } - output, err := conn.CreateReplicationGroupWithContext(ctx, input) + output, err := conn.CreateReplicationGroup(ctx, input) // Some partitions (e.g. ISO) may not support tag-on-create. 
- if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateReplicationGroupWithContext(ctx, input) + output, err = conn.CreateReplicationGroup(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating ElastiCache Replication Group (%s): %s", replicationGroupID, err) } - d.SetId(aws.StringValue(output.ReplicationGroup.ReplicationGroupId)) + d.SetId(aws.ToString(output.ReplicationGroup.ReplicationGroupId)) const ( delay = 30 * time.Second @@ -589,10 +591,10 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, // For partitions not supporting tag-on-create, attempt tag after create. if tags := getTagsIn(ctx); input.Tags == nil && len(tags) > 0 { - err := createTags(ctx, conn, aws.StringValue(output.ReplicationGroup.ARN), tags) + err := createTags(ctx, conn, aws.ToString(output.ReplicationGroup.ARN), tags) // If default tags only, continue. Otherwise, error. - if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(partition, err) { return append(diags, resourceReplicationGroupRead(ctx, d, meta)...) 
} @@ -606,7 +608,7 @@ func resourceReplicationGroupCreate(ctx context.Context, d *schema.ResourceData, func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) rgp, err := findReplicationGroupByID(ctx, conn, d.Id()) @@ -620,7 +622,7 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): %s", d.Id(), err) } - if aws.StringValue(rgp.Status) == replicationGroupStatusDeleting { + if aws.ToString(rgp.Status) == replicationGroupStatusDeleting { log.Printf("[WARN] ElastiCache Replication Group (%s) is currently in the `deleting` status, removing from state", d.Id()) d.SetId("") return diags @@ -630,32 +632,28 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m d.Set("global_replication_group_id", rgp.GlobalReplicationGroupInfo.GlobalReplicationGroupId) } - if rgp.AutomaticFailover != nil { - switch strings.ToLower(aws.StringValue(rgp.AutomaticFailover)) { - case elasticache.AutomaticFailoverStatusDisabled, elasticache.AutomaticFailoverStatusDisabling: - d.Set("automatic_failover_enabled", false) - case elasticache.AutomaticFailoverStatusEnabled, elasticache.AutomaticFailoverStatusEnabling: - d.Set("automatic_failover_enabled", true) - default: - log.Printf("Unknown AutomaticFailover state %q", aws.StringValue(rgp.AutomaticFailover)) - } + switch rgp.AutomaticFailover { + case awstypes.AutomaticFailoverStatusDisabled, awstypes.AutomaticFailoverStatusDisabling: + d.Set("automatic_failover_enabled", false) + case awstypes.AutomaticFailoverStatusEnabled, awstypes.AutomaticFailoverStatusEnabling: + d.Set("automatic_failover_enabled", true) + default: + log.Printf("Unknown AutomaticFailover state %q", string(rgp.AutomaticFailover)) } - if 
rgp.MultiAZ != nil { - switch strings.ToLower(aws.StringValue(rgp.MultiAZ)) { - case elasticache.MultiAZStatusEnabled: - d.Set("multi_az_enabled", true) - case elasticache.MultiAZStatusDisabled: - d.Set("multi_az_enabled", false) - default: - log.Printf("Unknown MultiAZ state %q", aws.StringValue(rgp.MultiAZ)) - } + switch rgp.MultiAZ { + case awstypes.MultiAZStatusEnabled: + d.Set("multi_az_enabled", true) + case awstypes.MultiAZStatusDisabled: + d.Set("multi_az_enabled", false) + default: + log.Printf("Unknown MultiAZ state %q", string(rgp.MultiAZ)) } d.Set(names.AttrKMSKeyID, rgp.KmsKeyId) d.Set(names.AttrDescription, rgp.Description) d.Set("num_cache_clusters", len(rgp.MemberClusters)) - if err := d.Set("member_clusters", flex.FlattenStringSet(rgp.MemberClusters)); err != nil { + if err := d.Set("member_clusters", flex.FlattenStringValueSet(rgp.MemberClusters)); err != nil { return sdkdiag.AppendErrorf(diags, "setting member_clusters: %s", err) } @@ -666,7 +664,7 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m d.Set("cluster_mode", rgp.ClusterMode) d.Set("replication_group_id", rgp.ReplicationGroupId) d.Set(names.AttrARN, rgp.ARN) - d.Set("data_tiering_enabled", aws.StringValue(rgp.DataTiering) == elasticache.DataTieringStatusEnabled) + d.Set("data_tiering_enabled", rgp.DataTiering == awstypes.DataTieringStatusEnabled) d.Set("ip_discovery", rgp.IpDiscovery) d.Set("network_type", rgp.NetworkType) @@ -700,40 +698,41 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m const ( delay = 0 * time.Second ) - _, err = waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group to be available (%s): %s", aws.StringValue(rgp.ARN), err) + if _, err := waitReplicationGroupAvailable(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate), delay); err != nil { + return 
sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) create: %s", aws.ToString(rgp.ARN), err) } log.Printf("[DEBUG] ElastiCache Replication Group (%s): Checking underlying cache clusters", d.Id()) // This section reads settings that require checking the underlying cache clusters - if rgp.NodeGroups != nil && rgp.NodeGroups[0] != nil && len(rgp.NodeGroups[0].NodeGroupMembers) != 0 { + if rgp.NodeGroups != nil && len(rgp.NodeGroups[0].NodeGroupMembers) != 0 { cacheCluster := rgp.NodeGroups[0].NodeGroupMembers[0] - - res, err := conn.DescribeCacheClustersWithContext(ctx, &elasticache.DescribeCacheClustersInput{ + input := &elasticache.DescribeCacheClustersInput{ CacheClusterId: cacheCluster.CacheClusterId, ShowCacheNodeInfo: aws.Bool(true), - }) + } + + output, err := conn.DescribeCacheClusters(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): reading Cache Cluster (%s): %s", d.Id(), aws.StringValue(cacheCluster.CacheClusterId), err) + return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): reading Cache Cluster (%s): %s", d.Id(), aws.ToString(cacheCluster.CacheClusterId), err) } - if len(res.CacheClusters) == 0 { + if len(output.CacheClusters) == 0 { return diags } - c := res.CacheClusters[0] + c := output.CacheClusters[0] - if err := setFromCacheCluster(d, c); err != nil { - return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): reading Cache Cluster (%s): %s", d.Id(), aws.StringValue(cacheCluster.CacheClusterId), err) + if err := setFromCacheCluster(d, &c); err != nil { + return sdkdiag.AppendErrorf(diags, "reading ElastiCache Replication Group (%s): reading Cache Cluster (%s): %s", d.Id(), aws.ToString(cacheCluster.CacheClusterId), err) } d.Set("at_rest_encryption_enabled", c.AtRestEncryptionEnabled) d.Set("transit_encryption_enabled", c.TransitEncryptionEnabled) d.Set("transit_encryption_mode", c.TransitEncryptionMode) - if 
c.AuthTokenEnabled != nil && !aws.BoolValue(c.AuthTokenEnabled) { + if c.AuthTokenEnabled != nil && !aws.ToBool(c.AuthTokenEnabled) { d.Set("auth_token", nil) } } @@ -743,7 +742,7 @@ func resourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, m func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { o, n := d.GetChange("num_cache_clusters") @@ -787,7 +786,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("cluster_mode") { - input.ClusterMode = aws.String(d.Get("cluster_mode").(string)) + input.ClusterMode = awstypes.ClusterMode(d.Get("cluster_mode").(string)) requestUpdate = true } @@ -797,29 +796,29 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("ip_discovery") { - input.IpDiscovery = aws.String(d.Get("ip_discovery").(string)) + input.IpDiscovery = awstypes.IpDiscovery(d.Get("ip_discovery").(string)) requestUpdate = true } if d.HasChange("log_delivery_configuration") { o, n := d.GetChange("log_delivery_configuration") - input.LogDeliveryConfigurations = []*elasticache.LogDeliveryConfigurationRequest{} - logTypesToSubmit := make(map[string]bool) + input.LogDeliveryConfigurations = []awstypes.LogDeliveryConfigurationRequest{} + logTypesToSubmit := make(map[awstypes.LogType]bool) currentLogDeliveryConfig := n.(*schema.Set).List() for _, current := range currentLogDeliveryConfig { - logDeliveryConfigurationRequest := expandLogDeliveryConfigurations(current.(map[string]interface{})) - logTypesToSubmit[*logDeliveryConfigurationRequest.LogType] = true - input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &logDeliveryConfigurationRequest) + logDeliveryConfigurationRequest 
:= expandLogDeliveryConfigurationRequests(current.(map[string]interface{})) + logTypesToSubmit[logDeliveryConfigurationRequest.LogType] = true + input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, logDeliveryConfigurationRequest) } previousLogDeliveryConfig := o.(*schema.Set).List() for _, previous := range previousLogDeliveryConfig { - logDeliveryConfigurationRequest := expandEmptyLogDeliveryConfigurations(previous.(map[string]interface{})) + logDeliveryConfigurationRequest := expandEmptyLogDeliveryConfigurationRequest(previous.(map[string]interface{})) //if something was removed, send an empty request - if !logTypesToSubmit[*logDeliveryConfigurationRequest.LogType] { - input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, &logDeliveryConfigurationRequest) + if !logTypesToSubmit[logDeliveryConfigurationRequest.LogType] { + input.LogDeliveryConfigurations = append(input.LogDeliveryConfigurations, logDeliveryConfigurationRequest) } } @@ -837,7 +836,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("network_type") { - input.IpDiscovery = aws.String(d.Get("network_type").(string)) + input.IpDiscovery = awstypes.IpDiscovery(d.Get("network_type").(string)) requestUpdate = true } @@ -858,14 +857,14 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, if d.HasChange(names.AttrSecurityGroupIDs) { if v, ok := d.GetOk(names.AttrSecurityGroupIDs); ok && v.(*schema.Set).Len() > 0 { - input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) requestUpdate = true } } if d.HasChange("security_group_names") { if v, ok := d.GetOk("security_group_names"); ok && v.(*schema.Set).Len() > 0 { - input.CacheSecurityGroupNames = flex.ExpandStringSet(v.(*schema.Set)) + input.CacheSecurityGroupNames = flex.ExpandStringValueSet(v.(*schema.Set)) requestUpdate = true } } @@ -877,7 +876,7 @@ func 
resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, input.SnapshottingClusterId = aws.String(fmt.Sprintf("%s-001", d.Id())) } - input.SnapshotRetentionLimit = aws.Int64(int64(d.Get("snapshot_retention_limit").(int))) + input.SnapshotRetentionLimit = aws.Int32(int32(d.Get("snapshot_retention_limit").(int))) requestUpdate = true } @@ -892,7 +891,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("transit_encryption_mode") { - input.TransitEncryptionMode = aws.String(d.Get("transit_encryption_mode").(string)) + input.TransitEncryptionMode = awstypes.TransitEncryptionMode(d.Get("transit_encryption_mode").(string)) requestUpdate = true } @@ -902,12 +901,12 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, add, del := ns.Difference(os), os.Difference(ns) if add.Len() > 0 { - input.UserGroupIdsToAdd = flex.ExpandStringSet(add) + input.UserGroupIdsToAdd = flex.ExpandStringValueSet(add) requestUpdate = true } if del.Len() > 0 { - input.UserGroupIdsToRemove = flex.ExpandStringSet(del) + input.UserGroupIdsToRemove = flex.ExpandStringValueSet(del) requestUpdate = true } } @@ -921,7 +920,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) update: %s", d.Id(), err) } - _, err := conn.ModifyReplicationGroupWithContext(ctx, input) + _, err := conn.ModifyReplicationGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ElastiCache Replication Group (%s): %s", d.Id(), err) @@ -936,7 +935,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, input := &elasticache.ModifyReplicationGroupInput{ ApplyImmediately: aws.Bool(true), AuthToken: aws.String(d.Get("auth_token").(string)), - AuthTokenUpdateStrategy: aws.String(d.Get("auth_token_update_strategy").(string)), + AuthTokenUpdateStrategy: 
awstypes.AuthTokenUpdateStrategyType(d.Get("auth_token_update_strategy").(string)), ReplicationGroupId: aws.String(d.Id()), } @@ -948,7 +947,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache Replication Group (%s) update: %s", d.Id(), err) } - _, err := conn.ModifyReplicationGroupWithContext(ctx, input) + _, err := conn.ModifyReplicationGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ElastiCache Replication Group (%s) authentication: %s", d.Id(), err) @@ -973,7 +972,7 @@ func resourceReplicationGroupUpdate(ctx context.Context, d *schema.ResourceData, func resourceReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) v, hasGlobalReplicationGroupID := d.GetOk("global_replication_group_id") if hasGlobalReplicationGroupID { @@ -996,12 +995,12 @@ func resourceReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, timeout = 10 * time.Minute // 10 minutes should give any creating/deleting cache clusters or snapshots time to complete. 
) log.Printf("[INFO] Deleting ElastiCache Replication Group: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { - return conn.DeleteReplicationGroupWithContext(ctx, input) - }, elasticache.ErrCodeInvalidReplicationGroupStateFault) + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidReplicationGroupStateFault](ctx, timeout, func() (interface{}, error) { + return conn.DeleteReplicationGroup(ctx, input) + }) switch { - case tfawserr.ErrCodeEquals(err, elasticache.ErrCodeReplicationGroupNotFoundFault): + case errs.IsA[*awstypes.ReplicationGroupNotFoundFault](err): case err != nil: return sdkdiag.AppendErrorf(diags, "deleting ElastiCache Replication Group (%s): %s", d.Id(), err) default: @@ -1021,22 +1020,22 @@ func resourceReplicationGroupDelete(ctx context.Context, d *schema.ResourceData, return diags } -func disassociateReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroupID, replicationGroupID, region string, timeout time.Duration) error { +func disassociateReplicationGroup(ctx context.Context, conn *elasticache.Client, globalReplicationGroupID, replicationGroupID, region string, timeout time.Duration) error { input := &elasticache.DisassociateGlobalReplicationGroupInput{ GlobalReplicationGroupId: aws.String(globalReplicationGroupID), ReplicationGroupId: aws.String(replicationGroupID), ReplicationGroupRegion: aws.String(region), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { - return conn.DisassociateGlobalReplicationGroupWithContext(ctx, input) - }, elasticache.ErrCodeInvalidGlobalReplicationGroupStateFault) + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidGlobalReplicationGroupStateFault](ctx, timeout, func() (interface{}, error) { + return conn.DisassociateGlobalReplicationGroup(ctx, input) + }) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeGlobalReplicationGroupNotFoundFault) { + if 
errs.IsA[*awstypes.GlobalReplicationGroupNotFoundFault](err) { return nil } - if tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidParameterValueException, "is not associated with Global Replication Group") { + if errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "is not associated with Global Replication Group") { return nil } @@ -1051,7 +1050,7 @@ func disassociateReplicationGroup(ctx context.Context, conn *elasticache.ElastiC return nil } -func modifyReplicationGroupShardConfiguration(ctx context.Context, conn *elasticache.ElastiCache, d *schema.ResourceData) error { +func modifyReplicationGroupShardConfiguration(ctx context.Context, conn *elasticache.Client, d *schema.ResourceData) error { if d.HasChange("num_node_groups") { if err := modifyReplicationGroupShardConfigurationNumNodeGroups(ctx, conn, d, "num_node_groups"); err != nil { return err @@ -1067,13 +1066,13 @@ func modifyReplicationGroupShardConfiguration(ctx context.Context, conn *elastic return nil } -func modifyReplicationGroupShardConfigurationNumNodeGroups(ctx context.Context, conn *elasticache.ElastiCache, d *schema.ResourceData, argument string) error { +func modifyReplicationGroupShardConfigurationNumNodeGroups(ctx context.Context, conn *elasticache.Client, d *schema.ResourceData, argument string) error { o, n := d.GetChange(argument) oldNodeGroupCount, newNodeGroupCount := o.(int), n.(int) input := &elasticache.ModifyReplicationGroupShardConfigurationInput{ ApplyImmediately: aws.Bool(true), - NodeGroupCount: aws.Int64(int64(newNodeGroupCount)), + NodeGroupCount: aws.Int32(int32(newNodeGroupCount)), ReplicationGroupId: aws.String(d.Id()), } @@ -1085,10 +1084,10 @@ func modifyReplicationGroupShardConfigurationNumNodeGroups(ctx context.Context, nodeGroupID := fmt.Sprintf("%04d", i) nodeGroupsToRemove = append(nodeGroupsToRemove, nodeGroupID) } - input.NodeGroupsToRemove = aws.StringSlice(nodeGroupsToRemove) + input.NodeGroupsToRemove = nodeGroupsToRemove } - _, 
err := conn.ModifyReplicationGroupShardConfigurationWithContext(ctx, input) + _, err := conn.ModifyReplicationGroupShardConfiguration(ctx, input) if err != nil { return fmt.Errorf("modifying ElastiCache Replication Group (%s) shard configuration: %w", d.Id(), err) @@ -1104,18 +1103,18 @@ func modifyReplicationGroupShardConfigurationNumNodeGroups(ctx context.Context, return nil } -func modifyReplicationGroupShardConfigurationReplicasPerNodeGroup(ctx context.Context, conn *elasticache.ElastiCache, d *schema.ResourceData, argument string) error { +func modifyReplicationGroupShardConfigurationReplicasPerNodeGroup(ctx context.Context, conn *elasticache.Client, d *schema.ResourceData, argument string) error { o, n := d.GetChange(argument) oldReplicaCount, newReplicaCount := o.(int), n.(int) if newReplicaCount > oldReplicaCount { input := &elasticache.IncreaseReplicaCountInput{ ApplyImmediately: aws.Bool(true), - NewReplicaCount: aws.Int64(int64(newReplicaCount)), + NewReplicaCount: aws.Int32(int32(newReplicaCount)), ReplicationGroupId: aws.String(d.Id()), } - _, err := conn.IncreaseReplicaCountWithContext(ctx, input) + _, err := conn.IncreaseReplicaCount(ctx, input) if err != nil { return fmt.Errorf("increasing ElastiCache Replication Group (%s) replica count (%d): %w", d.Id(), newReplicaCount, err) @@ -1130,11 +1129,11 @@ func modifyReplicationGroupShardConfigurationReplicasPerNodeGroup(ctx context.Co } else if newReplicaCount < oldReplicaCount { input := &elasticache.DecreaseReplicaCountInput{ ApplyImmediately: aws.Bool(true), - NewReplicaCount: aws.Int64(int64(newReplicaCount)), + NewReplicaCount: aws.Int32(int32(newReplicaCount)), ReplicationGroupId: aws.String(d.Id()), } - _, err := conn.DecreaseReplicaCountWithContext(ctx, input) + _, err := conn.DecreaseReplicaCount(ctx, input) if err != nil { return fmt.Errorf("decreasing ElastiCache Replication Group (%s) replica count (%d): %w", d.Id(), newReplicaCount, err) @@ -1151,14 +1150,14 @@ func 
modifyReplicationGroupShardConfigurationReplicasPerNodeGroup(ctx context.Co return nil } -func increaseReplicationGroupReplicaCount(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, newReplicaCount int, timeout time.Duration) error { +func increaseReplicationGroupReplicaCount(ctx context.Context, conn *elasticache.Client, replicationGroupID string, newReplicaCount int, timeout time.Duration) error { input := &elasticache.IncreaseReplicaCountInput{ ApplyImmediately: aws.Bool(true), - NewReplicaCount: aws.Int64(int64(newReplicaCount - 1)), + NewReplicaCount: aws.Int32(int32(newReplicaCount - 1)), ReplicationGroupId: aws.String(replicationGroupID), } - _, err := conn.IncreaseReplicaCountWithContext(ctx, input) + _, err := conn.IncreaseReplicaCount(ctx, input) if err != nil { return fmt.Errorf("increasing ElastiCache Replication Group (%s) replica count (%d): %w", replicationGroupID, newReplicaCount-1, err) @@ -1171,14 +1170,14 @@ func increaseReplicationGroupReplicaCount(ctx context.Context, conn *elasticache return nil } -func decreaseReplicationGroupReplicaCount(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, newReplicaCount int, timeout time.Duration) error { +func decreaseReplicationGroupReplicaCount(ctx context.Context, conn *elasticache.Client, replicationGroupID string, newReplicaCount int, timeout time.Duration) error { input := &elasticache.DecreaseReplicaCountInput{ ApplyImmediately: aws.Bool(true), - NewReplicaCount: aws.Int64(int64(newReplicaCount - 1)), + NewReplicaCount: aws.Int32(int32(newReplicaCount - 1)), ReplicationGroupId: aws.String(replicationGroupID), } - _, err := conn.DecreaseReplicaCountWithContext(ctx, input) + _, err := conn.DecreaseReplicaCount(ctx, input) if err != nil { return fmt.Errorf("decreasing ElastiCache Replication Group (%s) replica count (%d): %w", replicationGroupID, newReplicaCount-1, err) @@ -1191,56 +1190,54 @@ func decreaseReplicationGroupReplicaCount(ctx 
context.Context, conn *elasticache return nil } -func findReplicationGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.ReplicationGroup, error) { +func findReplicationGroupByID(ctx context.Context, conn *elasticache.Client, id string) (*awstypes.ReplicationGroup, error) { input := &elasticache.DescribeReplicationGroupsInput{ ReplicationGroupId: aws.String(id), } - return findReplicationGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.ReplicationGroup]()) + return findReplicationGroup(ctx, conn, input, tfslices.PredicateTrue[*awstypes.ReplicationGroup]()) } -func findReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeReplicationGroupsInput, filter tfslices.Predicate[*elasticache.ReplicationGroup]) (*elasticache.ReplicationGroup, error) { +func findReplicationGroup(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeReplicationGroupsInput, filter tfslices.Predicate[*awstypes.ReplicationGroup]) (*awstypes.ReplicationGroup, error) { output, err := findReplicationGroups(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findReplicationGroups(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeReplicationGroupsInput, filter tfslices.Predicate[*elasticache.ReplicationGroup]) ([]*elasticache.ReplicationGroup, error) { - var output []*elasticache.ReplicationGroup +func findReplicationGroups(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeReplicationGroupsInput, filter tfslices.Predicate[*awstypes.ReplicationGroup]) ([]awstypes.ReplicationGroup, error) { + var output []awstypes.ReplicationGroup - err := conn.DescribeReplicationGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeReplicationGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := 
elasticache.NewDescribeReplicationGroupsPaginator(conn, input) - for _, v := range page.ReplicationGroups { - if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ReplicationGroupNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeReplicationGroupNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.ReplicationGroups { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func statusReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string) retry.StateRefreshFunc { +func statusReplicationGroup(ctx context.Context, conn *elasticache.Client, replicationGroupID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findReplicationGroupByID(ctx, conn, replicationGroupID) @@ -1252,7 +1249,7 @@ func statusReplicationGroup(ctx context.Context, conn *elasticache.ElastiCache, return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } @@ -1265,7 +1262,7 @@ const ( replicationGroupStatusSnapshotting = "snapshotting" ) -func waitReplicationGroupAvailable(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration, delay time.Duration) (*elasticache.ReplicationGroup, error) { +func waitReplicationGroupAvailable(ctx context.Context, conn *elasticache.Client, replicationGroupID string, timeout time.Duration, delay time.Duration) (*awstypes.ReplicationGroup, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ replicationGroupStatusCreating, @@ -1281,14 +1278,14 @@ func waitReplicationGroupAvailable(ctx 
context.Context, conn *elasticache.Elasti outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.ReplicationGroup); ok { + if output, ok := outputRaw.(*awstypes.ReplicationGroup); ok { return output, err } return nil, err } -func waitReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) (*elasticache.ReplicationGroup, error) { +func waitReplicationGroupDeleted(ctx context.Context, conn *elasticache.Client, replicationGroupID string, timeout time.Duration) (*awstypes.ReplicationGroup, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ replicationGroupStatusCreating, @@ -1304,23 +1301,22 @@ func waitReplicationGroupDeleted(ctx context.Context, conn *elasticache.ElastiCa outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.ReplicationGroup); ok { + if output, ok := outputRaw.(*awstypes.ReplicationGroup); ok { return output, err } return nil, err } -func findReplicationGroupMemberClustersByID(ctx context.Context, conn *elasticache.ElastiCache, id string) ([]*elasticache.CacheCluster, error) { +func findReplicationGroupMemberClustersByID(ctx context.Context, conn *elasticache.Client, id string) ([]awstypes.CacheCluster, error) { rg, err := findReplicationGroupByID(ctx, conn, id) if err != nil { return nil, err } - - ids := aws.StringValueSlice(rg.MemberClusters) - clusters, err := findCacheClusters(ctx, conn, &elasticache.DescribeCacheClustersInput{}, func(v *elasticache.CacheCluster) bool { - return slices.Contains(ids, aws.StringValue(v.CacheClusterId)) + ids := rg.MemberClusters + clusters, err := findCacheClusters(ctx, conn, &elasticache.DescribeCacheClustersInput{}, func(v *awstypes.CacheCluster) bool { + return slices.Contains(ids, aws.ToString(v.CacheClusterId)) }) if err != nil { @@ -1336,7 +1332,7 @@ func findReplicationGroupMemberClustersByID(ctx context.Context, conn *elasticac // 
statusReplicationGroupMemberClusters fetches the Replication Group's Member Clusters and either "available" or the first non-"available" status. // NOTE: This function assumes that the intended end-state is to have all member clusters in "available" status. -func statusReplicationGroupMemberClusters(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string) retry.StateRefreshFunc { +func statusReplicationGroupMemberClusters(ctx context.Context, conn *elasticache.Client, replicationGroupID string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findReplicationGroupMemberClustersByID(ctx, conn, replicationGroupID) @@ -1350,7 +1346,7 @@ func statusReplicationGroupMemberClusters(ctx context.Context, conn *elasticache status := cacheClusterStatusAvailable for _, v := range output { - if clusterStatus := aws.StringValue(v.CacheClusterStatus); clusterStatus != cacheClusterStatusAvailable { + if clusterStatus := aws.ToString(v.CacheClusterStatus); clusterStatus != cacheClusterStatusAvailable { status = clusterStatus break } @@ -1360,7 +1356,7 @@ func statusReplicationGroupMemberClusters(ctx context.Context, conn *elasticache } } -func waitReplicationGroupMemberClustersAvailable(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) ([]*elasticache.CacheCluster, error) { //nolint:unparam +func waitReplicationGroupMemberClustersAvailable(ctx context.Context, conn *elasticache.Client, replicationGroupID string, timeout time.Duration) ([]*awstypes.CacheCluster, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{ cacheClusterStatusCreating, @@ -1377,7 +1373,7 @@ func waitReplicationGroupMemberClustersAvailable(ctx context.Context, conn *elas outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.([]*elasticache.CacheCluster); ok { + if output, ok := outputRaw.([]*awstypes.CacheCluster); ok { return output, err } diff 
--git a/internal/service/elasticache/replication_group_data_source.go b/internal/service/elasticache/replication_group_data_source.go index 0c03edb9c24..f582054f0f2 100644 --- a/internal/service/elasticache/replication_group_data_source.go +++ b/internal/service/elasticache/replication_group_data_source.go @@ -6,10 +6,9 @@ package elasticache import ( "context" "log" - "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -124,7 +123,7 @@ func dataSourceReplicationGroup() *schema.Resource { func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) groupID := d.Get("replication_group_id").(string) @@ -134,29 +133,25 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache Replication Group", err)) } - d.SetId(aws.StringValue(rg.ReplicationGroupId)) + d.SetId(aws.ToString(rg.ReplicationGroupId)) d.Set(names.AttrDescription, rg.Description) d.Set(names.AttrARN, rg.ARN) d.Set("auth_token_enabled", rg.AuthTokenEnabled) - if rg.AutomaticFailover != nil { - switch aws.StringValue(rg.AutomaticFailover) { - case elasticache.AutomaticFailoverStatusDisabled, elasticache.AutomaticFailoverStatusDisabling: - d.Set("automatic_failover_enabled", false) - case elasticache.AutomaticFailoverStatusEnabled, elasticache.AutomaticFailoverStatusEnabling: - d.Set("automatic_failover_enabled", true) - } + switch rg.AutomaticFailover { + case 
awstypes.AutomaticFailoverStatusDisabled, awstypes.AutomaticFailoverStatusDisabling: + d.Set("automatic_failover_enabled", false) + case awstypes.AutomaticFailoverStatusEnabled, awstypes.AutomaticFailoverStatusEnabling: + d.Set("automatic_failover_enabled", true) } - if rg.MultiAZ != nil { - switch strings.ToLower(aws.StringValue(rg.MultiAZ)) { - case elasticache.MultiAZStatusEnabled: - d.Set("multi_az_enabled", true) - case elasticache.MultiAZStatusDisabled: - d.Set("multi_az_enabled", false) - default: - log.Printf("Unknown MultiAZ state %q", aws.StringValue(rg.MultiAZ)) - } + switch rg.MultiAZ { + case awstypes.MultiAZStatusEnabled: + d.Set("multi_az_enabled", true) + case awstypes.MultiAZStatusDisabled: + d.Set("multi_az_enabled", false) + default: + log.Printf("Unknown MultiAZ state %q", string(rg.MultiAZ)) } if rg.ConfigurationEndpoint != nil { @@ -165,7 +160,7 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, } else { if rg.NodeGroups == nil { d.SetId("") - return sdkdiag.AppendErrorf(diags, "ElastiCache Replication Group (%s) doesn't have node groups", aws.StringValue(rg.ReplicationGroupId)) + return sdkdiag.AppendErrorf(diags, "ElastiCache Replication Group (%s) doesn't have node groups", aws.ToString(rg.ReplicationGroupId)) } d.Set(names.AttrPort, rg.NodeGroups[0].PrimaryEndpoint.Port) d.Set("primary_endpoint_address", rg.NodeGroups[0].PrimaryEndpoint.Address) @@ -173,7 +168,7 @@ func dataSourceReplicationGroupRead(ctx context.Context, d *schema.ResourceData, } d.Set("num_cache_clusters", len(rg.MemberClusters)) - if err := d.Set("member_clusters", flex.FlattenStringList(rg.MemberClusters)); err != nil { + if err := d.Set("member_clusters", flex.FlattenStringValueList(rg.MemberClusters)); err != nil { return sdkdiag.AppendErrorf(diags, "setting member_clusters: %s", err) } d.Set("node_type", rg.CacheNodeType) diff --git a/internal/service/elasticache/replication_group_data_source_test.go 
b/internal/service/elasticache/replication_group_data_source_test.go index a5f6be280b6..6a4d98e0243 100644 --- a/internal/service/elasticache/replication_group_data_source_test.go +++ b/internal/service/elasticache/replication_group_data_source_test.go @@ -7,8 +7,7 @@ import ( "fmt" "testing" - "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -112,21 +111,6 @@ func TestAccElastiCacheReplicationGroupDataSource_multiAZ(t *testing.T) { }) } -func TestAccElastiCacheReplicationGroupDataSource_nonExistent(t *testing.T) { - ctx := acctest.Context(t) - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.ElastiCacheServiceID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Steps: []resource.TestStep{ - { - Config: testAccReplicationGroupDataSourceConfig_nonExistent, - ExpectError: regexache.MustCompile(`couldn't find resource`), - }, - }, - }) -} - func TestAccElastiCacheReplicationGroupDataSource_Engine_Redis_LogDeliveryConfigurations(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -142,7 +126,7 @@ func TestAccElastiCacheReplicationGroupDataSource_Engine_Redis_LogDeliveryConfig ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { - Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, false, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson), + Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, false, true, 
string(awstypes.DestinationTypeCloudWatchLogs), string(awstypes.LogFormatText), true, string(awstypes.DestinationTypeKinesisFirehose), string(awstypes.LogFormatJson)), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "log_delivery_configuration.0.destination", rName), resource.TestCheckResourceAttr(dataSourceName, "log_delivery_configuration.0.destination_type", "cloudwatch-logs"), @@ -212,9 +196,3 @@ data "aws_elasticache_replication_group" "test" { } `, rName) } - -const testAccReplicationGroupDataSourceConfig_nonExistent = ` -data "aws_elasticache_replication_group" "test" { - replication_group_id = "tf-acc-test-nonexistent" -} -` diff --git a/internal/service/elasticache/replication_group_migrate.go b/internal/service/elasticache/replication_group_migrate.go index 4518254d63e..f94db85fcc3 100644 --- a/internal/service/elasticache/replication_group_migrate.go +++ b/internal/service/elasticache/replication_group_migrate.go @@ -7,9 +7,10 @@ import ( "context" "strings" - "github.com/aws/aws-sdk-go/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/sdkv2/types/nullable" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -22,7 +23,7 @@ func replicationGroupStateUpgradeV1(ctx context.Context, rawState map[string]int } // Set auth_token_update_strategy to new default value - rawState["auth_token_update_strategy"] = elasticache.AuthTokenUpdateStrategyTypeRotate + rawState["auth_token_update_strategy"] = awstypes.AuthTokenUpdateStrategyTypeRotate return rawState, nil } @@ -120,10 +121,10 @@ func resourceReplicationGroupConfigV1() *schema.Resource { }, }, "ip_discovery": { 
- Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(elasticache.IpDiscovery_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.IpDiscovery](), }, "log_delivery_configuration": { Type: schema.TypeSet, @@ -132,23 +133,23 @@ func resourceReplicationGroupConfigV1() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "destination_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.DestinationType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.DestinationType](), }, names.AttrDestination: { Type: schema.TypeString, Required: true, }, "log_format": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.LogFormat_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogFormat](), }, "log_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.LogType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogType](), }, }, }, @@ -175,11 +176,11 @@ func resourceReplicationGroupConfigV1() *schema.Resource { Default: false, }, "network_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(elasticache.NetworkType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.NetworkType](), }, "node_type": { Type: schema.TypeString, diff --git a/internal/service/elasticache/replication_group_test.go b/internal/service/elasticache/replication_group_test.go index 1e710126458..d0e66f53770 100644 --- a/internal/service/elasticache/replication_group_test.go +++ 
b/internal/service/elasticache/replication_group_test.go @@ -13,8 +13,9 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -32,7 +33,7 @@ func TestAccElastiCacheReplicationGroup_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -76,7 +77,7 @@ func TestAccElastiCacheReplicationGroup_basic_v5(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -113,7 +114,7 @@ func TestAccElastiCacheReplicationGroup_uppercase(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -146,7 +147,7 @@ func TestAccElastiCacheReplicationGroup_EngineVersion_v7(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -181,7 +182,7 @@ func TestAccElastiCacheReplicationGroup_EngineVersion_update(t *testing.T) { t.Skip("skipping 
long-running test in short mode") } - var v1, v2, v3, v4, v5, v6 elasticache.ReplicationGroup + var v1, v2, v3, v4, v5, v6 awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -260,7 +261,7 @@ func TestAccElastiCacheReplicationGroup_EngineVersion_6xToRealVersion(t *testing t.Skip("skipping long-running test in short mode") } - var v1, v2 elasticache.ReplicationGroup + var v1, v2 awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -299,7 +300,7 @@ func TestAccElastiCacheReplicationGroup_disappears(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -327,7 +328,7 @@ func TestAccElastiCacheReplicationGroup_updateDescription(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -369,7 +370,7 @@ func TestAccElastiCacheReplicationGroup_updateMaintenanceWindow(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -409,7 +410,7 @@ func TestAccElastiCacheReplicationGroup_updateUserGroups(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) userGroup := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -453,7 +454,7 @@ 
func TestAccElastiCacheReplicationGroup_updateNodeSize(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -496,7 +497,7 @@ func TestAccElastiCacheReplicationGroup_updateParameterGroup(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup parameterGroupResourceName1 := "aws_elasticache_parameter_group.test.0" parameterGroupResourceName2 := "aws_elasticache_parameter_group.test.1" resourceName := "aws_elasticache_replication_group.test" @@ -542,7 +543,7 @@ func TestAccElastiCacheReplicationGroup_authToken(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" token1 := sdkacctest.RandString(16) @@ -569,21 +570,21 @@ func TestAccElastiCacheReplicationGroup_authToken(t *testing.T) { { // When adding an auth_token to a previously passwordless replication // group, the SET strategy can be used. 
- Config: testAccReplicationGroupConfig_authToken(rName, token1, elasticache.AuthTokenUpdateStrategyTypeSet), + Config: testAccReplicationGroupConfig_authToken(rName, token1, string(awstypes.AuthTokenUpdateStrategyTypeSet)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "transit_encryption_enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "auth_token", token1), - resource.TestCheckResourceAttr(resourceName, "auth_token_update_strategy", elasticache.AuthTokenUpdateStrategyTypeSet), + resource.TestCheckResourceAttr(resourceName, "auth_token_update_strategy", string(awstypes.AuthTokenUpdateStrategyTypeSet)), ), }, { - Config: testAccReplicationGroupConfig_authToken(rName, token2, elasticache.AuthTokenUpdateStrategyTypeRotate), + Config: testAccReplicationGroupConfig_authToken(rName, token2, string(awstypes.AuthTokenUpdateStrategyTypeRotate)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "transit_encryption_enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "auth_token", token2), - resource.TestCheckResourceAttr(resourceName, "auth_token_update_strategy", elasticache.AuthTokenUpdateStrategyTypeRotate), + resource.TestCheckResourceAttr(resourceName, "auth_token_update_strategy", string(awstypes.AuthTokenUpdateStrategyTypeRotate)), ), }, { @@ -591,12 +592,12 @@ func TestAccElastiCacheReplicationGroup_authToken(t *testing.T) { // should include the auth_token to be kept and the SET auth_token_update_strategy. 
// // Ref: https://docs.aws.amazon.com/AmazonElastiCache/latest/red-ug/auth.html#auth-modifyng-token - Config: testAccReplicationGroupConfig_authToken(rName, token2, elasticache.AuthTokenUpdateStrategyTypeSet), + Config: testAccReplicationGroupConfig_authToken(rName, token2, string(awstypes.AuthTokenUpdateStrategyTypeSet)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, "transit_encryption_enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "auth_token", token2), - resource.TestCheckResourceAttr(resourceName, "auth_token_update_strategy", elasticache.AuthTokenUpdateStrategyTypeSet), + resource.TestCheckResourceAttr(resourceName, "auth_token_update_strategy", string(awstypes.AuthTokenUpdateStrategyTypeSet)), ), }, }, @@ -609,7 +610,7 @@ func TestAccElastiCacheReplicationGroup_stateUpgrade5270(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -648,7 +649,7 @@ func TestAccElastiCacheReplicationGroup_vpc(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup resourceName := "aws_elasticache_replication_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -682,7 +683,7 @@ func TestAccElastiCacheReplicationGroup_multiAzNotInVPC(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -720,7 +721,7 @@ func TestAccElastiCacheReplicationGroup_multiAzNotInVPC_repeated(t *testing.T) { t.Skip("skipping long-running test in short mode") 
} - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -760,7 +761,7 @@ func TestAccElastiCacheReplicationGroup_multiAzInVPC(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -781,11 +782,11 @@ func TestAccElastiCacheReplicationGroup_multiAzInVPC(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet(resourceName, "primary_endpoint_address"), func(s *terraform.State) error { - return resource.TestMatchResourceAttr(resourceName, "primary_endpoint_address", regexache.MustCompile(fmt.Sprintf("%s\\..+\\.%s", aws.StringValue(rg.ReplicationGroupId), acctest.PartitionDNSSuffix())))(s) + return resource.TestMatchResourceAttr(resourceName, "primary_endpoint_address", regexache.MustCompile(fmt.Sprintf("%s\\..+\\.%s", aws.ToString(rg.ReplicationGroupId), acctest.PartitionDNSSuffix())))(s) }, resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint_address"), func(s *terraform.State) error { - return resource.TestMatchResourceAttr(resourceName, "reader_endpoint_address", regexache.MustCompile(fmt.Sprintf("%s-ro\\..+\\.%s", aws.StringValue(rg.ReplicationGroupId), acctest.PartitionDNSSuffix())))(s) + return resource.TestMatchResourceAttr(resourceName, "reader_endpoint_address", regexache.MustCompile(fmt.Sprintf("%s-ro\\..+\\.%s", aws.ToString(rg.ReplicationGroupId), acctest.PartitionDNSSuffix())))(s) }, ), }, @@ -805,7 +806,7 @@ func TestAccElastiCacheReplicationGroup_deprecatedAvailabilityZones_multiAzInVPC t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -826,11 +827,11 @@ func TestAccElastiCacheReplicationGroup_deprecatedAvailabilityZones_multiAzInVPC resource.TestCheckResourceAttr(resourceName, "snapshot_retention_limit", "7"), resource.TestCheckResourceAttrSet(resourceName, "primary_endpoint_address"), func(s *terraform.State) error { - return resource.TestMatchResourceAttr(resourceName, "primary_endpoint_address", regexache.MustCompile(fmt.Sprintf("%s\\..+\\.%s", aws.StringValue(rg.ReplicationGroupId), acctest.PartitionDNSSuffix())))(s) + return resource.TestMatchResourceAttr(resourceName, "primary_endpoint_address", regexache.MustCompile(fmt.Sprintf("%s\\..+\\.%s", aws.ToString(rg.ReplicationGroupId), acctest.PartitionDNSSuffix())))(s) }, resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint_address"), func(s *terraform.State) error { - return resource.TestMatchResourceAttr(resourceName, "reader_endpoint_address", regexache.MustCompile(fmt.Sprintf("%s-ro\\..+\\.%s", aws.StringValue(rg.ReplicationGroupId), acctest.PartitionDNSSuffix())))(s) + return resource.TestMatchResourceAttr(resourceName, "reader_endpoint_address", regexache.MustCompile(fmt.Sprintf("%s-ro\\..+\\.%s", aws.ToString(rg.ReplicationGroupId), acctest.PartitionDNSSuffix())))(s) }, ), }, @@ -868,7 +869,7 @@ func TestAccElastiCacheReplicationGroup_ipDiscovery(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup resourceName := "aws_elasticache_replication_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -909,7 +910,7 @@ func TestAccElastiCacheReplicationGroup_networkType(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup resourceName := "aws_elasticache_replication_group.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -951,7 +952,7 @@ func TestAccElastiCacheReplicationGroup_ClusterMode_basic(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -993,7 +994,7 @@ func TestAccElastiCacheReplicationGroup_ClusterMode_nonClusteredParameterGroup(t t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1037,7 +1038,7 @@ func TestAccElastiCacheReplicationGroup_ClusterModeUpdateNumNodeGroups_scaleUp(t t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" clusterDataSourcePrefix := "data.aws_elasticache_cluster.test" @@ -1087,7 +1088,7 @@ func TestAccElastiCacheReplicationGroup_ClusterModeUpdateNumNodeGroups_scaleDown t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1131,7 +1132,7 @@ func TestAccElastiCacheReplicationGroup_ClusterMode_updateReplicasPerNodeGroup(t t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1187,7 +1188,7 @@ func TestAccElastiCacheReplicationGroup_ClusterModeUpdateNumNodeGroupsAndReplica t.Skip("skipping long-running test in short mode") } - 
var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1231,7 +1232,7 @@ func TestAccElastiCacheReplicationGroup_ClusterModeUpdateNumNodeGroupsAndReplica t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1275,7 +1276,7 @@ func TestAccElastiCacheReplicationGroup_ClusterMode_singleNode(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1319,7 +1320,7 @@ func TestAccElastiCacheReplicationGroup_ClusterMode_updateFromDisabled_Compatibl t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1393,7 +1394,7 @@ func TestAccElastiCacheReplicationGroup_enableSnapshotting(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1433,7 +1434,7 @@ func TestAccElastiCacheReplicationGroup_transitEncryptionWithAuthToken(t *testin t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" authToken := sdkacctest.RandString(16) @@ -1469,7 +1470,7 @@ func 
TestAccElastiCacheReplicationGroup_transitEncryption5x(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg1, rg2 elasticache.ReplicationGroup + var rg1, rg2 awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1513,7 +1514,7 @@ func TestAccElastiCacheReplicationGroup_transitEncryption7x(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg1, rg2, rg3, rg4 elasticache.ReplicationGroup + var rg1, rg2, rg3, rg4 awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1524,11 +1525,11 @@ func TestAccElastiCacheReplicationGroup_transitEncryption7x(t *testing.T) { CheckDestroy: testAccCheckReplicationGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccReplicationGroupConfig_transitEncryptionEnabled7x(rName, elasticache.TransitEncryptionModePreferred), + Config: testAccReplicationGroupConfig_transitEncryptionEnabled7x(rName, string(awstypes.TransitEncryptionModePreferred)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg1), resource.TestCheckResourceAttr(resourceName, "transit_encryption_enabled", acctest.CtTrue), - resource.TestCheckResourceAttr(resourceName, "transit_encryption_mode", elasticache.TransitEncryptionModePreferred), + resource.TestCheckResourceAttr(resourceName, "transit_encryption_mode", string(awstypes.TransitEncryptionModePreferred)), ), }, { @@ -1539,22 +1540,22 @@ func TestAccElastiCacheReplicationGroup_transitEncryption7x(t *testing.T) { }, { // With Redis engine versions >= 7.0.5, transit_encryption_mode can be modified in-place. 
- Config: testAccReplicationGroupConfig_transitEncryptionEnabled7x(rName, elasticache.TransitEncryptionModeRequired), + Config: testAccReplicationGroupConfig_transitEncryptionEnabled7x(rName, string(awstypes.TransitEncryptionModeRequired)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg2), testAccCheckReplicationGroupNotRecreated(&rg1, &rg2), resource.TestCheckResourceAttr(resourceName, "transit_encryption_enabled", acctest.CtTrue), - resource.TestCheckResourceAttr(resourceName, "transit_encryption_mode", elasticache.TransitEncryptionModeRequired), + resource.TestCheckResourceAttr(resourceName, "transit_encryption_mode", string(awstypes.TransitEncryptionModeRequired)), ), }, { // Before disabling transit encryption, mode must be transitioned back to "preferred" first. - Config: testAccReplicationGroupConfig_transitEncryptionEnabled7x(rName, elasticache.TransitEncryptionModePreferred), + Config: testAccReplicationGroupConfig_transitEncryptionEnabled7x(rName, string(awstypes.TransitEncryptionModePreferred)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg3), testAccCheckReplicationGroupNotRecreated(&rg2, &rg3), resource.TestCheckResourceAttr(resourceName, "transit_encryption_enabled", acctest.CtTrue), - resource.TestCheckResourceAttr(resourceName, "transit_encryption_mode", elasticache.TransitEncryptionModePreferred), + resource.TestCheckResourceAttr(resourceName, "transit_encryption_mode", string(awstypes.TransitEncryptionModePreferred)), ), }, { @@ -1576,7 +1577,7 @@ func TestAccElastiCacheReplicationGroup_enableAtRestEncryption(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1609,7 +1610,7 @@ func 
TestAccElastiCacheReplicationGroup_useCMKKMSKeyID(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" resource.ParallelTest(t, resource.TestCase{ @@ -1635,7 +1636,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClusters_basic(t *testing.T) t.Skip("skipping long-running test in short mode") } - var replicationGroup elasticache.ReplicationGroup + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" clusterDataSourcePrefix := "data.aws_elasticache_cluster.test" @@ -1701,7 +1702,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersFailover_autoFailover t.Skip("skipping long-running test in short mode") } - var replicationGroup elasticache.ReplicationGroup + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1733,7 +1734,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersFailover_autoFailover { PreConfig: func() { // Ensure that primary is on the node we are trying to delete - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) timeout := 40 * time.Minute if err := resourceReplicationGroupSetPrimaryClusterID(ctx, conn, rName, formatReplicationGroupClusterID(rName, 3), timeout); err != nil { @@ -1759,7 +1760,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersFailover_autoFailover t.Skip("skipping long-running test in short mode") } - var replicationGroup elasticache.ReplicationGroup + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName 
:= "aws_elasticache_replication_group.test" @@ -1785,7 +1786,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersFailover_autoFailover { PreConfig: func() { // Ensure that primary is on the node we are trying to delete - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) timeout := 40 * time.Minute // Must disable automatic failover first @@ -1822,7 +1823,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClusters_multiAZEnabled(t *te t.Skip("skipping long-running test in short mode") } - var replicationGroup elasticache.ReplicationGroup + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1848,7 +1849,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClusters_multiAZEnabled(t *te { PreConfig: func() { // Ensure that primary is on the node we are trying to delete - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) timeout := 40 * time.Minute // Must disable automatic failover first @@ -1885,7 +1886,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersMemberClusterDisappea t.Skip("skipping long-running test in short mode") } - var replicationGroup elasticache.ReplicationGroup + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1906,7 +1907,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersMemberClusterDisappea { PreConfig: func() { // Remove one of the Cache Clusters - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) timeout := 40 * time.Minute cacheClusterID := 
formatReplicationGroupClusterID(rName, 2) @@ -1936,7 +1937,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersMemberClusterDisappea t.Skip("skipping long-running test in short mode") } - var replicationGroup elasticache.ReplicationGroup + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -1957,7 +1958,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersMemberClusterDisappea { PreConfig: func() { // Remove one of the Cache Clusters - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) timeout := 40 * time.Minute cacheClusterID := formatReplicationGroupClusterID(rName, 2) @@ -1987,7 +1988,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersMemberClusterDisappea t.Skip("skipping long-running test in short mode") } - var replicationGroup elasticache.ReplicationGroup + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -2008,7 +2009,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersMemberClusterDisappea { PreConfig: func() { // Remove one of the Cache Clusters - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) timeout := 40 * time.Minute cacheClusterID := formatReplicationGroupClusterID(rName, 2) @@ -2038,7 +2039,7 @@ func TestAccElastiCacheReplicationGroup_NumberCacheClustersMemberClusterDisappea t.Skip("skipping long-running test in short mode") } - var replicationGroup elasticache.ReplicationGroup + var replicationGroup awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -2059,7 +2060,7 @@ func 
TestAccElastiCacheReplicationGroup_NumberCacheClustersMemberClusterDisappea { PreConfig: func() { // Remove one of the Cache Clusters - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) timeout := 40 * time.Minute cacheClusterID := formatReplicationGroupClusterID(rName, 2) @@ -2089,7 +2090,7 @@ func TestAccElastiCacheReplicationGroup_tags(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" clusterDataSourcePrefix := "data.aws_elasticache_cluster.test" @@ -2144,7 +2145,7 @@ func TestAccElastiCacheReplicationGroup_tagWithOtherModification(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" clusterDataSourcePrefix := "data.aws_elasticache_cluster.test" @@ -2185,7 +2186,7 @@ func TestAccElastiCacheReplicationGroup_finalSnapshot(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -2212,7 +2213,7 @@ func TestAccElastiCacheReplicationGroup_autoMinorVersionUpgrade(t *testing.T) { t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -2298,8 +2299,8 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupID_basic(t *testin t.Skip("skipping long-running test in 
short mode") } - var rg elasticache.ReplicationGroup - var pg elasticache.CacheParameterGroup + var rg awstypes.ReplicationGroup + var pg awstypes.CacheParameterGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" primaryGroupResourceName := "aws_elasticache_replication_group.primary" @@ -2347,8 +2348,8 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupID_full(t *testing t.Skip("skipping long-running test in short mode") } - var rg1, rg2 elasticache.ReplicationGroup - var pg1, pg2 elasticache.CacheParameterGroup + var rg1, rg2 awstypes.ReplicationGroup + var pg1, pg2 awstypes.CacheParameterGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" primaryGroupResourceName := "aws_elasticache_replication_group.primary" @@ -2417,7 +2418,7 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupID_disappears(t *t t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -2448,8 +2449,8 @@ func TestAccElastiCacheReplicationGroup_GlobalReplicationGroupIDClusterMode_basi t.Skip("skipping long-running test in short mode") } - var rg1, rg2 elasticache.ReplicationGroup - var pg1, pg2 elasticache.CacheParameterGroup + var rg1, rg2 awstypes.ReplicationGroup + var pg1, pg2 awstypes.CacheParameterGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" primaryGroupResourceName := "aws_elasticache_replication_group.primary" @@ -2536,8 +2537,8 @@ func TestAccElastiCacheReplicationGroup_dataTiering(t *testing.T) { } var ( - rg elasticache.ReplicationGroup - version elasticache.CacheEngineVersion + rg awstypes.ReplicationGroup + version awstypes.CacheEngineVersion ) rName 
:= sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elasticache_replication_group.test" @@ -2576,7 +2577,7 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -2587,7 +2588,7 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C CheckDestroy: testAccCheckReplicationGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, false, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText), + Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, false, true, string(awstypes.DestinationTypeCloudWatchLogs), string(awstypes.LogFormatText), true, string(awstypes.DestinationTypeCloudWatchLogs), string(awstypes.LogFormatText)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -2613,7 +2614,7 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C }, }, { - Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, false, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson), + Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, false, true, string(awstypes.DestinationTypeCloudWatchLogs), string(awstypes.LogFormatText), true, string(awstypes.DestinationTypeKinesisFirehose), string(awstypes.LogFormatJson)), Check: 
resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -2629,7 +2630,7 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C ), }, { - Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, false, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson, false, "", ""), + Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, false, true, string(awstypes.DestinationTypeKinesisFirehose), string(awstypes.LogFormatJson), false, "", ""), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -2679,7 +2680,7 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C t.Skip("skipping long-running test in short mode") } - var rg elasticache.ReplicationGroup + var rg awstypes.ReplicationGroup rName := sdkacctest.RandomWithPrefix("tf-acc-test") resourceName := "aws_elasticache_replication_group.test" @@ -2690,7 +2691,7 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C CheckDestroy: testAccCheckReplicationGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText), + Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, true, string(awstypes.DestinationTypeCloudWatchLogs), string(awstypes.LogFormatText), true, string(awstypes.DestinationTypeCloudWatchLogs), string(awstypes.LogFormatText)), Check: resource.ComposeAggregateTestCheckFunc( 
testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -2717,7 +2718,7 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C }, }, { - Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, true, elasticache.DestinationTypeCloudwatchLogs, elasticache.LogFormatText, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson), + Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, true, string(awstypes.DestinationTypeCloudWatchLogs), string(awstypes.LogFormatText), true, string(awstypes.DestinationTypeKinesisFirehose), string(awstypes.LogFormatJson)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -2734,7 +2735,7 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C ), }, { - Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, true, elasticache.DestinationTypeKinesisFirehose, elasticache.LogFormatJson, false, "", ""), + Config: testAccReplicationGroupConfig_dataSourceEngineRedisLogDeliveryConfigurations(rName, true, true, string(awstypes.DestinationTypeKinesisFirehose), string(awstypes.LogFormatJson), false, "", ""), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationGroupExists(ctx, resourceName, &rg), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "redis"), @@ -2780,14 +2781,14 @@ func TestAccElastiCacheReplicationGroup_Engine_Redis_LogDeliveryConfigurations_C }) } -func testAccCheckReplicationGroupExists(ctx context.Context, n string, v *elasticache.ReplicationGroup) resource.TestCheckFunc { +func testAccCheckReplicationGroupExists(ctx context.Context, n string, v *awstypes.ReplicationGroup) 
resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) output, err := tfelasticache.FindReplicationGroupByID(ctx, conn, rs.Primary.ID) @@ -2803,7 +2804,7 @@ func testAccCheckReplicationGroupExists(ctx context.Context, n string, v *elasti func testAccCheckReplicationGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elasticache_replication_group" { @@ -2826,18 +2827,18 @@ func testAccCheckReplicationGroupDestroy(ctx context.Context) resource.TestCheck } } -func testAccCheckReplicationGroupParameterGroupExists(ctx context.Context, rg *elasticache.ReplicationGroup, v *elasticache.CacheParameterGroup) resource.TestCheckFunc { +func testAccCheckReplicationGroupParameterGroupExists(ctx context.Context, rg *awstypes.ReplicationGroup, v *awstypes.CacheParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) - cacheClusterID := aws.StringValue(rg.NodeGroups[0].NodeGroupMembers[0].CacheClusterId) + cacheClusterID := aws.ToString(rg.NodeGroups[0].NodeGroupMembers[0].CacheClusterId) cluster, err := tfelasticache.FindCacheClusterByID(ctx, conn, cacheClusterID) if err != nil { return fmt.Errorf("reading ElastiCache Cluster (%s): %w", cacheClusterID, err) } - name := aws.StringValue(cluster.CacheParameterGroup.CacheParameterGroupName) + name := aws.ToString(cluster.CacheParameterGroup.CacheParameterGroupName) 
output, err := tfelasticache.FindCacheParameterGroupByName(ctx, conn, name) if err != nil { @@ -2850,11 +2851,11 @@ func testAccCheckReplicationGroupParameterGroupExists(ctx context.Context, rg *e } } -func testAccCheckGlobalReplicationGroupMemberParameterGroupDestroy(ctx context.Context, v *elasticache.CacheParameterGroup) resource.TestCheckFunc { +func testAccCheckGlobalReplicationGroupMemberParameterGroupDestroy(ctx context.Context, v *awstypes.CacheParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) - name := aws.StringValue(v.CacheParameterGroupName) + name := aws.ToString(v.CacheParameterGroupName) _, err := tfelasticache.FindCacheParameterGroupByName(ctx, conn, name) if tfresource.NotFound(err) { @@ -2876,7 +2877,7 @@ func testAccCheckReplicationGroupUserGroup(ctx context.Context, n, userGroupID s return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) id := rs.Primary.ID output, err := tfelasticache.FindReplicationGroupByID(ctx, conn, id) @@ -2889,7 +2890,7 @@ func testAccCheckReplicationGroupUserGroup(ctx context.Context, n, userGroupID s return fmt.Errorf("ElastiCache Replication Group (%s) was not assigned any User Groups", id) } - if v := aws.StringValue(output.UserGroupIds[0]); v != userGroupID { + if v := output.UserGroupIds[0]; v != userGroupID { return fmt.Errorf("ElastiCache Replication Group (%s) was not assigned User Group (%s), User Group was (%s) instead", n, userGroupID, v) } @@ -2897,9 +2898,9 @@ func testAccCheckReplicationGroupUserGroup(ctx context.Context, n, userGroupID s } } -func testAccCheckReplicationGroupRecreated(i, j *elasticache.ReplicationGroup) resource.TestCheckFunc { +func testAccCheckReplicationGroupRecreated(i, j 
*awstypes.ReplicationGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.TimeValue(i.ReplicationGroupCreateTime).Equal(aws.TimeValue(j.ReplicationGroupCreateTime)) { + if aws.ToTime(i.ReplicationGroupCreateTime).Equal(aws.ToTime(j.ReplicationGroupCreateTime)) { return errors.New("ElastiCache Replication Group not recreated") } @@ -2907,9 +2908,9 @@ func testAccCheckReplicationGroupRecreated(i, j *elasticache.ReplicationGroup) r } } -func testAccCheckReplicationGroupNotRecreated(i, j *elasticache.ReplicationGroup) resource.TestCheckFunc { +func testAccCheckReplicationGroupNotRecreated(i, j *awstypes.ReplicationGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if !aws.TimeValue(i.ReplicationGroupCreateTime).Equal(aws.TimeValue(j.ReplicationGroupCreateTime)) { + if !aws.ToTime(i.ReplicationGroupCreateTime).Equal(aws.ToTime(j.ReplicationGroupCreateTime)) { return errors.New("ElastiCache Replication Group recreated") } @@ -2919,8 +2920,8 @@ func testAccCheckReplicationGroupNotRecreated(i, j *elasticache.ReplicationGroup func testCheckEngineStuffDefault(ctx context.Context, resourceName string) resource.TestCheckFunc { var ( - version elasticache.CacheEngineVersion - parameterGroup elasticache.CacheParameterGroup + version awstypes.CacheEngineVersion + parameterGroup awstypes.CacheParameterGroup ) checks := []resource.TestCheckFunc{ @@ -2940,11 +2941,11 @@ func testCheckEngineStuffDefault(ctx context.Context, resourceName string) resou return resource.ComposeAggregateTestCheckFunc(checks...) 
} -func testCheckRedisEngineVersionLatest(ctx context.Context, v *elasticache.CacheEngineVersion) resource.TestCheckFunc { +func testCheckRedisEngineVersionLatest(ctx context.Context, v *awstypes.CacheEngineVersion) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) - versions, err := conn.DescribeCacheEngineVersionsWithContext(ctx, &elasticache.DescribeCacheEngineVersionsInput{ + versions, err := conn.DescribeCacheEngineVersions(ctx, &elasticache.DescribeCacheEngineVersionsInput{ Engine: aws.String("redis"), DefaultOnly: aws.Bool(true), }) @@ -2958,22 +2959,22 @@ func testCheckRedisEngineVersionLatest(ctx context.Context, v *elasticache.Cache return fmt.Errorf("too many results: %d", l) } - *v = *(versions.CacheEngineVersions[0]) + *v = versions.CacheEngineVersions[0] return nil } } -func testCheckRedisParameterGroupDefault(ctx context.Context, version *elasticache.CacheEngineVersion, v *elasticache.CacheParameterGroup) resource.TestCheckFunc { +func testCheckRedisParameterGroupDefault(ctx context.Context, version *awstypes.CacheEngineVersion, v *awstypes.CacheParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) output, err := tfelasticache.FindCacheParameterGroup(ctx, conn, &elasticache.DescribeCacheParameterGroupsInput{}, tfslices.PredicateAnd( - func(v *elasticache.CacheParameterGroup) bool { - return aws.StringValue(v.CacheParameterGroupFamily) == aws.StringValue(version.CacheParameterGroupFamily) + func(v *awstypes.CacheParameterGroup) bool { + return aws.ToString(v.CacheParameterGroupFamily) == aws.ToString(version.CacheParameterGroupFamily) }, - func(v *elasticache.CacheParameterGroup) bool { - name := 
aws.StringValue(v.CacheParameterGroupName) + func(v *awstypes.CacheParameterGroup) bool { + name := aws.ToString(v.CacheParameterGroupName) return strings.HasPrefix(name, "default.") && !strings.HasSuffix(name, ".cluster.on") }, )) @@ -2990,8 +2991,8 @@ func testCheckRedisParameterGroupDefault(ctx context.Context, version *elasticac func testCheckEngineStuffClusterEnabledDefault(ctx context.Context, resourceName string) resource.TestCheckFunc { var ( - version elasticache.CacheEngineVersion - parameterGroup elasticache.CacheParameterGroup + version awstypes.CacheEngineVersion + parameterGroup awstypes.CacheParameterGroup ) checks := []resource.TestCheckFunc{ @@ -3011,16 +3012,16 @@ func testCheckEngineStuffClusterEnabledDefault(ctx context.Context, resourceName return resource.ComposeAggregateTestCheckFunc(checks...) } -func testCheckRedisParameterGroupClusterEnabledDefault(ctx context.Context, version *elasticache.CacheEngineVersion, v *elasticache.CacheParameterGroup) resource.TestCheckFunc { +func testCheckRedisParameterGroupClusterEnabledDefault(ctx context.Context, version *awstypes.CacheEngineVersion, v *awstypes.CacheParameterGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) output, err := tfelasticache.FindCacheParameterGroup(ctx, conn, &elasticache.DescribeCacheParameterGroupsInput{}, tfslices.PredicateAnd( - func(v *elasticache.CacheParameterGroup) bool { - return aws.StringValue(v.CacheParameterGroupFamily) == aws.StringValue(version.CacheParameterGroupFamily) + func(v *awstypes.CacheParameterGroup) bool { + return aws.ToString(v.CacheParameterGroupFamily) == aws.ToString(version.CacheParameterGroupFamily) }, - func(v *elasticache.CacheParameterGroup) bool { - name := aws.StringValue(v.CacheParameterGroupName) + func(v *awstypes.CacheParameterGroup) bool { + name := 
aws.ToString(v.CacheParameterGroupName) return strings.HasPrefix(name, "default.") && strings.HasSuffix(name, ".cluster.on") }, )) @@ -4439,7 +4440,7 @@ data "aws_elasticache_replication_group" "test" { `, rName, enableClusterMode, slowLogDeliveryEnabled, slowDeliveryDestination, slowDeliveryFormat, engineLogDeliveryEnabled, engineDeliveryDestination, engineLogDeliveryFormat) } -func resourceReplicationGroupDisableAutomaticFailover(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, timeout time.Duration) error { +func resourceReplicationGroupDisableAutomaticFailover(ctx context.Context, conn *elasticache.Client, replicationGroupID string, timeout time.Duration) error { return resourceReplicationGroupModify(ctx, conn, timeout, &elasticache.ModifyReplicationGroupInput{ ReplicationGroupId: aws.String(replicationGroupID), ApplyImmediately: aws.Bool(true), @@ -4448,7 +4449,7 @@ func resourceReplicationGroupDisableAutomaticFailover(ctx context.Context, conn }) } -func resourceReplicationGroupEnableAutomaticFailover(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID string, multiAZEnabled bool, timeout time.Duration) error { +func resourceReplicationGroupEnableAutomaticFailover(ctx context.Context, conn *elasticache.Client, replicationGroupID string, multiAZEnabled bool, timeout time.Duration) error { return resourceReplicationGroupModify(ctx, conn, timeout, &elasticache.ModifyReplicationGroupInput{ ReplicationGroupId: aws.String(replicationGroupID), ApplyImmediately: aws.Bool(true), @@ -4457,7 +4458,7 @@ func resourceReplicationGroupEnableAutomaticFailover(ctx context.Context, conn * }) } -func resourceReplicationGroupSetPrimaryClusterID(ctx context.Context, conn *elasticache.ElastiCache, replicationGroupID, primaryClusterID string, timeout time.Duration) error { +func resourceReplicationGroupSetPrimaryClusterID(ctx context.Context, conn *elasticache.Client, replicationGroupID, primaryClusterID string, timeout 
time.Duration) error { return resourceReplicationGroupModify(ctx, conn, timeout, &elasticache.ModifyReplicationGroupInput{ ReplicationGroupId: aws.String(replicationGroupID), ApplyImmediately: aws.Bool(true), @@ -4465,8 +4466,8 @@ func resourceReplicationGroupSetPrimaryClusterID(ctx context.Context, conn *elas }) } -func resourceReplicationGroupModify(ctx context.Context, conn *elasticache.ElastiCache, timeout time.Duration, input *elasticache.ModifyReplicationGroupInput) error { - _, err := conn.ModifyReplicationGroupWithContext(ctx, input) +func resourceReplicationGroupModify(ctx context.Context, conn *elasticache.Client, timeout time.Duration, input *elasticache.ModifyReplicationGroupInput) error { + _, err := conn.ModifyReplicationGroup(ctx, input) if err != nil { return fmt.Errorf("error requesting modification: %w", err) } @@ -4474,7 +4475,7 @@ func resourceReplicationGroupModify(ctx context.Context, conn *elasticache.Elast const ( delay = 30 * time.Second ) - _, err = tfelasticache.WaitReplicationGroupAvailable(ctx, conn, aws.StringValue(input.ReplicationGroupId), timeout, delay) + _, err = tfelasticache.WaitReplicationGroupAvailable(ctx, conn, aws.ToString(input.ReplicationGroupId), timeout, delay) if err != nil { return fmt.Errorf("error waiting for modification: %w", err) } diff --git a/internal/service/elasticache/serverless_cache.go b/internal/service/elasticache/serverless_cache.go index 5de88df8118..cdc148a212e 100644 --- a/internal/service/elasticache/serverless_cache.go +++ b/internal/service/elasticache/serverless_cache.go @@ -195,15 +195,9 @@ func (r *serverlessCacheResource) Schema(ctx context.Context, request resource.S Attributes: map[string]schema.Attribute{ "maximum": schema.Int64Attribute{ Optional: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.RequiresReplace(), - }, }, "minimum": schema.Int64Attribute{ Optional: true, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.RequiresReplace(), - }, }, names.AttrUnit: 
schema.StringAttribute{ CustomType: fwtypes.StringEnumType[awstypes.DataStorageUnit](), @@ -224,18 +218,12 @@ func (r *serverlessCacheResource) Schema(ctx context.Context, request resource.S Validators: []validator.Int64{ int64validator.Between(1000, 15000000), }, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.RequiresReplace(), - }, }, "minimum": schema.Int64Attribute{ Optional: true, Validators: []validator.Int64{ int64validator.Between(1000, 15000000), }, - PlanModifiers: []planmodifier.Int64{ - int64planmodifier.RequiresReplace(), - }, }, }, }, @@ -267,7 +255,7 @@ func (r *serverlessCacheResource) Create(ctx context.Context, request resource.C return } - input.Tags = getTagsInV2(ctx) + input.Tags = getTagsIn(ctx) _, err := conn.CreateServerlessCache(ctx, input) diff --git a/internal/service/elasticache/serverless_cache_test.go b/internal/service/elasticache/serverless_cache_test.go index a8bdb9a6b24..eb7b2f82203 100644 --- a/internal/service/elasticache/serverless_cache_test.go +++ b/internal/service/elasticache/serverless_cache_test.go @@ -5,9 +5,11 @@ package elasticache_test import ( "context" + "errors" "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -270,7 +272,7 @@ func TestAccElastiCacheServerlessCache_updatesc(t *testing.T) { descriptionOld := "Memcached Serverless Cluster" descriptionNew := "Memcached Serverless Cluster updated" resourceName := "aws_elasticache_serverless_cache.test" - var serverlessElasticCache awstypes.ServerlessCache + var v1, v2, v3, v4 awstypes.ServerlessCache resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -283,7 +285,7 @@ func TestAccElastiCacheServerlessCache_updatesc(t *testing.T) { { Config: testAccServerlessCacheConfig_updatesc(rName, descriptionOld, 1, 1000), 
Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckServerlessCacheExists(ctx, resourceName, &serverlessElasticCache), + testAccCheckServerlessCacheExists(ctx, resourceName, &v1), resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), resource.TestCheckResourceAttrSet(resourceName, "cache_usage_limits.#"), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreateTime), @@ -302,10 +304,47 @@ func TestAccElastiCacheServerlessCache_updatesc(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccServerlessCacheConfig_updatesc(rName, descriptionOld, 2, 1000), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServerlessCacheExists(ctx, resourceName, &v2), + testAccCheckServerlessCacheNotRecreated(&v1, &v2), + resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), + resource.TestCheckResourceAttrSet(resourceName, "cache_usage_limits.#"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreateTime), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, descriptionOld), + resource.TestCheckResourceAttrSet(resourceName, "endpoint.#"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrEngine), + resource.TestCheckResourceAttrSet(resourceName, "full_engine_version"), + resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint.#"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrStatus), + resource.TestCheckResourceAttrSet(resourceName, "subnet_ids.#"), + ), + }, + { + Config: testAccServerlessCacheConfig_updatesc(rName, descriptionNew, 2, 1000), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckServerlessCacheExists(ctx, resourceName, &v3), + testAccCheckServerlessCacheNotRecreated(&v2, &v3), + resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), + resource.TestCheckResourceAttrSet(resourceName, "cache_usage_limits.#"), + 
resource.TestCheckResourceAttrSet(resourceName, names.AttrCreateTime), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, descriptionNew), + resource.TestCheckResourceAttrSet(resourceName, "endpoint.#"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrEngine), + resource.TestCheckResourceAttrSet(resourceName, "full_engine_version"), + resource.TestCheckResourceAttrSet(resourceName, "reader_endpoint.#"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrStatus), + resource.TestCheckResourceAttrSet(resourceName, "subnet_ids.#"), + ), + }, { Config: testAccServerlessCacheConfig_updatesc(rName, descriptionNew, 2, 1010), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckServerlessCacheExists(ctx, resourceName, &serverlessElasticCache), + testAccCheckServerlessCacheExists(ctx, resourceName, &v4), + testAccCheckServerlessCacheNotRecreated(&v3, &v4), resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), resource.TestCheckResourceAttrSet(resourceName, "cache_usage_limits.#"), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreateTime), @@ -461,6 +500,16 @@ func testAccCheckServerlessCacheDestroy(ctx context.Context) resource.TestCheckF } } +func testAccCheckServerlessCacheNotRecreated(i, j *awstypes.ServerlessCache) resource.TestCheckFunc { + return func(s *terraform.State) error { + if !aws.ToTime(i.CreateTime).Equal(aws.ToTime(j.CreateTime)) { + return errors.New("ElastiCache Serverless Cache was recreated") + } + + return nil + } +} + func testAccServerlessCacheConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_elasticache_serverless_cache" "test" { @@ -549,19 +598,19 @@ resource "aws_elasticache_serverless_cache" "test" { snapshot_retention_limit = 1 security_group_ids = [aws_security_group.test.id] subnet_ids = aws_subnet.test[*].id + tags = { Name = %[1]q } } resource "aws_kms_key" "test" { - description = 
"tf-test-cmk-kms-key-id" + description = %[1]q } resource "aws_security_group" "test" { - name = %[1]q - description = %[1]q - vpc_id = aws_vpc.test.id + name = %[1]q + vpc_id = aws_vpc.test.id ingress { from_port = -1 @@ -569,6 +618,10 @@ resource "aws_security_group" "test" { protocol = "icmp" cidr_blocks = ["0.0.0.0/0"] } + + tags = { + Name = %[1]q + } } `, rName)) } diff --git a/internal/service/elasticache/service_endpoint_resolver_gen.go b/internal/service/elasticache/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..b29997cfee0 --- /dev/null +++ b/internal/service/elasticache/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package elasticache + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + elasticache_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticache" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ elasticache_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver elasticache_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: elasticache_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params elasticache_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS 
{ + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up elasticache endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*elasticache_sdkv2.Options) { + return func(o *elasticache_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/elasticache/service_endpoints_gen_test.go b/internal/service/elasticache/service_endpoints_gen_test.go index 7577ac6846b..a51cb4b40ac 100644 --- a/internal/service/elasticache/service_endpoints_gen_test.go +++ b/internal/service/elasticache/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -16,8 +18,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" elasticache_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticache" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - elasticache_sdkv1 "github.com/aws/aws-sdk-go/service/elasticache" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" @@ -88,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no 
config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -234,45 +234,33 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }, } - t.Run("v1", func(t *testing.T) { - for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv - testcase := testcase + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase - t.Run(name, func(t *testing.T) { - testEndpointCase(t, providerRegion, testcase, callServiceV1) - }) - } - }) - - t.Run("v2", func(t *testing.T) { - for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv - testcase := testcase - - t.Run(name, func(t *testing.T) { - testEndpointCase(t, providerRegion, testcase, callServiceV2) - }) - } - }) + t.Run(name, func(t *testing.T) { + testEndpointCase(t, providerRegion, testcase, callService) + }) + } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := elasticache_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), elasticache_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := elasticache_sdkv2.NewDefaultEndpointResolverV2() ep, err := 
r.ResolveEndpoint(context.Background(), elasticache_sdkv2.EndpointParameters{ @@ -280,17 +268,17 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() client := meta.ElastiCacheClient(ctx) @@ -315,21 +303,6 @@ func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) api return result } -func callServiceV1(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { - t.Helper() - - client := meta.ElastiCacheConn(ctx) - - req, _ := client.DescribeCacheClustersRequest(&elasticache_sdkv1.DescribeCacheClustersInput{}) - - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), - } -} - func withNoConfig(_ *caseSetup) { // no-op } @@ -364,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving elasticache default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving elasticache FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + 
_, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up elasticache endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/elasticache/service_package_gen.go b/internal/service/elasticache/service_package_gen.go index 40f7853654e..dc973a3bc2f 100644 --- a/internal/service/elasticache/service_package_gen.go +++ b/internal/service/elasticache/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package elasticache @@ -7,11 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" elasticache_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticache" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - elasticache_sdkv1 "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -127,44 +122,14 @@ func (p *servicePackage) ServicePackageName() string { return names.ElastiCache } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*elasticache_sdkv1.ElastiCache, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return elasticache_sdkv1.New(sess.Copy(&cfg)), nil -} - // NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*elasticache_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return elasticache_sdkv2.NewFromConfig(cfg, func(o *elasticache_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return elasticache_sdkv2.NewFromConfig(cfg, + elasticache_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/elasticache/subnet_group.go b/internal/service/elasticache/subnet_group.go index b00bb973e42..44f753bb0c2 100644 --- a/internal/service/elasticache/subnet_group.go +++ b/internal/service/elasticache/subnet_group.go @@ -9,9 
+9,9 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -83,23 +83,24 @@ func resourceSubnetGroup() *schema.Resource { func resourceSubnetGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) + partition := meta.(*conns.AWSClient).Partition name := d.Get(names.AttrName).(string) input := &elasticache.CreateCacheSubnetGroupInput{ CacheSubnetGroupDescription: aws.String(d.Get(names.AttrDescription).(string)), CacheSubnetGroupName: aws.String(name), - SubnetIds: flex.ExpandStringSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), + SubnetIds: flex.ExpandStringValueSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), Tags: getTagsIn(ctx), } - output, err := conn.CreateCacheSubnetGroupWithContext(ctx, input) + output, err := conn.CreateCacheSubnetGroup(ctx, input) // Some partitions (e.g. ISO) may not support tag-on-create. - if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateCacheSubnetGroupWithContext(ctx, input) + output, err = conn.CreateCacheSubnetGroup(ctx, input) } if err != nil { @@ -114,10 +115,10 @@ func resourceSubnetGroupCreate(ctx context.Context, d *schema.ResourceData, meta // For partitions not supporting tag-on-create, attempt tag after create. 
if tags := getTagsIn(ctx); input.Tags == nil && len(tags) > 0 { - err := createTags(ctx, conn, aws.StringValue(output.CacheSubnetGroup.ARN), tags) + err := createTags(ctx, conn, aws.ToString(output.CacheSubnetGroup.ARN), tags) // If default tags only, continue. Otherwise, error. - if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(partition, err) { return append(diags, resourceSubnetGroupRead(ctx, d, meta)...) } @@ -131,7 +132,7 @@ func resourceSubnetGroupCreate(ctx context.Context, d *schema.ResourceData, meta func resourceSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) group, err := findCacheSubnetGroupByName(ctx, conn, d.Id()) @@ -148,8 +149,8 @@ func resourceSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta i d.Set(names.AttrARN, group.ARN) d.Set(names.AttrDescription, group.CacheSubnetGroupDescription) d.Set(names.AttrName, group.CacheSubnetGroupName) - d.Set(names.AttrSubnetIDs, tfslices.ApplyToAll(group.Subnets, func(v *elasticache.Subnet) string { - return aws.StringValue(v.SubnetIdentifier) + d.Set(names.AttrSubnetIDs, tfslices.ApplyToAll(group.Subnets, func(v awstypes.Subnet) string { + return aws.ToString(v.SubnetIdentifier) })) d.Set(names.AttrVPCID, group.VpcId) @@ -158,16 +159,16 @@ func resourceSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta i func resourceSubnetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) if 
d.HasChanges(names.AttrSubnetIDs, names.AttrDescription) { input := &elasticache.ModifyCacheSubnetGroupInput{ CacheSubnetGroupDescription: aws.String(d.Get(names.AttrDescription).(string)), CacheSubnetGroupName: aws.String(d.Get(names.AttrName).(string)), - SubnetIds: flex.ExpandStringSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), + SubnetIds: flex.ExpandStringValueSet(d.Get(names.AttrSubnetIDs).(*schema.Set)), } - _, err := conn.ModifyCacheSubnetGroupWithContext(ctx, input) + _, err := conn.ModifyCacheSubnetGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating ElastiCache Subnet Group (%s): %s", d.Id(), err) @@ -179,16 +180,16 @@ func resourceSubnetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceSubnetGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) log.Printf("[DEBUG] Deleting ElastiCache Subnet Group: %s", d.Id()) _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 5*time.Minute, func() (interface{}, error) { - return conn.DeleteCacheSubnetGroupWithContext(ctx, &elasticache.DeleteCacheSubnetGroupInput{ + return conn.DeleteCacheSubnetGroup(ctx, &elasticache.DeleteCacheSubnetGroupInput{ CacheSubnetGroupName: aws.String(d.Id()), }) }, "DependencyViolation") - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheSubnetGroupNotFoundFault) { + if errs.IsA[*awstypes.CacheSubnetGroupNotFoundFault](err) { return diags } @@ -211,50 +212,48 @@ func resourceSubnetGroupCustomizeDiff(ctx context.Context, diff *schema.Resource return nil } -func findCacheSubnetGroupByName(ctx context.Context, conn *elasticache.ElastiCache, name string) (*elasticache.CacheSubnetGroup, error) { +func findCacheSubnetGroupByName(ctx context.Context, conn *elasticache.Client, name string) (*awstypes.CacheSubnetGroup, error) { input := 
&elasticache.DescribeCacheSubnetGroupsInput{ CacheSubnetGroupName: aws.String(name), } - return findCacheSubnetGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.CacheSubnetGroup]()) + return findCacheSubnetGroup(ctx, conn, input, tfslices.PredicateTrue[*awstypes.CacheSubnetGroup]()) } -func findCacheSubnetGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheSubnetGroupsInput, filter tfslices.Predicate[*elasticache.CacheSubnetGroup]) (*elasticache.CacheSubnetGroup, error) { +func findCacheSubnetGroup(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeCacheSubnetGroupsInput, filter tfslices.Predicate[*awstypes.CacheSubnetGroup]) (*awstypes.CacheSubnetGroup, error) { output, err := findCacheSubnetGroups(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findCacheSubnetGroups(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeCacheSubnetGroupsInput, filter tfslices.Predicate[*elasticache.CacheSubnetGroup]) ([]*elasticache.CacheSubnetGroup, error) { - var output []*elasticache.CacheSubnetGroup +func findCacheSubnetGroups(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeCacheSubnetGroupsInput, filter tfslices.Predicate[*awstypes.CacheSubnetGroup]) ([]awstypes.CacheSubnetGroup, error) { + var output []awstypes.CacheSubnetGroup - err := conn.DescribeCacheSubnetGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeCacheSubnetGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticache.NewDescribeCacheSubnetGroupsPaginator(conn, input) - for _, v := range page.CacheSubnetGroups { - if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.CacheSubnetGroupNotFoundFault](err) { + return nil, 
&retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeCacheSubnetGroupNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.CacheSubnetGroups { + if filter(&v) { + output = append(output, v) + } + } } return output, nil diff --git a/internal/service/elasticache/subnet_group_data_source.go b/internal/service/elasticache/subnet_group_data_source.go index 6809fc1aec0..eea470408c2 100644 --- a/internal/service/elasticache/subnet_group_data_source.go +++ b/internal/service/elasticache/subnet_group_data_source.go @@ -6,8 +6,8 @@ package elasticache import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -52,7 +52,7 @@ func dataSourceSubnetGroup() *schema.Resource { func dataSourceSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig name := d.Get(names.AttrName).(string) @@ -63,12 +63,12 @@ func dataSourceSubnetGroupRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache Subnet Group", err)) } - d.SetId(aws.StringValue(group.CacheSubnetGroupName)) + d.SetId(aws.ToString(group.CacheSubnetGroupName)) d.Set(names.AttrARN, group.ARN) d.Set(names.AttrDescription, group.CacheSubnetGroupDescription) 
d.Set(names.AttrName, group.CacheSubnetGroupName) - d.Set(names.AttrSubnetIDs, tfslices.ApplyToAll(group.Subnets, func(v *elasticache.Subnet) string { - return aws.StringValue(v.SubnetIdentifier) + d.Set(names.AttrSubnetIDs, tfslices.ApplyToAll(group.Subnets, func(v awstypes.Subnet) string { + return aws.ToString(v.SubnetIdentifier) })) d.Set(names.AttrVPCID, group.VpcId) diff --git a/internal/service/elasticache/subnet_group_test.go b/internal/service/elasticache/subnet_group_test.go index 368f3483550..3f3fde2c1dd 100644 --- a/internal/service/elasticache/subnet_group_test.go +++ b/internal/service/elasticache/subnet_group_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccElastiCacheSubnetGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var csg elasticache.CacheSubnetGroup + var csg awstypes.CacheSubnetGroup resourceName := "aws_elasticache_subnet_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -54,7 +54,7 @@ func TestAccElastiCacheSubnetGroup_basic(t *testing.T) { func TestAccElastiCacheSubnetGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var csg elasticache.CacheSubnetGroup + var csg awstypes.CacheSubnetGroup resourceName := "aws_elasticache_subnet_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -78,7 +78,7 @@ func TestAccElastiCacheSubnetGroup_disappears(t *testing.T) { func TestAccElastiCacheSubnetGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var csg elasticache.CacheSubnetGroup + var csg awstypes.CacheSubnetGroup resourceName := "aws_elasticache_subnet_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ 
-124,7 +124,7 @@ func TestAccElastiCacheSubnetGroup_tags(t *testing.T) { func TestAccElastiCacheSubnetGroup_update(t *testing.T) { ctx := acctest.Context(t) - var csg elasticache.CacheSubnetGroup + var csg awstypes.CacheSubnetGroup resourceName := "aws_elasticache_subnet_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -165,7 +165,7 @@ func TestAccElastiCacheSubnetGroup_update(t *testing.T) { func testAccCheckSubnetGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elasticache_subnet_group" { @@ -189,7 +189,7 @@ func testAccCheckSubnetGroupDestroy(ctx context.Context) resource.TestCheckFunc } } -func testAccCheckSubnetGroupExists(ctx context.Context, n string, v *elasticache.CacheSubnetGroup) resource.TestCheckFunc { +func testAccCheckSubnetGroupExists(ctx context.Context, n string, v *awstypes.CacheSubnetGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -200,7 +200,7 @@ func testAccCheckSubnetGroupExists(ctx context.Context, n string, v *elasticache return fmt.Errorf("No ElastiCache Subnet Group ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) output, err := tfelasticache.FindCacheSubnetGroupByName(ctx, conn, rs.Primary.ID) diff --git a/internal/service/elasticache/sweep.go b/internal/service/elasticache/sweep.go index 3093e348e09..c4f05fa5c68 100644 --- a/internal/service/elasticache/sweep.go +++ b/internal/service/elasticache/sweep.go @@ -10,12 +10,13 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" + 
"github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) // These timeouts are lower to fail faster during sweepers @@ -84,45 +85,44 @@ func sweepClusters(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.ElastiCacheConn(ctx) - - var sweeperErrs *multierror.Error - input := &elasticache.DescribeCacheClustersInput{ ShowCacheClustersNotInReplicationGroups: aws.Bool(true), } - err = conn.DescribeCacheClustersPagesWithContext(ctx, input, func(page *elasticache.DescribeCacheClustersOutput, lastPage bool) bool { - if len(page.CacheClusters) == 0 { - log.Print("[DEBUG] No ElastiCache Replication Groups to sweep") - return false + conn := client.ElastiCacheClient(ctx) + var sweeperErrs *multierror.Error + + pages := elasticache.NewDescribeCacheClustersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ElastiCache Cluster sweep for %s: %s", region, err) + return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors } - for _, cluster := range page.CacheClusters { - id := aws.StringValue(cluster.CacheClusterId) + if err != nil { + sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("Error retrieving ElastiCache Clusters: %w", err)) + } + + for _, v := range page.CacheClusters { + id := aws.ToString(v.CacheClusterId) log.Printf("[INFO] Deleting ElastiCache Cluster: %s", id) - err := DeleteCacheCluster(ctx, conn, id, "") + err := deleteCacheCluster(ctx, conn, id, "") + if err != nil { 
log.Printf("[ERROR] Failed to delete ElastiCache Cache Cluster (%s): %s", id, err) sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error deleting ElastiCache Cache Cluster (%s): %w", id, err)) } + const ( timeout = 40 * time.Minute ) - _, err = waitCacheClusterDeleted(ctx, conn, id, timeout) - if err != nil { + if _, err := waitCacheClusterDeleted(ctx, conn, id, timeout); err != nil { log.Printf("[ERROR] Failed waiting for ElastiCache Cache Cluster (%s) to be deleted: %s", id, err) sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("error deleting ElastiCache Cache Cluster (%s): waiting for completion: %w", id, err)) } } - return !lastPage - }) - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping ElastiCache Cluster sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors - } - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("Error retrieving ElastiCache Clusters: %w", err)) } return sweeperErrs.ErrorOrNil() @@ -134,25 +134,34 @@ func sweepGlobalReplicationGroups(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.ElastiCacheConn(ctx) - - var grgGroup multierror.Group - input := &elasticache.DescribeGlobalReplicationGroupsInput{ ShowMemberInfo: aws.Bool(true), } - err = conn.DescribeGlobalReplicationGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeGlobalReplicationGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + conn := client.ElastiCacheClient(ctx) + + var grgGroup multierror.Group + var grgErrs *multierror.Error + + pages := elasticache.NewDescribeGlobalReplicationGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ElastiCache Global Replication Group sweep for %q: %s", region, err) + return grgErrs.ErrorOrNil() // In case we have completed some 
pages, but had errors + } + + if err != nil { + grgErrs = multierror.Append(grgErrs, fmt.Errorf("listing ElastiCache Global Replication Groups: %w", err)) } - for _, globalReplicationGroup := range page.GlobalReplicationGroups { - globalReplicationGroup := globalReplicationGroup + for _, v := range page.GlobalReplicationGroups { + globalReplicationGroup := v grgGroup.Go(func() error { - id := aws.StringValue(globalReplicationGroup.GlobalReplicationGroupId) + id := aws.ToString(globalReplicationGroup.GlobalReplicationGroupId) - disassociationErrors := DisassociateMembers(ctx, conn, globalReplicationGroup) + disassociationErrors := disassociateMembers(ctx, conn, globalReplicationGroup) if disassociationErrors != nil { return fmt.Errorf("disassociating ElastiCache Global Replication Group (%s) members: %w", id, disassociationErrors) } @@ -163,20 +172,9 @@ func sweepGlobalReplicationGroups(region string) error { return err }) } - - return !lastPage - }) - - grgErrs := grgGroup.Wait() - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping ElastiCache Global Replication Group sweep for %q: %s", region, err) - return grgErrs.ErrorOrNil() // In case we have completed some pages, but had errors } - if err != nil { - grgErrs = multierror.Append(grgErrs, fmt.Errorf("listing ElastiCache Global Replication Groups: %w", err)) - } + grgErrs = multierror.Append(grgErrs, grgGroup.Wait()) return grgErrs.ErrorOrNil() } @@ -187,92 +185,90 @@ func sweepParameterGroups(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.ElastiCacheConn(ctx) + input := &elasticache.DescribeCacheParameterGroupsInput{} + conn := client.ElastiCacheClient(ctx) + sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeCacheParameterGroupsPagesWithContext(ctx, &elasticache.DescribeCacheParameterGroupsInput{}, func(page *elasticache.DescribeCacheParameterGroupsOutput, lastPage bool) bool { - if len(page.CacheParameterGroups) == 0 { - 
log.Print("[DEBUG] No ElastiCache Parameter Groups to sweep") - return false + pages := elasticache.NewDescribeCacheParameterGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ElastiCache Parameter Group sweep for %s: %s", region, err) + return nil } - for _, parameterGroup := range page.CacheParameterGroups { - name := aws.StringValue(parameterGroup.CacheParameterGroupName) + if err != nil { + return fmt.Errorf("error listing ElastiCache Parameter Groups (%s): %w", region, err) + } + + for _, v := range page.CacheParameterGroups { + name := aws.ToString(v.CacheParameterGroupName) if strings.HasPrefix(name, "default.") { log.Printf("[INFO] Skipping ElastiCache Cache Parameter Group: %s", name) continue } - log.Printf("[INFO] Deleting ElastiCache Parameter Group: %s", name) - _, err := conn.DeleteCacheParameterGroupWithContext(ctx, &elasticache.DeleteCacheParameterGroupInput{ - CacheParameterGroupName: aws.String(name), - }) - if err != nil { - log.Printf("[ERROR] Failed to delete ElastiCache Parameter Group (%s): %s", name, err) - } + r := resourceParameterGroup() + d := r.Data(nil) + d.SetId(name) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - return !lastPage - }) + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + if err != nil { - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping ElastiCache Parameter Group sweep for %s: %s", region, err) - return nil - } - return fmt.Errorf("Error retrieving ElastiCache Parameter Group: %w", err) + return fmt.Errorf("error sweeping ElastiCache Parameter Groups (%s): %w", region, err) } + return nil } func sweepReplicationGroups(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { return fmt.Errorf("error getting client: %w", err) } - - conn := client.ElastiCacheConn(ctx) + input := 
&elasticache.DescribeReplicationGroupsInput{} + conn := client.ElastiCacheClient(ctx) sweepResources := make([]sweep.Sweepable, 0) - var errs *multierror.Error - err = conn.DescribeReplicationGroupsPagesWithContext(ctx, &elasticache.DescribeReplicationGroupsInput{}, func(page *elasticache.DescribeReplicationGroupsOutput, lastPage bool) bool { - if len(page.ReplicationGroups) == 0 { - log.Print("[DEBUG] No ElastiCache Replication Groups to sweep") - return !lastPage // in rare cases across API, one page may have empty results but not be last page + pages := elasticache.NewDescribeReplicationGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ElastiCache Replication Group sweep for %s: %s", region, err) + return nil } - for _, replicationGroup := range page.ReplicationGroups { + if err != nil { + return fmt.Errorf("error listing ElastiCache Replication Groups (%s): %w", region, err) + } + + for _, v := range page.ReplicationGroups { r := resourceReplicationGroup() d := r.Data(nil) - - if replicationGroup.GlobalReplicationGroupInfo != nil { - d.Set("global_replication_group_id", replicationGroup.GlobalReplicationGroupInfo.GlobalReplicationGroupId) + d.SetId(aws.ToString(v.ReplicationGroupId)) + if v.GlobalReplicationGroupInfo != nil { + d.Set("global_replication_group_id", v.GlobalReplicationGroupInfo.GlobalReplicationGroupId) } - d.SetId(aws.StringValue(replicationGroup.ReplicationGroupId)) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error describing ElastiCache Replication Groups: %w", err)) } - if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - errs = multierror.Append(errs, fmt.Errorf("error sweeping ElastiCache Replication Groups for %s: %w", region, err)) - } - - // waiting for deletion is not necessary in the 
sweeper since the resource's delete waits + err = sweep.SweepOrchestrator(ctx, sweepResources) - if awsv1.SkipSweepError(errs.ErrorOrNil()) { - log.Printf("[WARN] Skipping ElastiCache Replication Group sweep for %s: %s", region, errs) - return nil + if err != nil { + return fmt.Errorf("error sweeping ElastiCache Replication Groups (%s): %w", region, err) } - return errs.ErrorOrNil() + return nil } func sweepSubnetGroups(region string) error { @@ -281,17 +277,26 @@ func sweepSubnetGroups(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.ElastiCacheConn(ctx) + conn := client.ElastiCacheClient(ctx) input := &elasticache.DescribeCacheSubnetGroupsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeCacheSubnetGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeCacheSubnetGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := elasticache.NewDescribeCacheSubnetGroupsPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ElastiCache Subnet Group sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing ElastiCache Subnet Groups (%s): %w", region, err) } for _, v := range page.CacheSubnetGroups { - name := aws.StringValue(v.CacheSubnetGroupName) + name := aws.ToString(v.CacheSubnetGroupName) if name == "default" { log.Printf("[INFO] Skipping ElastiCache Subnet Group: %s", name) @@ -304,17 +309,6 @@ func sweepSubnetGroups(region string) error { sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping ElastiCache Subnet Group sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing ElastiCache Subnet Groups (%s): %w", region, err) } err = 
sweep.SweepOrchestrator(ctx, sweepResources) @@ -332,17 +326,26 @@ func sweepUsers(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.ElastiCacheConn(ctx) + conn := client.ElastiCacheClient(ctx) input := &elasticache.DescribeUsersInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeUsersPagesWithContext(ctx, input, func(page *elasticache.DescribeUsersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := elasticache.NewDescribeUsersPaginator(conn, input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ElastiCache User sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("listing ElastiCache Users (%s): %w", region, err) } for _, v := range page.Users { - id := aws.StringValue(v.UserId) + id := aws.ToString(v.UserId) if id == "default" { log.Printf("[INFO] Skipping ElastiCache User: %s", id) @@ -355,17 +358,6 @@ func sweepUsers(region string) error { sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping ElastiCache User sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("listing ElastiCache Users (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -383,33 +375,31 @@ func sweepUserGroups(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.ElastiCacheConn(ctx) + conn := client.ElastiCacheClient(ctx) input := &elasticache.DescribeUserGroupsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeUserGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeUserGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := elasticache.NewDescribeUserGroupsPaginator(conn, 
input) + + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ElastiCache User Group sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("listing ElastiCache User Groups (%s): %w", region, err) } for _, v := range page.UserGroups { r := resourceUserGroup() d := r.Data(nil) - d.SetId(aws.StringValue(v.UserGroupId)) + d.SetId(aws.ToString(v.UserGroupId)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping ElastiCache User Group sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("listing ElastiCache User Groups (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -421,20 +411,20 @@ func sweepUserGroups(region string) error { return nil } -func DisassociateMembers(ctx context.Context, conn *elasticache.ElastiCache, globalReplicationGroup *elasticache.GlobalReplicationGroup) error { +func disassociateMembers(ctx context.Context, conn *elasticache.Client, globalReplicationGroup awstypes.GlobalReplicationGroup) error { var membersGroup multierror.Group for _, member := range globalReplicationGroup.Members { member := member - if aws.StringValue(member.Role) == globalReplicationGroupMemberRolePrimary { + if aws.ToString(member.Role) == globalReplicationGroupMemberRolePrimary { continue } - id := aws.StringValue(globalReplicationGroup.GlobalReplicationGroupId) + id := aws.ToString(globalReplicationGroup.GlobalReplicationGroupId) membersGroup.Go(func() error { - if err := disassociateReplicationGroup(ctx, conn, id, aws.StringValue(member.ReplicationGroupId), aws.StringValue(member.ReplicationGroupRegion), sweeperGlobalReplicationGroupDisassociationReadyTimeout); err != nil { + if err := disassociateReplicationGroup(ctx, conn, id, aws.ToString(member.ReplicationGroupId), 
aws.ToString(member.ReplicationGroupRegion), sweeperGlobalReplicationGroupDisassociationReadyTimeout); err != nil { log.Printf("[ERROR] %s", err) return err } diff --git a/internal/service/elasticache/tags_gen.go b/internal/service/elasticache/tags_gen.go index 251e46c6eee..3091fa7e41e 100644 --- a/internal/service/elasticache/tags_gen.go +++ b/internal/service/elasticache/tags_gen.go @@ -4,16 +4,14 @@ package elasticache import ( "context" "fmt" - "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/aws/aws-sdk-go/service/elasticache/elasticacheiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -21,22 +19,12 @@ import ( // listTags lists elasticache service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn elasticacheiface.ElastiCacheAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *elasticache.Client, identifier string, optFns ...func(*elasticache.Options)) (tftags.KeyValueTags, error) { input := &elasticache.ListTagsForResourceInput{ ResourceName: aws.String(identifier), } - output, err := tfresource.RetryGWhenMessageContains(ctx, 15*time.Minute, - func() (*elasticache.TagListMessage, error) { - return conn.ListTagsForResourceWithContext(ctx, input) - }, - []string{ - elasticache.ErrCodeInvalidReplicationGroupStateFault, - }, - []string{ - "not in available state", - }, - ) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -48,7 +36,7 @@ func listTags(ctx context.Context, conn elasticacheiface.ElastiCacheAPI, identif // ListTags lists elasticache service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).ElastiCacheConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).ElastiCacheClient(ctx), identifier) if err != nil { return err @@ -64,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns elasticache service tags. -func Tags(tags tftags.KeyValueTags) []*elasticache.Tag { - result := make([]*elasticache.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &elasticache.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -80,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*elasticache.Tag { } // KeyValueTags creates tftags.KeyValueTags from elasticache service tags. 
-func KeyValueTags(ctx context.Context, tags []*elasticache.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -92,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*elasticache.Tag) tftags.KeyValueT // getTagsIn returns elasticache service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*elasticache.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -103,25 +91,25 @@ func getTagsIn(ctx context.Context) []*elasticache.Tag { } // setTagsOut sets elasticache service tags in Context. -func setTagsOut(ctx context.Context, tags []*elasticache.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } // createTags creates elasticache service tags for new resources. -func createTags(ctx context.Context, conn elasticacheiface.ElastiCacheAPI, identifier string, tags []*elasticache.Tag) error { +func createTags(ctx context.Context, conn *elasticache.Client, identifier string, tags []awstypes.Tag, optFns ...func(*elasticache.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates elasticache service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn elasticacheiface.ElastiCacheAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *elasticache.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*elasticache.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -132,20 +120,10 @@ func updateTags(ctx context.Context, conn elasticacheiface.ElastiCacheAPI, ident if len(removedTags) > 0 { input := &elasticache.RemoveTagsFromResourceInput{ ResourceName: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := tfresource.RetryWhenMessageContains(ctx, 15*time.Minute, - func() (any, error) { - return conn.RemoveTagsFromResourceWithContext(ctx, input) - }, - []string{ - elasticache.ErrCodeInvalidReplicationGroupStateFault, - }, - []string{ - "not in available state", - }, - ) + _, err := conn.RemoveTagsFromResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -160,17 +138,7 @@ func updateTags(ctx context.Context, conn elasticacheiface.ElastiCacheAPI, ident Tags: Tags(updatedTags), } - _, err := tfresource.RetryWhenMessageContains(ctx, 15*time.Minute, - func() (any, error) { - return conn.AddTagsToResourceWithContext(ctx, input) - }, - []string{ - elasticache.ErrCodeInvalidReplicationGroupStateFault, - }, - []string{ - "not in available state", - }, - ) + _, err := conn.AddTagsToResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -183,5 +151,5 @@ func updateTags(ctx context.Context, conn elasticacheiface.ElastiCacheAPI, ident // UpdateTags updates elasticache service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).ElastiCacheConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).ElastiCacheClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/elasticache/tagsv2_gen.go b/internal/service/elasticache/tagsv2_gen.go deleted file mode 100644 index 2412beef028..00000000000 --- a/internal/service/elasticache/tagsv2_gen.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by internal/generate/tags/main.go; DO NOT EDIT. -package elasticache - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" - awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" - tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types/option" -) - -// []*SERVICE.Tag handling - -// TagsV2 returns elasticache service tags. -func TagsV2(tags tftags.KeyValueTags) []awstypes.Tag { - result := make([]awstypes.Tag, 0, len(tags)) - - for k, v := range tags.Map() { - tag := awstypes.Tag{ - Key: aws.String(k), - Value: aws.String(v), - } - - result = append(result, tag) - } - - return result -} - -// keyValueTagsV2 creates tftags.KeyValueTags from elasticache service tags. -func keyValueTagsV2(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { - m := make(map[string]*string, len(tags)) - - for _, tag := range tags { - m[aws.ToString(tag.Key)] = tag.Value - } - - return tftags.New(ctx, m) -} - -// getTagsInV2 returns elasticache service tags from Context. -// nil is returned if there are no input tags. -func getTagsInV2(ctx context.Context) []awstypes.Tag { - if inContext, ok := tftags.FromContext(ctx); ok { - if tags := TagsV2(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { - return tags - } - } - - return nil -} - -// setTagsOutV2 sets elasticache service tags in Context. 
-func setTagsOutV2(ctx context.Context, tags []awstypes.Tag) { - if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = option.Some(keyValueTagsV2(ctx, tags)) - } -} diff --git a/internal/service/elasticache/user.go b/internal/service/elasticache/user.go index 63bfc16cd24..45b3053a971 100644 --- a/internal/service/elasticache/user.go +++ b/internal/service/elasticache/user.go @@ -9,14 +9,15 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -79,9 +80,9 @@ func resourceUser() *schema.Resource { Computed: true, }, names.AttrType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(elasticache.InputAuthenticationType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.InputAuthenticationType](), }, }, }, @@ -128,7 +129,8 @@ func resourceUser() *schema.Resource { func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) + partition := 
meta.(*conns.AWSClient).Partition userID := d.Get("user_id").(string) input := &elasticache.CreateUserInput{ @@ -145,23 +147,23 @@ func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta interf } if v, ok := d.GetOk("passwords"); ok && v.(*schema.Set).Len() > 0 { - input.Passwords = flex.ExpandStringSet(v.(*schema.Set)) + input.Passwords = flex.ExpandStringValueSet(v.(*schema.Set)) } - output, err := conn.CreateUserWithContext(ctx, input) + output, err := conn.CreateUser(ctx, input) // Some partitions (e.g. ISO) may not support tag-on-create. - if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateUserWithContext(ctx, input) + output, err = conn.CreateUser(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating ElastiCache User (%s): %s", userID, err) } - d.SetId(aws.StringValue(output.UserId)) + d.SetId(aws.ToString(output.UserId)) if _, err := waitUserCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache User (%s) create: %s", d.Id(), err) @@ -169,10 +171,10 @@ func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta interf // For partitions not supporting tag-on-create, attempt tag after create. if tags := getTagsIn(ctx); input.Tags == nil && len(tags) > 0 { - err := createTags(ctx, conn, aws.StringValue(output.ARN), tags) + err := createTags(ctx, conn, aws.ToString(output.ARN), tags) // If default tags only, continue. Otherwise, error. 
- if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(partition, err) { return append(diags, resourceUserRead(ctx, d, meta)...) } @@ -186,7 +188,7 @@ func resourceUserCreate(ctx context.Context, d *schema.ResourceData, meta interf func resourceUserRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) // An ongoing OOB update (where the user is in "modifying" state) can cause "UserNotFound: ... is not available for tagging" errors. // https://github.com/hashicorp/terraform-provider-aws/issues/34002. @@ -206,9 +208,9 @@ func resourceUserRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set(names.AttrARN, user.ARN) if v := user.Authentication; v != nil { tfMap := map[string]interface{}{ - "password_count": aws.Int64Value(v.PasswordCount), + "password_count": aws.ToInt32(v.PasswordCount), "passwords": d.Get("authentication_mode.0.passwords"), - names.AttrType: aws.StringValue(v.Type), + names.AttrType: string(v.Type), } if err := d.Set("authentication_mode", []interface{}{tfMap}); err != nil { @@ -226,7 +228,7 @@ func resourceUserRead(ctx context.Context, d *schema.ResourceData, meta interfac func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &elasticache.ModifyUserInput{ @@ -248,10 +250,10 @@ func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta interf } if 
d.HasChange("passwords") { - input.Passwords = flex.ExpandStringSet(d.Get("passwords").(*schema.Set)) + input.Passwords = flex.ExpandStringValueSet(d.Get("passwords").(*schema.Set)) } - _, err := conn.ModifyUserWithContext(ctx, input) + _, err := conn.ModifyUser(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating ElastiCache User (%s): %s", d.Id(), err) @@ -267,14 +269,14 @@ func resourceUserUpdate(ctx context.Context, d *schema.ResourceData, meta interf func resourceUserDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) log.Printf("[INFO] Deleting ElastiCache User: %s", d.Id()) - _, err := conn.DeleteUserWithContext(ctx, &elasticache.DeleteUserInput{ + _, err := conn.DeleteUser(ctx, &elasticache.DeleteUserInput{ UserId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeUserNotFoundFault) { + if errs.IsA[*awstypes.UserNotFoundFault](err) { return diags } @@ -289,56 +291,54 @@ func resourceUserDelete(ctx context.Context, d *schema.ResourceData, meta interf return diags } -func findUserByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.User, error) { +func findUserByID(ctx context.Context, conn *elasticache.Client, id string) (*awstypes.User, error) { input := &elasticache.DescribeUsersInput{ UserId: aws.String(id), } - return findUser(ctx, conn, input, tfslices.PredicateTrue[*elasticache.User]()) + return findUser(ctx, conn, input, tfslices.PredicateTrue[*awstypes.User]()) } -func findUser(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeUsersInput, filter tfslices.Predicate[*elasticache.User]) (*elasticache.User, error) { +func findUser(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeUsersInput, filter tfslices.Predicate[*awstypes.User]) (*awstypes.User, error) 
{ output, err := findUsers(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findUsers(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeUsersInput, filter tfslices.Predicate[*elasticache.User]) ([]*elasticache.User, error) { - var output []*elasticache.User +func findUsers(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeUsersInput, filter tfslices.Predicate[*awstypes.User]) ([]awstypes.User, error) { + var output []awstypes.User - err := conn.DescribeUsersPagesWithContext(ctx, input, func(page *elasticache.DescribeUsersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticache.NewDescribeUsersPaginator(conn, input) - for _, v := range page.Users { - if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.UserNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeUserNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.Users { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func statusUser(ctx context.Context, conn *elasticache.ElastiCache, id string) retry.StateRefreshFunc { +func statusUser(ctx context.Context, conn *elasticache.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findUserByID(ctx, conn, id) @@ -350,7 +350,7 @@ func statusUser(ctx context.Context, conn *elasticache.ElastiCache, id string) r return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, 
aws.ToString(output.Status), nil } } @@ -361,7 +361,7 @@ const ( userStatusModifying = "modifying" ) -func waitUserCreated(ctx context.Context, conn *elasticache.ElastiCache, id string, timeout time.Duration) (*elasticache.User, error) { +func waitUserCreated(ctx context.Context, conn *elasticache.Client, id string, timeout time.Duration) (*awstypes.User, error) { stateConf := &retry.StateChangeConf{ Pending: []string{userStatusCreating}, Target: []string{userStatusActive}, @@ -371,14 +371,14 @@ func waitUserCreated(ctx context.Context, conn *elasticache.ElastiCache, id stri outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.User); ok { + if output, ok := outputRaw.(*awstypes.User); ok { return output, err } return nil, err } -func waitUserUpdated(ctx context.Context, conn *elasticache.ElastiCache, id string, timeout time.Duration) (*elasticache.User, error) { +func waitUserUpdated(ctx context.Context, conn *elasticache.Client, id string, timeout time.Duration) (*awstypes.User, error) { stateConf := &retry.StateChangeConf{ Pending: []string{userStatusModifying}, Target: []string{userStatusActive}, @@ -388,14 +388,14 @@ func waitUserUpdated(ctx context.Context, conn *elasticache.ElastiCache, id stri outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.User); ok { + if output, ok := outputRaw.(*awstypes.User); ok { return output, err } return nil, err } -func waitUserDeleted(ctx context.Context, conn *elasticache.ElastiCache, id string, timeout time.Duration) (*elasticache.User, error) { +func waitUserDeleted(ctx context.Context, conn *elasticache.Client, id string, timeout time.Duration) (*awstypes.User, error) { stateConf := &retry.StateChangeConf{ Pending: []string{userStatusDeleting}, Target: []string{}, @@ -405,26 +405,26 @@ func waitUserDeleted(ctx context.Context, conn *elasticache.ElastiCache, id stri outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := 
outputRaw.(*elasticache.User); ok { + if output, ok := outputRaw.(*awstypes.User); ok { return output, err } return nil, err } -func expandAuthenticationMode(tfMap map[string]interface{}) *elasticache.AuthenticationMode { +func expandAuthenticationMode(tfMap map[string]interface{}) *awstypes.AuthenticationMode { if tfMap == nil { return nil } - apiObject := &elasticache.AuthenticationMode{} + apiObject := &awstypes.AuthenticationMode{} if v, ok := tfMap["passwords"].(*schema.Set); ok && v.Len() > 0 { - apiObject.Passwords = flex.ExpandStringSet(v) + apiObject.Passwords = flex.ExpandStringValueSet(v) } if v, ok := tfMap[names.AttrType].(string); ok && v != "" { - apiObject.Type = aws.String(v) + apiObject.Type = awstypes.InputAuthenticationType(v) } return apiObject diff --git a/internal/service/elasticache/user_data_source.go b/internal/service/elasticache/user_data_source.go index e5be202ddea..2e8e988f074 100644 --- a/internal/service/elasticache/user_data_source.go +++ b/internal/service/elasticache/user_data_source.go @@ -6,7 +6,7 @@ package elasticache import ( "context" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -70,7 +70,7 @@ func dataSourceUser() *schema.Resource { func dataSourceUserRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) user, err := findUserByID(ctx, conn, d.Get("user_id").(string)) @@ -78,12 +78,12 @@ func dataSourceUserRead(ctx context.Context, d *schema.ResourceData, meta interf return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ElastiCache User", err)) } - d.SetId(aws.StringValue(user.UserId)) + d.SetId(aws.ToString(user.UserId)) 
d.Set("access_string", user.AccessString) if v := user.Authentication; v != nil { tfMap := map[string]interface{}{ - "password_count": aws.Int64Value(v.PasswordCount), - names.AttrType: aws.StringValue(v.Type), + "password_count": aws.ToInt32(v.PasswordCount), + names.AttrType: string(v.Type), } if err := d.Set("authentication_mode", []interface{}{tfMap}); err != nil { diff --git a/internal/service/elasticache/user_group.go b/internal/service/elasticache/user_group.go index 3c7e55f2fb8..6ad3cc53379 100644 --- a/internal/service/elasticache/user_group.go +++ b/internal/service/elasticache/user_group.go @@ -9,9 +9,9 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -74,7 +74,8 @@ func resourceUserGroup() *schema.Resource { func resourceUserGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) + partition := meta.(*conns.AWSClient).Partition userGroupID := d.Get("user_group_id").(string) input := &elasticache.CreateUserGroupInput{ @@ -84,23 +85,23 @@ func resourceUserGroupCreate(ctx context.Context, d *schema.ResourceData, meta i } if v, ok := d.GetOk("user_ids"); ok && v.(*schema.Set).Len() > 0 { - input.UserIds = flex.ExpandStringSet(v.(*schema.Set)) + input.UserIds = flex.ExpandStringValueSet(v.(*schema.Set)) } - output, err := conn.CreateUserGroupWithContext(ctx, input) + output, err := conn.CreateUserGroup(ctx, input) // Some 
partitions (e.g. ISO) may not support tag-on-create. - if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateUserGroupWithContext(ctx, input) + output, err = conn.CreateUserGroup(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating ElastiCache User Group (%s): %s", userGroupID, err) } - d.SetId(aws.StringValue(output.UserGroupId)) + d.SetId(aws.ToString(output.UserGroupId)) if _, err := waitUserGroupCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ElastiCache User Group (%s) create: %s", d.Id(), err) @@ -108,10 +109,10 @@ func resourceUserGroupCreate(ctx context.Context, d *schema.ResourceData, meta i // For partitions not supporting tag-on-create, attempt tag after create. if tags := getTagsIn(ctx); input.Tags == nil && len(tags) > 0 { - err := createTags(ctx, conn, aws.StringValue(output.ARN), tags) + err := createTags(ctx, conn, aws.ToString(output.ARN), tags) // If default tags only, continue. Otherwise, error. - if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(partition, err) { return append(diags, resourceUserGroupRead(ctx, d, meta)...) 
} @@ -125,7 +126,7 @@ func resourceUserGroupCreate(ctx context.Context, d *schema.ResourceData, meta i func resourceUserGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) userGroup, err := findUserGroupByID(ctx, conn, d.Id()) @@ -141,7 +142,7 @@ func resourceUserGroupRead(ctx context.Context, d *schema.ResourceData, meta int d.Set(names.AttrARN, userGroup.ARN) d.Set(names.AttrEngine, userGroup.Engine) - d.Set("user_ids", aws.StringValueSlice(userGroup.UserIds)) + d.Set("user_ids", userGroup.UserIds) d.Set("user_group_id", userGroup.UserGroupId) return diags @@ -149,7 +150,7 @@ func resourceUserGroupRead(ctx context.Context, d *schema.ResourceData, meta int func resourceUserGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &elasticache.ModifyUserGroupInput{ @@ -162,14 +163,14 @@ func resourceUserGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i add := n.(*schema.Set).Difference(o.(*schema.Set)) if add.Len() > 0 { - input.UserIdsToAdd = flex.ExpandStringSet(add) + input.UserIdsToAdd = flex.ExpandStringValueSet(add) } if del.Len() > 0 { - input.UserIdsToRemove = flex.ExpandStringSet(del) + input.UserIdsToRemove = flex.ExpandStringValueSet(del) } } - _, err := conn.ModifyUserGroupWithContext(ctx, input) + _, err := conn.ModifyUserGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating ElastiCache User Group (%q): %s", d.Id(), err) @@ -185,14 +186,14 @@ func resourceUserGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceUserGroupDelete(ctx context.Context, d *schema.ResourceData, meta 
interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) log.Printf("[INFO] Deleting ElastiCache User Group: %s", d.Id()) - _, err := conn.DeleteUserGroupWithContext(ctx, &elasticache.DeleteUserGroupInput{ + _, err := conn.DeleteUserGroup(ctx, &elasticache.DeleteUserGroupInput{ UserGroupId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeUserGroupNotFoundFault) { + if errs.IsA[*awstypes.UserGroupNotFoundFault](err) { return diags } @@ -207,56 +208,54 @@ func resourceUserGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func findUserGroupByID(ctx context.Context, conn *elasticache.ElastiCache, id string) (*elasticache.UserGroup, error) { +func findUserGroupByID(ctx context.Context, conn *elasticache.Client, id string) (*awstypes.UserGroup, error) { input := &elasticache.DescribeUserGroupsInput{ UserGroupId: aws.String(id), } - return findUserGroup(ctx, conn, input, tfslices.PredicateTrue[*elasticache.UserGroup]()) + return findUserGroup(ctx, conn, input, tfslices.PredicateTrue[*awstypes.UserGroup]()) } -func findUserGroup(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeUserGroupsInput, filter tfslices.Predicate[*elasticache.UserGroup]) (*elasticache.UserGroup, error) { +func findUserGroup(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeUserGroupsInput, filter tfslices.Predicate[*awstypes.UserGroup]) (*awstypes.UserGroup, error) { output, err := findUserGroups(ctx, conn, input, filter) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findUserGroups(ctx context.Context, conn *elasticache.ElastiCache, input *elasticache.DescribeUserGroupsInput, filter tfslices.Predicate[*elasticache.UserGroup]) ([]*elasticache.UserGroup, error) { - var output 
[]*elasticache.UserGroup +func findUserGroups(ctx context.Context, conn *elasticache.Client, input *elasticache.DescribeUserGroupsInput, filter tfslices.Predicate[*awstypes.UserGroup]) ([]awstypes.UserGroup, error) { + var output []awstypes.UserGroup - err := conn.DescribeUserGroupsPagesWithContext(ctx, input, func(page *elasticache.DescribeUserGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticache.NewDescribeUserGroupsPaginator(conn, input) - for _, v := range page.UserGroups { - if v != nil && filter(v) { - output = append(output, v) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.UserGroupNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elasticache.ErrCodeUserGroupNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + for _, v := range page.UserGroups { + if filter(&v) { + output = append(output, v) + } + } } return output, nil } -func statusUserGroup(ctx context.Context, conn *elasticache.ElastiCache, id string) retry.StateRefreshFunc { +func statusUserGroup(ctx context.Context, conn *elasticache.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, err := findUserGroupByID(ctx, conn, id) @@ -268,7 +267,7 @@ func statusUserGroup(ctx context.Context, conn *elasticache.ElastiCache, id stri return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, aws.ToString(output.Status), nil } } @@ -279,7 +278,7 @@ const ( userGroupStatusModifying = "modifying" ) -func waitUserGroupCreated(ctx context.Context, conn *elasticache.ElastiCache, id string, timeout time.Duration) (*elasticache.UserGroup, error) { +func waitUserGroupCreated(ctx context.Context, conn *elasticache.Client, id 
string, timeout time.Duration) (*awstypes.UserGroup, error) { stateConf := &retry.StateChangeConf{ Pending: []string{userGroupStatusCreating, userGroupStatusModifying}, Target: []string{userGroupStatusActive}, @@ -291,14 +290,14 @@ func waitUserGroupCreated(ctx context.Context, conn *elasticache.ElastiCache, id outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.UserGroup); ok { + if output, ok := outputRaw.(*awstypes.UserGroup); ok { return output, err } return nil, err } -func waitUserGroupUpdated(ctx context.Context, conn *elasticache.ElastiCache, id string, timeout time.Duration) (*elasticache.UserGroup, error) { //nolint:unparam +func waitUserGroupUpdated(ctx context.Context, conn *elasticache.Client, id string, timeout time.Duration) (*awstypes.UserGroup, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ Pending: []string{userGroupStatusModifying}, Target: []string{userGroupStatusActive}, @@ -310,14 +309,14 @@ func waitUserGroupUpdated(ctx context.Context, conn *elasticache.ElastiCache, id outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.UserGroup); ok { + if output, ok := outputRaw.(*awstypes.UserGroup); ok { return output, err } return nil, err } -func waitUserGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, id string, timeout time.Duration) (*elasticache.UserGroup, error) { +func waitUserGroupDeleted(ctx context.Context, conn *elasticache.Client, id string, timeout time.Duration) (*awstypes.UserGroup, error) { stateConf := &retry.StateChangeConf{ Pending: []string{userGroupStatusDeleting}, Target: []string{}, @@ -329,7 +328,7 @@ func waitUserGroupDeleted(ctx context.Context, conn *elasticache.ElastiCache, id outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elasticache.UserGroup); ok { + if output, ok := outputRaw.(*awstypes.UserGroup); ok { return output, err } diff --git 
a/internal/service/elasticache/user_group_association.go b/internal/service/elasticache/user_group_association.go index 801b6af7520..cd88840279f 100644 --- a/internal/service/elasticache/user_group_association.go +++ b/internal/service/elasticache/user_group_association.go @@ -8,9 +8,9 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -53,19 +53,22 @@ func resourceUserGroupAssociation() *schema.Resource { func resourceUserGroupAssociationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) userGroupID := d.Get("user_group_id").(string) userID := d.Get("user_id").(string) id := errs.Must(flex.FlattenResourceId([]string{userGroupID, userID}, userGroupAssociationResourceIDPartCount, true)) input := &elasticache.ModifyUserGroupInput{ UserGroupId: aws.String(userGroupID), - UserIdsToAdd: aws.StringSlice([]string{userID}), + UserIdsToAdd: []string{userID}, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 10*time.Minute, func() (interface{}, error) { - return conn.ModifyUserGroupWithContext(ctx, input) - }, elasticache.ErrCodeInvalidUserGroupStateFault) + const ( + timeout = 10 * time.Minute + ) + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidUserGroupStateFault](ctx, timeout, func() (interface{}, error) { + return conn.ModifyUserGroup(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating ElastiCache User Group 
Association (%s): %s", id, err) @@ -82,14 +85,14 @@ func resourceUserGroupAssociationCreate(ctx context.Context, d *schema.ResourceD func resourceUserGroupAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) parts, err := flex.ExpandResourceId(d.Id(), userGroupAssociationResourceIDPartCount, true) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - userGroupID, userID := parts[0], parts[1] + err = findUserGroupAssociationByTwoPartKey(ctx, conn, userGroupID, userID) if !d.IsNewResource() && tfresource.NotFound(err) { @@ -110,23 +113,26 @@ func resourceUserGroupAssociationRead(ctx context.Context, d *schema.ResourceDat func resourceUserGroupAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ElastiCacheConn(ctx) + conn := meta.(*conns.AWSClient).ElastiCacheClient(ctx) parts, err := flex.ExpandResourceId(d.Id(), userGroupAssociationResourceIDPartCount, true) if err != nil { return sdkdiag.AppendFromErr(diags, err) } + userGroupID, userID := parts[0], parts[1] log.Printf("[INFO] Deleting ElastiCache User Group Association: %s", d.Id()) - userGroupID, userID := parts[0], parts[1] - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, 10*time.Minute, func() (interface{}, error) { - return conn.ModifyUserGroupWithContext(ctx, &elasticache.ModifyUserGroupInput{ + const ( + timeout = 10 * time.Minute + ) + _, err = tfresource.RetryWhenIsA[*awstypes.InvalidUserGroupStateFault](ctx, timeout, func() (interface{}, error) { + return conn.ModifyUserGroup(ctx, &elasticache.ModifyUserGroupInput{ UserGroupId: aws.String(userGroupID), - UserIdsToRemove: aws.StringSlice([]string{userID}), + UserIdsToRemove: []string{userID}, }) - }, elasticache.ErrCodeInvalidUserGroupStateFault) + }) - if 
tfawserr.ErrMessageContains(err, elasticache.ErrCodeInvalidParameterValueException, "not a member") { + if errs.IsAErrorMessageContains[*awstypes.InvalidParameterValueException](err, "not a member") { return diags } @@ -141,7 +147,7 @@ func resourceUserGroupAssociationDelete(ctx context.Context, d *schema.ResourceD return diags } -func findUserGroupAssociationByTwoPartKey(ctx context.Context, conn *elasticache.ElastiCache, userGroupID, userID string) error { +func findUserGroupAssociationByTwoPartKey(ctx context.Context, conn *elasticache.Client, userGroupID, userID string) error { userGroup, err := findUserGroupByID(ctx, conn, userGroupID) if err != nil { @@ -149,7 +155,7 @@ func findUserGroupAssociationByTwoPartKey(ctx context.Context, conn *elasticache } for _, v := range userGroup.UserIds { - if aws.StringValue(v) == userID { + if v == userID { return nil } } diff --git a/internal/service/elasticache/user_group_association_test.go b/internal/service/elasticache/user_group_association_test.go index c00e8780cc1..fd5729e6cbc 100644 --- a/internal/service/elasticache/user_group_association_test.go +++ b/internal/service/elasticache/user_group_association_test.go @@ -140,7 +140,7 @@ func TestAccElastiCacheUserGroupAssociation_multiple(t *testing.T) { func testAccCheckUserGroupAssociationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elasticache_user_group_association" { @@ -171,7 +171,7 @@ func testAccCheckUserGroupAssociationExists(ctx context.Context, n string) resou return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) err := 
tfelasticache.FindUserGroupAssociationByTwoPartKey(ctx, conn, rs.Primary.Attributes["user_group_id"], rs.Primary.Attributes["user_id"]) diff --git a/internal/service/elasticache/user_group_test.go b/internal/service/elasticache/user_group_test.go index 16ab296b15b..b9eabc4f387 100644 --- a/internal/service/elasticache/user_group_test.go +++ b/internal/service/elasticache/user_group_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccElastiCacheUserGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var userGroup elasticache.UserGroup + var userGroup awstypes.UserGroup rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user_group.test" @@ -51,7 +51,7 @@ func TestAccElastiCacheUserGroup_basic(t *testing.T) { func TestAccElastiCacheUserGroup_update(t *testing.T) { ctx := acctest.Context(t) - var userGroup elasticache.UserGroup + var userGroup awstypes.UserGroup rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user_group.test" @@ -94,7 +94,7 @@ func TestAccElastiCacheUserGroup_update(t *testing.T) { func TestAccElastiCacheUserGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var userGroup elasticache.UserGroup + var userGroup awstypes.UserGroup rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user_group.test" @@ -135,7 +135,7 @@ func TestAccElastiCacheUserGroup_tags(t *testing.T) { func TestAccElastiCacheUserGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var userGroup elasticache.UserGroup + var userGroup awstypes.UserGroup rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := 
"aws_elasticache_user_group.test" @@ -159,7 +159,7 @@ func TestAccElastiCacheUserGroup_disappears(t *testing.T) { func testAccCheckUserGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elasticache_user_group" { @@ -183,7 +183,7 @@ func testAccCheckUserGroupDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckUserGroupExists(ctx context.Context, n string, v *elasticache.UserGroup) resource.TestCheckFunc { +func testAccCheckUserGroupExists(ctx context.Context, n string, v *awstypes.UserGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -194,7 +194,7 @@ func testAccCheckUserGroupExists(ctx context.Context, n string, v *elasticache.U return fmt.Errorf("No ElastiCache User Group ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) output, err := tfelasticache.FindUserGroupByID(ctx, conn, rs.Primary.ID) diff --git a/internal/service/elasticache/user_test.go b/internal/service/elasticache/user_test.go index 34841293f97..4cd562085b8 100644 --- a/internal/service/elasticache/user_test.go +++ b/internal/service/elasticache/user_test.go @@ -8,8 +8,9 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elasticache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticache" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticache/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 
+23,7 @@ import ( func TestAccElastiCacheUser_basic(t *testing.T) { ctx := acctest.Context(t) - var user elasticache.User + var user awstypes.User rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user.test" @@ -57,7 +58,7 @@ func TestAccElastiCacheUser_basic(t *testing.T) { func TestAccElastiCacheUser_password_auth_mode(t *testing.T) { ctx := acctest.Context(t) - var user elasticache.User + var user awstypes.User rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user.test" @@ -96,7 +97,7 @@ func TestAccElastiCacheUser_password_auth_mode(t *testing.T) { func TestAccElastiCacheUser_iam_auth_mode(t *testing.T) { ctx := acctest.Context(t) - var user elasticache.User + var user awstypes.User rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user.test" @@ -130,7 +131,7 @@ func TestAccElastiCacheUser_iam_auth_mode(t *testing.T) { func TestAccElastiCacheUser_update(t *testing.T) { ctx := acctest.Context(t) - var user elasticache.User + var user awstypes.User rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user.test" @@ -168,7 +169,7 @@ func TestAccElastiCacheUser_update(t *testing.T) { func TestAccElastiCacheUser_update_password_auth_mode(t *testing.T) { ctx := acctest.Context(t) - var user elasticache.User + var user awstypes.User rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user.test" @@ -232,7 +233,7 @@ func TestAccElastiCacheUser_update_password_auth_mode(t *testing.T) { func TestAccElastiCacheUser_tags(t *testing.T) { ctx := acctest.Context(t) - var user elasticache.User + var user awstypes.User rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user.test" @@ -283,7 +284,7 @@ func TestAccElastiCacheUser_tags(t *testing.T) { func TestAccElastiCacheUser_disappears(t *testing.T) { ctx := acctest.Context(t) - var user elasticache.User + var user awstypes.User rName := 
sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user.test" @@ -308,7 +309,7 @@ func TestAccElastiCacheUser_disappears(t *testing.T) { // https://github.com/hashicorp/terraform-provider-aws/issues/34002. func TestAccElastiCacheUser_oobModify(t *testing.T) { ctx := acctest.Context(t) - var user elasticache.User + var user awstypes.User rName := sdkacctest.RandomWithPrefix("tf-acc") resourceName := "aws_elasticache_user.test" @@ -349,7 +350,7 @@ func TestAccElastiCacheUser_oobModify(t *testing.T) { func testAccCheckUserDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_elasticache_user" { @@ -373,7 +374,7 @@ func testAccCheckUserDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckUserExists(ctx context.Context, n string, v *elasticache.User) resource.TestCheckFunc { +func testAccCheckUserExists(ctx context.Context, n string, v *awstypes.User) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -384,7 +385,7 @@ func testAccCheckUserExists(ctx context.Context, n string, v *elasticache.User) return fmt.Errorf("No ElastiCache User ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) output, err := tfelasticache.FindUserByID(ctx, conn, rs.Primary.ID) @@ -398,11 +399,11 @@ func testAccCheckUserExists(ctx context.Context, n string, v *elasticache.User) } } -func testAccCheckUserUpdateOOB(ctx context.Context, v *elasticache.User) resource.TestCheckFunc { +func testAccCheckUserUpdateOOB(ctx context.Context, v *awstypes.User) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := 
acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ElastiCacheClient(ctx) - _, err := conn.ModifyUserWithContext(ctx, &elasticache.ModifyUserInput{ + _, err := conn.ModifyUser(ctx, &elasticache.ModifyUserInput{ AccessString: aws.String("on ~* +@all"), UserId: v.UserId, }) diff --git a/internal/service/elasticbeanstalk/service_endpoint_resolver_gen.go b/internal/service/elasticbeanstalk/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..c80d6aa11e4 --- /dev/null +++ b/internal/service/elasticbeanstalk/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package elasticbeanstalk + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + elasticbeanstalk_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ elasticbeanstalk_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver elasticbeanstalk_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: elasticbeanstalk_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params elasticbeanstalk_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } 
else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up elasticbeanstalk endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*elasticbeanstalk_sdkv2.Options) { + return func(o *elasticbeanstalk_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/elasticbeanstalk/service_endpoints_gen_test.go b/internal/service/elasticbeanstalk/service_endpoints_gen_test.go index 88ee3a2a32b..8aa8305fd23 100644 --- a/internal/service/elasticbeanstalk/service_endpoints_gen_test.go +++ b/internal/service/elasticbeanstalk/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: 
expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := elasticbeanstalk_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), elasticbeanstalk_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := elasticbeanstalk_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), elasticbeanstalk_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) 
caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/elasticbeanstalk/service_package_gen.go b/internal/service/elasticbeanstalk/service_package_gen.go index 8158108bc3f..e93665734f8 100644 --- a/internal/service/elasticbeanstalk/service_package_gen.go +++ b/internal/service/elasticbeanstalk/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package elasticbeanstalk @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" elasticbeanstalk_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -81,19 +80,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*elasticbeanstalk_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return elasticbeanstalk_sdkv2.NewFromConfig(cfg, func(o *elasticbeanstalk_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return elasticbeanstalk_sdkv2.NewFromConfig(cfg, + elasticbeanstalk_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/elasticsearch/domain_test.go b/internal/service/elasticsearch/domain_test.go index 4adeb449bc5..4143c9cfabe 100644 --- a/internal/service/elasticsearch/domain_test.go +++ b/internal/service/elasticsearch/domain_test.go @@ -11,7 +11,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -942,7 +941,7 @@ func TestAccElasticsearchDomain_cognitoOptionsCreateAndRemove(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheckCognitoIdentityProvider(ctx, t) + acctest.PreCheckCognitoIdentityProvider(ctx, t) testAccPreCheckIAMServiceLinkedRole(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticsearchServiceID), @@ -986,7 +985,7 @@ func TestAccElasticsearchDomain_cognitoOptionsUpdate(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheckCognitoIdentityProvider(ctx, t) + acctest.PreCheckCognitoIdentityProvider(ctx, t) testAccPreCheckIAMServiceLinkedRole(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ElasticsearchServiceID), @@ -3167,21 +3166,3 @@ resource "aws_elasticsearch_domain" "test" { } `, rName, cognitoOptions) } - -func testAccPreCheckCognitoIdentityProvider(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) - - input := &cognitoidentityprovider.ListUserPoolsInput{ - MaxResults: aws.Int64(1), - } - - _, err := conn.ListUserPoolsWithContext(ctx, input) - - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} diff --git a/internal/service/elasticsearch/service_endpoint_resolver_gen.go b/internal/service/elasticsearch/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..3fdf3d9fd22 --- /dev/null +++ b/internal/service/elasticsearch/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package elasticsearch + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/elasticsearch/service_endpoints_gen_test.go b/internal/service/elasticsearch/service_endpoints_gen_test.go index a732ab343f3..296c5c1bd56 100644 --- a/internal/service/elasticsearch/service_endpoints_gen_test.go +++ b/internal/service/elasticsearch/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -90,7 +91,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -330,7 +331,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -351,12 +352,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(elasticsearchservice_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -365,17 +366,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(elasticsearchservice_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := 
url.Parse(ep.URL) @@ -384,7 +385,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -466,16 +467,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/elasticsearch/service_package_gen.go b/internal/service/elasticsearch/service_package_gen.go index 1e483ca6d66..40fad5395cb 100644 --- a/internal/service/elasticsearch/service_package_gen.go +++ b/internal/service/elasticsearch/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package elasticsearch @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" elasticsearchservice_sdkv1 "github.com/aws/aws-sdk-go/service/elasticsearchservice" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -74,11 +73,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*e "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return elasticsearchservice_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/elastictranscoder/service_endpoint_resolver_gen.go b/internal/service/elastictranscoder/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..4832e51953b --- /dev/null +++ b/internal/service/elastictranscoder/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package elastictranscoder + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) 
+ + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/elastictranscoder/service_endpoints_gen_test.go b/internal/service/elastictranscoder/service_endpoints_gen_test.go index 6ad3a92103e..66486d69bb1 100644 --- a/internal/service/elastictranscoder/service_endpoints_gen_test.go +++ b/internal/service/elastictranscoder/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(elastictranscoder_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(elastictranscoder_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ 
:= url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/elastictranscoder/service_package_gen.go b/internal/service/elastictranscoder/service_package_gen.go index 632576761a2..74c5789f3f6 100644 --- a/internal/service/elastictranscoder/service_package_gen.go +++ b/internal/service/elastictranscoder/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package elastictranscoder @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" elastictranscoder_sdkv1 "github.com/aws/aws-sdk-go/service/elastictranscoder" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -57,11 +56,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*e "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return elastictranscoder_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/elb/app_cookie_stickiness_policy.go b/internal/service/elb/app_cookie_stickiness_policy.go index 774a0b21566..d0ab012693e 100644 --- a/internal/service/elb/app_cookie_stickiness_policy.go +++ b/internal/service/elb/app_cookie_stickiness_policy.go @@ -11,20 +11,22 @@ import ( "strings" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_app_cookie_stickiness_policy") -func ResourceAppCookieStickinessPolicy() *schema.Resource { +// @SDKResource("aws_app_cookie_stickiness_policy", name="App Cookie Stickiness Policy") +func resourceAppCookieStickinessPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAppCookieStickinessPolicyCreate, ReadWithoutTimeout: resourceAppCookieStickinessPolicyRead, @@ -69,52 +71,55 @@ func ResourceAppCookieStickinessPolicy() *schema.Resource { func resourceAppCookieStickinessPolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) lbName := d.Get("load_balancer").(string) lbPort := d.Get("lb_port").(int) policyName := d.Get(names.AttrName).(string) - id := AppCookieStickinessPolicyCreateResourceID(lbName, lbPort, policyName) + id := appCookieStickinessPolicyCreateResourceID(lbName, lbPort, policyName) { - input := &elb.CreateAppCookieStickinessPolicyInput{ + input := &elasticloadbalancing.CreateAppCookieStickinessPolicyInput{ CookieName: aws.String(d.Get("cookie_name").(string)), LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), } - if _, err := conn.CreateAppCookieStickinessPolicyWithContext(ctx, input); err != nil { + _, err := conn.CreateAppCookieStickinessPolicy(ctx, input) + + if err != nil { return sdkdiag.AppendErrorf(diags, "creating ELB Classic App Cookie Stickiness Policy (%s): %s", id, err) } } + d.SetId(id) + { - input := &elb.SetLoadBalancerPoliciesOfListenerInput{ + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(int64(lbPort)), - PolicyNames: aws.StringSlice([]string{policyName}), + LoadBalancerPort: int32(lbPort), + 
PolicyNames: []string{policyName}, } - if _, err := conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, input); err != nil { - return sdkdiag.AppendErrorf(diags, "setting ELB Classic App Cookie Stickiness Policy (%s): %s", id, err) + _, err := conn.SetLoadBalancerPoliciesOfListener(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting ELB Classic App Cookie Stickiness Policy (%s): %s", d.Id(), err) } } - d.SetId(id) - return append(diags, resourceAppCookieStickinessPolicyRead(ctx, d, meta)...) } func resourceAppCookieStickinessPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, lbPort, policyName, err := AppCookieStickinessPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, lbPort, policyName, err := appCookieStickinessPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - policy, err := FindLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) + policy, err := findLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELB Classic App Cookie Stickiness Policy (%s) not found, removing from state", d.Id()) @@ -126,9 +131,10 @@ func resourceAppCookieStickinessPolicyRead(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "reading ELB Classic App Cookie Stickiness Policy (%s): %s", d.Id(), err) } - if len(policy.PolicyAttributeDescriptions) != 1 || aws.StringValue(policy.PolicyAttributeDescriptions[0].AttributeName) != "CookieName" { + if len(policy.PolicyAttributeDescriptions) != 1 || aws.ToString(policy.PolicyAttributeDescriptions[0].AttributeName) != "CookieName" { return sdkdiag.AppendErrorf(diags, "cookie not found") } + cookieAttr 
:= policy.PolicyAttributeDescriptions[0] d.Set("cookie_name", cookieAttr.AttributeValue) d.Set("lb_port", lbPort) @@ -140,35 +146,42 @@ func resourceAppCookieStickinessPolicyRead(ctx context.Context, d *schema.Resour func resourceAppCookieStickinessPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, lbPort, policyName, err := AppCookieStickinessPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, lbPort, policyName, err := appCookieStickinessPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } // Perversely, if we Set an empty list of PolicyNames, we detach the // policies attached to a listener, which is required to delete the // policy itself. - input := &elb.SetLoadBalancerPoliciesOfListenerInput{ + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(int64(lbPort)), - PolicyNames: aws.StringSlice([]string{}), + LoadBalancerPort: int32(lbPort), + PolicyNames: []string{}, } - _, err = conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, input) + _, err = conn.SetLoadBalancerPoliciesOfListener(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeLoadBalancerNotFound) { + return diags + } if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic App Cookie Stickiness Policy (%s): %s", d.Id(), err) } log.Printf("[DEBUG] Deleting ELB Classic App Cookie Stickiness Policy: %s", d.Id()) - _, err = conn.DeleteLoadBalancerPolicyWithContext(ctx, &elb.DeleteLoadBalancerPolicyInput{ + _, err = conn.DeleteLoadBalancerPolicy(ctx, &elasticloadbalancing.DeleteLoadBalancerPolicyInput{ LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), }) + if tfawserr.ErrCodeEquals(err, 
errCodeLoadBalancerNotFound) { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ELB Classic App Cookie Stickiness Policy (%s): %s", d.Id(), err) } @@ -176,15 +189,15 @@ func resourceAppCookieStickinessPolicyDelete(ctx context.Context, d *schema.Reso return diags } -func FindLoadBalancerPolicyByTwoPartKey(ctx context.Context, conn *elb.ELB, lbName, policyName string) (*elb.PolicyDescription, error) { - input := &elb.DescribeLoadBalancerPoliciesInput{ +func findLoadBalancerPolicyByTwoPartKey(ctx context.Context, conn *elasticloadbalancing.Client, lbName, policyName string) (*awstypes.PolicyDescription, error) { + input := &elasticloadbalancing.DescribeLoadBalancerPoliciesInput{ LoadBalancerName: aws.String(lbName), - PolicyNames: aws.StringSlice([]string{policyName}), + PolicyNames: []string{policyName}, } - output, err := conn.DescribeLoadBalancerPoliciesWithContext(ctx, input) + output, err := conn.DescribeLoadBalancerPolicies(ctx, input) - if tfawserr.ErrCodeEquals(err, elb.ErrCodePolicyNotFoundException, elb.ErrCodeAccessPointNotFoundException) { + if errs.IsA[*awstypes.PolicyNotFoundException](err) || errs.IsA[*awstypes.AccessPointNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -195,41 +208,33 @@ func FindLoadBalancerPolicyByTwoPartKey(ctx context.Context, conn *elb.ELB, lbNa return nil, err } - if output == nil || len(output.PolicyDescriptions) == 0 || output.PolicyDescriptions[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output.PolicyDescriptions); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output.PolicyDescriptions[0], nil + return tfresource.AssertSingleValueResult(output.PolicyDescriptions) } -func FindLoadBalancerListenerPolicyByThreePartKey(ctx context.Context, conn *elb.ELB, lbName string, lbPort int, policyName string) (*elb.PolicyDescription, error) { - policy, err := 
FindLoadBalancerPolicyByTwoPartKey(ctx, conn, lbName, policyName) +func findLoadBalancerListenerPolicyByThreePartKey(ctx context.Context, conn *elasticloadbalancing.Client, lbName string, lbPort int, policyName string) (*awstypes.PolicyDescription, error) { + policy, err := findLoadBalancerPolicyByTwoPartKey(ctx, conn, lbName, policyName) if err != nil { return nil, err } - lb, err := FindLoadBalancerByName(ctx, conn, lbName) + lb, err := findLoadBalancerByName(ctx, conn, lbName) if err != nil { return nil, err } for _, v := range lb.ListenerDescriptions { - if v == nil || v.Listener == nil { + if v.Listener == nil { continue } - if aws.Int64Value(v.Listener.LoadBalancerPort) != int64(lbPort) { + if v.Listener.LoadBalancerPort != int32(lbPort) { continue } for _, v := range v.PolicyNames { - if aws.StringValue(v) == policyName { + if v == policyName { return policy, nil } } @@ -240,14 +245,14 @@ func FindLoadBalancerListenerPolicyByThreePartKey(ctx context.Context, conn *elb const appCookieStickinessPolicyResourceIDSeparator = ":" -func AppCookieStickinessPolicyCreateResourceID(lbName string, lbPort int, policyName string) string { +func appCookieStickinessPolicyCreateResourceID(lbName string, lbPort int, policyName string) string { parts := []string{lbName, strconv.Itoa(lbPort), policyName} id := strings.Join(parts, appCookieStickinessPolicyResourceIDSeparator) return id } -func AppCookieStickinessPolicyParseResourceID(id string) (string, int, string, error) { +func appCookieStickinessPolicyParseResourceID(id string) (string, int, string, error) { parts := strings.Split(id, appCookieStickinessPolicyResourceIDSeparator) if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { diff --git a/internal/service/elb/app_cookie_stickiness_policy_test.go b/internal/service/elb/app_cookie_stickiness_policy_test.go index d6e3d604ca8..75fc6ad82d3 100644 --- a/internal/service/elb/app_cookie_stickiness_policy_test.go +++ 
b/internal/service/elb/app_cookie_stickiness_policy_test.go @@ -103,7 +103,7 @@ func TestAccELBAppCookieStickinessPolicy_Disappears_elb(t *testing.T) { func testAccCheckAppCookieStickinessPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_app_cookie_stickiness_policy" { @@ -111,7 +111,6 @@ func testAccCheckAppCookieStickinessPolicyDestroy(ctx context.Context) resource. } lbName, lbPort, policyName, err := tfelb.AppCookieStickinessPolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } @@ -140,17 +139,12 @@ func testAccCheckAppCookieStickinessPolicyExists(ctx context.Context, n string) return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ELB Classic App Cookie Stickiness Policy ID is set") - } - lbName, lbPort, policyName, err := tfelb.AppCookieStickinessPolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) _, err = tfelb.FindLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) diff --git a/internal/service/elb/attachment.go b/internal/service/elb/attachment.go index 4dad85856f7..d2f4a692eac 100644 --- a/internal/service/elb/attachment.go +++ b/internal/service/elb/attachment.go @@ -7,11 +7,12 @@ import ( "context" "fmt" "log" + "slices" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -21,8 +22,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_elb_attachment") -func ResourceAttachment() *schema.Resource { +// @SDKResource("aws_elb_attachment", name="Attachment") +func resourceAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAttachmentCreate, ReadWithoutTimeout: resourceAttachmentRead, @@ -34,7 +35,6 @@ func ResourceAttachment() *schema.Resource { ForceNew: true, Required: true, }, - "instance": { Type: schema.TypeString, ForceNew: true, @@ -46,84 +46,48 @@ func ResourceAttachment() *schema.Resource { func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - elbName := d.Get("elb").(string) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName := d.Get("elb").(string) instance := d.Get("instance").(string) - - registerInstancesOpts := elb.RegisterInstancesWithLoadBalancerInput{ - LoadBalancerName: aws.String(elbName), - Instances: []*elb.Instance{{InstanceId: aws.String(instance)}}, + input := &elasticloadbalancing.RegisterInstancesWithLoadBalancerInput{ + Instances: expandInstances([]interface{}{instance}), + LoadBalancerName: aws.String(lbName), } - log.Printf("[INFO] registering instance %s with ELB %s", instance, elbName) - - err := retry.RetryContext(ctx, 10*time.Minute, func() *retry.RetryError { - _, err := conn.RegisterInstancesWithLoadBalancerWithContext(ctx, ®isterInstancesOpts) - - if tfawserr.ErrCodeEquals(err, "InvalidTarget") { - return retry.RetryableError(fmt.Errorf("attaching instance to ELB, retrying: %s", err)) - } + const ( + timeout = 10 * time.Minute + ) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() 
(interface{}, error) { + return conn.RegisterInstancesWithLoadBalancer(ctx, input) + }, errCodeInvalidTarget) - if err != nil { - return retry.NonRetryableError(err) - } - - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.RegisterInstancesWithLoadBalancerWithContext(ctx, ®isterInstancesOpts) - } if err != nil { - return sdkdiag.AppendErrorf(diags, "Failure registering instances with ELB: %s", err) + return sdkdiag.AppendErrorf(diags, "creating ELB Classic Attachment (%s/%s): %s", lbName, instance, err) } //lintignore:R016 // Allow legacy unstable ID usage in managed resource - d.SetId(id.PrefixedUniqueId(fmt.Sprintf("%s-", elbName))) + d.SetId(id.PrefixedUniqueId(fmt.Sprintf("%s-", lbName))) return diags } func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - elbName := d.Get("elb").(string) + conn := meta.(*conns.AWSClient).ELBClient(ctx) - // only add the instance that was previously defined for this resource - expected := d.Get("instance").(string) - - // Retrieve the ELB properties to get a list of attachments - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(elbName)}, - } + lbName := d.Get("elb").(string) + instance := d.Get("instance").(string) + err := findLoadBalancerAttachmentByTwoPartKey(ctx, conn, lbName, instance) - resp, err := conn.DescribeLoadBalancersWithContext(ctx, describeElbOpts) - if err != nil { - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, elb.ErrCodeAccessPointNotFoundException) { - log.Printf("[WARN] ELB Classic LB (%s) not found, removing from state", elbName) - d.SetId("") - return diags - } - return sdkdiag.AppendErrorf(diags, "retrieving ELB Classic LB (%s): %s", elbName, err) - } - if !d.IsNewResource() && len(resp.LoadBalancerDescriptions) != 1 { - log.Printf("[WARN] ELB Classic LB (%s) not found, removing from state", elbName) + if 
!d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] ELB Classic Attachment (%s/%s) not found, removing from state", lbName, instance) d.SetId("") return diags } - // only set the instance Id that this resource manages - found := false - for _, i := range resp.LoadBalancerDescriptions[0].Instances { - if expected == aws.StringValue(i.InstanceId) { - d.Set("instance", expected) - found = true - } - } - - if !d.IsNewResource() && !found { - log.Printf("[WARN] instance %s not found in elb attachments", expected) - d.SetId("") + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ELB Classic Attachment (%s/%s): %s", lbName, instance, err) } return diags @@ -131,22 +95,39 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - elbName := d.Get("elb").(string) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName := d.Get("elb").(string) instance := d.Get("instance").(string) + input := &elasticloadbalancing.DeregisterInstancesFromLoadBalancerInput{ + Instances: expandInstances([]interface{}{instance}), + LoadBalancerName: aws.String(lbName), + } - log.Printf("[INFO] Deleting Attachment %s from: %s", instance, elbName) + log.Printf("[DEBUG] Deleting ELB Classic Attachment: %s", d.Id()) + _, err := conn.DeregisterInstancesFromLoadBalancer(ctx, input) - deRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancerInput{ - LoadBalancerName: aws.String(elbName), - Instances: []*elb.Instance{{InstanceId: aws.String(instance)}}, + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting ELB Classic Attachment (%s/%s): %s", lbName, instance, err) } - _, err := conn.DeregisterInstancesFromLoadBalancerWithContext(ctx, &deRegisterInstancesOpts) + return diags +} + +func findLoadBalancerAttachmentByTwoPartKey(ctx 
context.Context, conn *elasticloadbalancing.Client, lbName, instance string) error { + lb, err := findLoadBalancerByName(ctx, conn, lbName) + if err != nil { - return sdkdiag.AppendErrorf(diags, "Failure deregistering instances from ELB: %s", err) + return err } - return diags + attached := slices.ContainsFunc(lb.Instances, func(v awstypes.Instance) bool { + return aws.ToString(v.InstanceId) == instance + }) + + if !attached { + return &retry.NotFoundError{} + } + + return nil } diff --git a/internal/service/elb/attachment_test.go b/internal/service/elb/attachment_test.go index 41ab621f891..a547293c962 100644 --- a/internal/service/elb/attachment_test.go +++ b/internal/service/elb/attachment_test.go @@ -4,213 +4,136 @@ package elb_test import ( + "context" "fmt" - "log" "testing" - "github.com/aws/aws-sdk-go/service/elb" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfelb "github.com/hashicorp/terraform-provider-aws/internal/service/elb" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccELBAttachment_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription - resourceName := "aws_elb.test" + var conf awstypes.LoadBalancerDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + lbResourceName := "aws_elb.test" + resourceName1 := "aws_elb_attachment.test1" + resourceName2 := "aws_elb_attachment.test2" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBServiceID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), + CheckDestroy: testAccCheckAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAttachmentConfig_1(), + Config: testAccAttachmentConfig_1(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), + testAccCheckAttachmentExists(ctx, resourceName1), + testAccCheckLoadBalancerExists(ctx, lbResourceName, &conf), testAccAttachmentCheckInstanceCount(&conf, 1), ), }, { - Config: testAccAttachmentConfig_2(), + Config: testAccAttachmentConfig_2(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), + testAccCheckAttachmentExists(ctx, resourceName1), + testAccCheckAttachmentExists(ctx, resourceName2), + testAccCheckLoadBalancerExists(ctx, lbResourceName, &conf), testAccAttachmentCheckInstanceCount(&conf, 2), ), }, - { - Config: testAccAttachmentConfig_3(), - Check: resource.ComposeTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), - testAccAttachmentCheckInstanceCount(&conf, 2), - ), - }, - { - Config: testAccAttachmentConfig_4(), - Check: resource.ComposeTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), - testAccAttachmentCheckInstanceCount(&conf, 0), - ), - }, }, }) } -// remove and instance and check that it's correctly re-attached. 
-func TestAccELBAttachment_drift(t *testing.T) { +func TestAccELBAttachment_disappears(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription - resourceName := "aws_elb.test" - - testAccAttachmentConfig_deregInstance := func() { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) - - deRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancerInput{ - LoadBalancerName: conf.LoadBalancerName, - Instances: conf.Instances, - } - - log.Printf("[DEBUG] deregistering instance %v from ELB", *conf.Instances[0].InstanceId) - - _, err := conn.DeregisterInstancesFromLoadBalancerWithContext(ctx, &deRegisterInstancesOpts) - if err != nil { - t.Fatalf("Failure deregistering instances from ELB: %s", err) - } - } + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_elb_attachment.test1" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckLoadBalancerDestroy(ctx), + CheckDestroy: testAccCheckAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAttachmentConfig_1(), - Check: resource.ComposeTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), - testAccAttachmentCheckInstanceCount(&conf, 1), - ), - }, - // remove an instance from the ELB, and make sure it gets re-added - { - Config: testAccAttachmentConfig_1(), - PreConfig: testAccAttachmentConfig_deregInstance, + Config: testAccAttachmentConfig_1(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, resourceName, &conf), - testAccAttachmentCheckInstanceCount(&conf, 1), + testAccCheckAttachmentExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelb.ResourceAttachment(), resourceName), ), + ExpectNonEmptyPlan: true, }, }, }) } -func testAccAttachmentCheckInstanceCount(conf 
*elb.LoadBalancerDescription, expected int) resource.TestCheckFunc { - return func(*terraform.State) error { - if actual := len(conf.Instances); actual != expected { - return fmt.Errorf("instance count does not match: expected %d, got %d", expected, actual) - } - return nil - } -} - -// add one attachment -func testAccAttachmentConfig_1() string { - return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), ` -data "aws_availability_zones" "available" { - state = "available" +func testAccCheckAttachmentDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_elb_attachment" { + continue + } -resource "aws_elb" "test" { - availability_zones = data.aws_availability_zones.available.names + err := tfelb.FindLoadBalancerAttachmentByTwoPartKey(ctx, conn, rs.Primary.Attributes["elb"], rs.Primary.Attributes["instance"]) - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } -} - -resource "aws_instance" "foo1" { - ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - instance_type = "t2.micro" -} + if tfresource.NotFound(err) { + continue + } -resource "aws_elb_attachment" "foo1" { - elb = aws_elb.test.id - instance = aws_instance.foo1.id -} -`) -} - -// add a second attachment -func testAccAttachmentConfig_2() string { - return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), ` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} + if err != nil { + return err + } -resource "aws_elb" "test" { - availability_zones = data.aws_availability_zones.available.names + return fmt.Errorf("ELB Classic Attachment %s still exists", rs.Primary.ID) 
+ } - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } + return nil + } } -resource "aws_instance" "foo1" { - ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - instance_type = "t2.micro" -} +func testAccCheckAttachmentExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } -resource "aws_instance" "foo2" { - ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - instance_type = "t2.micro" -} + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) -resource "aws_elb_attachment" "foo1" { - elb = aws_elb.test.id - instance = aws_instance.foo1.id -} + err := tfelb.FindLoadBalancerAttachmentByTwoPartKey(ctx, conn, rs.Primary.Attributes["elb"], rs.Primary.Attributes["instance"]) -resource "aws_elb_attachment" "foo2" { - elb = aws_elb.test.id - instance = aws_instance.foo2.id -} -`) + return err + } } -// swap attachments between resources -func testAccAttachmentConfig_3() string { - return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), ` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } +func testAccAttachmentCheckInstanceCount(v *awstypes.LoadBalancerDescription, expected int) resource.TestCheckFunc { + return func(*terraform.State) error { + if actual := len(v.Instances); actual != expected { + return fmt.Errorf("instance count does not match: expected %d, got %d", expected, actual) + } + return nil + } } +func testAccAttachmentConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinux2HVMEBSX8664AMI(), acctest.ConfigAvailableAZsNoOptInDefaultExclude(), fmt.Sprintf(` resource "aws_elb" "test" { availability_zones = data.aws_availability_zones.available.names + name = %[1]q + listener { 
instance_port = 8000 instance_protocol = "http" @@ -218,50 +141,41 @@ resource "aws_elb" "test" { lb_protocol = "http" } } - -resource "aws_instance" "foo1" { - ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id - instance_type = "t2.micro" +`, rName)) } -resource "aws_instance" "foo2" { +func testAccAttachmentConfig_1(rName string) string { + return acctest.ConfigCompose(testAccAttachmentConfig_base(rName), fmt.Sprintf(` +resource "aws_instance" "test1" { ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id instance_type = "t2.micro" -} -resource "aws_elb_attachment" "foo1" { - elb = aws_elb.test.id - instance = aws_instance.foo2.id + tags = { + Name = %[1]q + } } -resource "aws_elb_attachment" "foo2" { +resource "aws_elb_attachment" "test1" { elb = aws_elb.test.id - instance = aws_instance.foo1.id + instance = aws_instance.test1.id } -`) +`, rName)) } -// destroy attachments -func testAccAttachmentConfig_4() string { - return ` -data "aws_availability_zones" "available" { - state = "available" +func testAccAttachmentConfig_2(rName string) string { + return acctest.ConfigCompose(testAccAttachmentConfig_1(rName), fmt.Sprintf(` +resource "aws_instance" "test2" { + ami = data.aws_ami.amzn2-ami-minimal-hvm-ebs-x86_64.id + instance_type = "t2.micro" - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] + tags = { + Name = %[1]q } } -resource "aws_elb" "test" { - availability_zones = data.aws_availability_zones.available.names - - listener { - instance_port = 8000 - instance_protocol = "http" - lb_port = 80 - lb_protocol = "http" - } +resource "aws_elb_attachment" "test2" { + elb = aws_elb.test.id + instance = aws_instance.test2.id } -` +`, rName)) } diff --git a/internal/service/elb/backend_server_policy.go b/internal/service/elb/backend_server_policy.go index 7f84b5e6a18..8f8aac82f62 100644 --- a/internal/service/elb/backend_server_policy.go +++ b/internal/service/elb/backend_server_policy.go @@ -10,8 +10,8 @@ import ( "strconv" "strings" - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -20,8 +20,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_load_balancer_backend_server_policy") -func ResourceBackendServerPolicy() *schema.Resource { +// @SDKResource("aws_load_balancer_backend_server_policy", name="Backend Server Policy") +func resourceBackendServerPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceBackendServerPolicySet, ReadWithoutTimeout: resourceBackendServerPolicyRead, @@ -48,42 +48,43 @@ func ResourceBackendServerPolicy() *schema.Resource { func resourceBackendServerPolicySet(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) instancePort := d.Get("instance_port").(int) lbName := d.Get("load_balancer_name").(string) - id := BackendServerPolicyCreateResourceID(lbName, instancePort) - input := &elb.SetLoadBalancerPoliciesForBackendServerInput{ - InstancePort: aws.Int64(int64(instancePort)), + id := backendServerPolicyCreateResourceID(lbName, instancePort) + input := &elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: aws.Int32(int32(instancePort)), LoadBalancerName: aws.String(lbName), } if v, ok := d.GetOk("policy_names"); ok && v.(*schema.Set).Len() > 0 { - input.PolicyNames = flex.ExpandStringSet(v.(*schema.Set)) + input.PolicyNames = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := conn.SetLoadBalancerPoliciesForBackendServerWithContext(ctx, input) + _, err := conn.SetLoadBalancerPoliciesForBackendServer(ctx, input) if 
err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic Backend Server Policy (%s): %s", id, err) } - d.SetId(id) + if d.IsNewResource() { + d.SetId(id) + } return append(diags, resourceBackendServerPolicyRead(ctx, d, meta)...) } func resourceBackendServerPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, instancePort, err := BackendServerPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, instancePort, err := backendServerPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - policyNames, err := FindLoadBalancerBackendServerPolicyByTwoPartKey(ctx, conn, lbName, instancePort) + policyNames, err := findLoadBalancerBackendServerPolicyByTwoPartKey(ctx, conn, lbName, instancePort) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELB Classic Backend Server Policy (%s) not found, removing from state", d.Id()) @@ -104,22 +105,19 @@ func resourceBackendServerPolicyRead(ctx context.Context, d *schema.ResourceData func resourceBackendServerPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, instancePort, err := BackendServerPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, instancePort, err := backendServerPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) - } - - input := &elb.SetLoadBalancerPoliciesForBackendServerInput{ - InstancePort: aws.Int64(int64(instancePort)), - LoadBalancerName: aws.String(lbName), - PolicyNames: aws.StringSlice([]string{}), + return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting ELB Classic Backend 
Server Policy: %s", d.Id()) - _, err = conn.SetLoadBalancerPoliciesForBackendServerWithContext(ctx, input) + _, err = conn.SetLoadBalancerPoliciesForBackendServer(ctx, &elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: aws.Int32(int32(instancePort)), + LoadBalancerName: aws.String(lbName), + PolicyNames: []string{}, + }) if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic Backend Server Policy (%s): %s", d.Id(), err) @@ -128,8 +126,8 @@ func resourceBackendServerPolicyDelete(ctx context.Context, d *schema.ResourceDa return diags } -func FindLoadBalancerBackendServerPolicyByTwoPartKey(ctx context.Context, conn *elb.ELB, lbName string, instancePort int) ([]string, error) { - lb, err := FindLoadBalancerByName(ctx, conn, lbName) +func findLoadBalancerBackendServerPolicyByTwoPartKey(ctx context.Context, conn *elasticloadbalancing.Client, lbName string, instancePort int) ([]string, error) { + lb, err := findLoadBalancerByName(ctx, conn, lbName) if err != nil { return nil, err @@ -138,15 +136,11 @@ func FindLoadBalancerBackendServerPolicyByTwoPartKey(ctx context.Context, conn * var policyNames []string for _, v := range lb.BackendServerDescriptions { - if v == nil { - continue - } - - if aws.Int64Value(v.InstancePort) != int64(instancePort) { + if aws.ToInt32(v.InstancePort) != int32(instancePort) { continue } - policyNames = append(policyNames, aws.StringValueSlice(v.PolicyNames)...) + policyNames = append(policyNames, v.PolicyNames...) 
} return policyNames, nil @@ -154,14 +148,14 @@ func FindLoadBalancerBackendServerPolicyByTwoPartKey(ctx context.Context, conn * const backendServerPolicyResourceIDSeparator = ":" -func BackendServerPolicyCreateResourceID(lbName string, instancePort int) string { +func backendServerPolicyCreateResourceID(lbName string, instancePort int) string { parts := []string{lbName, strconv.Itoa(instancePort)} id := strings.Join(parts, backendServerPolicyResourceIDSeparator) return id } -func BackendServerPolicyParseResourceID(id string) (string, int, error) { +func backendServerPolicyParseResourceID(id string) (string, int, error) { parts := strings.Split(id, backendServerPolicyResourceIDSeparator) if len(parts) == 2 && parts[0] != "" && parts[1] != "" { diff --git a/internal/service/elb/backend_server_policy_test.go b/internal/service/elb/backend_server_policy_test.go index f149dda94ae..03039f100a1 100644 --- a/internal/service/elb/backend_server_policy_test.go +++ b/internal/service/elb/backend_server_policy_test.go @@ -115,7 +115,7 @@ func TestAccELBBackendServerPolicy_update(t *testing.T) { func testAccCheckBackendServerPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_load_balancer_backend_policy" { @@ -123,7 +123,6 @@ func testAccCheckBackendServerPolicyDestroy(ctx context.Context) resource.TestCh } lbName, instancePort, err := tfelb.BackendServerPolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } @@ -152,17 +151,12 @@ func testAccCheckBackendServerPolicyExists(ctx context.Context, n string) resour return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ELB Classic Backend Server Policy ID is set") - } - lbName, instancePort, err := 
tfelb.BackendServerPolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) _, err = tfelb.FindLoadBalancerBackendServerPolicyByTwoPartKey(ctx, conn, lbName, instancePort) @@ -189,6 +183,10 @@ resource "aws_iam_server_certificate" "test" { name = %[1]q certificate_body = "%[2]s" private_key = "%[3]s" + + timeouts { + delete = "30m" + } } resource "aws_load_balancer_policy" "test0" { diff --git a/internal/service/elb/consts.go b/internal/service/elb/consts.go new file mode 100644 index 00000000000..a2172ae372f --- /dev/null +++ b/internal/service/elb/consts.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package elb + +// See https://docs.aws.amazon.com/elasticloadbalancing/2012-06-01/APIReference/API_AdditionalAttribute.html#API_AdditionalAttribute_Contents. +const ( + loadBalancerAttributeDesyncMitigationMode = "elb.http.desyncmitigationmode" +) diff --git a/internal/service/elb/enum.go b/internal/service/elb/enum.go deleted file mode 100644 index 527a0fe0999..00000000000 --- a/internal/service/elb/enum.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package elb - -const ( - InstanceStateInService = "InService" -) - -const ( - ReferenceSecurityPolicy = "Reference-Security-Policy" - SSLNegotiationPolicyType = "SSLNegotiationPolicyType" -) diff --git a/internal/service/elb/errors.go b/internal/service/elb/errors.go new file mode 100644 index 00000000000..8ee5fe1aa8d --- /dev/null +++ b/internal/service/elb/errors.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package elb + +const ( + errCodeInvalidTarget = "InvalidTarget" + errCodeLoadBalancerNotFound = "LoadBalancerNotFound" +) diff --git a/internal/service/elb/exports_test.go b/internal/service/elb/exports_test.go new file mode 100644 index 00000000000..eab233c66de --- /dev/null +++ b/internal/service/elb/exports_test.go @@ -0,0 +1,34 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package elb + +// Exports for use in tests only. +var ( + ResourceAppCookieStickinessPolicy = resourceAppCookieStickinessPolicy + ResourceAttachment = resourceAttachment + ResourceBackendServerPolicy = resourceBackendServerPolicy + ResourceCookieStickinessPolicy = resourceCookieStickinessPolicy + ResourceListenerPolicy = resourceListenerPolicy + ResourceLoadBalancer = resourceLoadBalancer + ResourcePolicy = resourcePolicy + ResourceProxyProtocolPolicy = resourceProxyProtocolPolicy + ResourceSSLNegotiationPolicy = resourceSSLNegotiationPolicy + + AccountIDPerRegionMap = accountIDPerRegionMap + AppCookieStickinessPolicyParseResourceID = appCookieStickinessPolicyParseResourceID + BackendServerPolicyParseResourceID = backendServerPolicyParseResourceID + FindLoadBalancerAttachmentByTwoPartKey = findLoadBalancerAttachmentByTwoPartKey + FindLoadBalancerBackendServerPolicyByTwoPartKey = findLoadBalancerBackendServerPolicyByTwoPartKey + FindLoadBalancerByName = findLoadBalancerByName + FindLoadBalancerListenerPolicyByThreePartKey = findLoadBalancerListenerPolicyByThreePartKey + FindLoadBalancerListenerPolicyByTwoPartKey = findLoadBalancerListenerPolicyByTwoPartKey + FindLoadBalancerPolicyByTwoPartKey = findLoadBalancerPolicyByTwoPartKey + HostedZoneIDPerRegionMap = hostedZoneIDPerRegionMap + LBCookieStickinessPolicyParseResourceID = lbCookieStickinessPolicyParseResourceID + ListenerHash = listenerHash + ListenerPolicyParseResourceID = listenerPolicyParseResourceID + PolicyParseResourceID = policyParseResourceID + 
ProxyProtocolPolicyParseResourceID = proxyProtocolPolicyParseResourceID + SSLNegotiationPolicyParseResourceID = sslNegotiationPolicyParseResourceID +) diff --git a/internal/service/elb/flex.go b/internal/service/elb/flex.go index c5ebad226ff..6b23e7fa98e 100644 --- a/internal/service/elb/flex.go +++ b/internal/service/elb/flex.go @@ -8,116 +8,110 @@ import ( "sort" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/names" ) -// Flattens an access log into something that flatmap.Flatten() can handle -func flattenAccessLog(l *elb.AccessLog) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) - - if l == nil { +func flattenAccessLog(apiObject *awstypes.AccessLog) []interface{} { + if apiObject == nil { return nil } - r := make(map[string]interface{}) - if l.S3BucketName != nil { - r[names.AttrBucket] = aws.StringValue(l.S3BucketName) - } + tfList := make([]interface{}, 0, 1) + tfMap := make(map[string]interface{}) - if l.S3BucketPrefix != nil { - r[names.AttrBucketPrefix] = aws.StringValue(l.S3BucketPrefix) + if apiObject.S3BucketName != nil { + tfMap[names.AttrBucket] = aws.ToString(apiObject.S3BucketName) } - if l.EmitInterval != nil { - r[names.AttrInterval] = aws.Int64Value(l.EmitInterval) + if apiObject.S3BucketPrefix != nil { + tfMap[names.AttrBucketPrefix] = aws.ToString(apiObject.S3BucketPrefix) } - if l.Enabled != nil { - r[names.AttrEnabled] = aws.BoolValue(l.Enabled) + if apiObject.EmitInterval != nil { + tfMap[names.AttrInterval] = aws.ToInt32(apiObject.EmitInterval) } - result = append(result, r) + tfMap[names.AttrEnabled] = apiObject.Enabled - return result + tfList = append(tfList, tfMap) + + return tfList } -// Flattens an array of Backend Descriptions into a a map 
of instance_port to policy names. -func flattenBackendPolicies(backends []*elb.BackendServerDescription) map[int64][]string { - policies := make(map[int64][]string) - for _, i := range backends { - for _, p := range i.PolicyNames { - policies[*i.InstancePort] = append(policies[*i.InstancePort], *p) - } - sort.Strings(policies[*i.InstancePort]) +func flattenBackendServerDescriptionPolicies(apiObjects []awstypes.BackendServerDescription) map[int32][]string { + tfMap := make(map[int32][]string) + + for _, apiObject := range apiObjects { + k := aws.ToInt32(apiObject.InstancePort) + tfMap[k] = append(tfMap[k], apiObject.PolicyNames...) + sort.Strings(tfMap[k]) } - return policies + + return tfMap } -// Flattens a health check into something that flatmap.Flatten() -// can handle -func FlattenHealthCheck(check *elb.HealthCheck) []map[string]interface{} { - result := make([]map[string]interface{}, 0, 1) +func flattenHealthCheck(apiObject *awstypes.HealthCheck) []interface{} { + if apiObject == nil { + return nil + } + + tfList := make([]interface{}, 0, 1) + tfMap := make(map[string]interface{}) - chk := make(map[string]interface{}) - chk["unhealthy_threshold"] = aws.Int64Value(check.UnhealthyThreshold) - chk["healthy_threshold"] = aws.Int64Value(check.HealthyThreshold) - chk[names.AttrTarget] = aws.StringValue(check.Target) - chk[names.AttrTimeout] = aws.Int64Value(check.Timeout) - chk[names.AttrInterval] = aws.Int64Value(check.Interval) + tfMap["unhealthy_threshold"] = aws.ToInt32(apiObject.UnhealthyThreshold) + tfMap["healthy_threshold"] = aws.ToInt32(apiObject.HealthyThreshold) + tfMap[names.AttrTarget] = aws.ToString(apiObject.Target) + tfMap[names.AttrTimeout] = aws.ToInt32(apiObject.Timeout) + tfMap[names.AttrInterval] = aws.ToInt32(apiObject.Interval) - result = append(result, chk) + tfList = append(tfList, tfMap) - return result + return tfList } -// Flattens an array of Instances into a []string -func flattenInstances(list []*elb.Instance) []string { - result := 
make([]string, 0, len(list)) - for _, i := range list { - result = append(result, *i.InstanceId) - } - return result +func flattenInstances(apiObjects []awstypes.Instance) []string { + return tfslices.ApplyToAll(apiObjects, func(v awstypes.Instance) string { + return aws.ToString(v.InstanceId) + }) } -// Expands an array of String Instance IDs into a []Instances -func ExpandInstanceString(list []interface{}) []*elb.Instance { - result := make([]*elb.Instance, 0, len(list)) - for _, i := range list { - result = append(result, &elb.Instance{InstanceId: aws.String(i.(string))}) - } - return result +func expandInstances(tfList []interface{}) []awstypes.Instance { + return tfslices.ApplyToAll(tfList, func(v interface{}) awstypes.Instance { + return awstypes.Instance{ + InstanceId: aws.String(v.(string)), + } + }) } -// Takes the result of flatmap.Expand for an array of listeners and -// returns ELB API compatible objects -func ExpandListeners(configured []interface{}) ([]*elb.Listener, error) { - listeners := make([]*elb.Listener, 0, len(configured)) - - // Loop over our configured listeners and create - // an array of aws-sdk-go compatible objects - for _, lRaw := range configured { - data := lRaw.(map[string]interface{}) - - ip := int64(data["instance_port"].(int)) - lp := int64(data["lb_port"].(int)) - l := &elb.Listener{ - InstancePort: &ip, - InstanceProtocol: aws.String(data["instance_protocol"].(string)), - LoadBalancerPort: &lp, - Protocol: aws.String(data["lb_protocol"].(string)), +func expandListeners(tfList []interface{}) ([]awstypes.Listener, error) { + apiObjects := make([]awstypes.Listener, 0) + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + apiObject := awstypes.Listener{ + InstancePort: aws.Int32(int32(tfMap["instance_port"].(int))), + InstanceProtocol: aws.String(tfMap["instance_protocol"].(string)), + LoadBalancerPort: int32(tfMap["lb_port"].(int)), + Protocol: 
aws.String(tfMap["lb_protocol"].(string)), } - if v, ok := data["ssl_certificate_id"]; ok { - l.SSLCertificateId = aws.String(v.(string)) + if v, ok := tfMap["ssl_certificate_id"]; ok { + apiObject.SSLCertificateId = aws.String(v.(string)) } var valid bool - if aws.StringValue(l.SSLCertificateId) != "" { + + if aws.ToString(apiObject.SSLCertificateId) != "" { // validate the protocol is correct for _, p := range []string{"https", "ssl"} { - if (strings.ToLower(*l.InstanceProtocol) == p) || (strings.ToLower(*l.Protocol) == p) { + if (strings.ToLower(aws.ToString(apiObject.InstanceProtocol)) == p) || (strings.ToLower(aws.ToString(apiObject.Protocol)) == p) { valid = true } } @@ -125,72 +119,68 @@ func ExpandListeners(configured []interface{}) ([]*elb.Listener, error) { valid = true } - if valid { - listeners = append(listeners, l) - } else { + if !valid { return nil, errors.New(`"ssl_certificate_id" may be set only when "protocol" is "https" or "ssl"`) } + + apiObjects = append(apiObjects, apiObject) } - return listeners, nil + return apiObjects, nil } -// Flattens an array of Listeners into a []map[string]interface{} -func flattenListeners(list []*elb.ListenerDescription) []map[string]interface{} { - result := make([]map[string]interface{}, 0, len(list)) - for _, i := range list { - l := map[string]interface{}{ - "instance_port": *i.Listener.InstancePort, - "instance_protocol": strings.ToLower(*i.Listener.InstanceProtocol), - "lb_port": *i.Listener.LoadBalancerPort, - "lb_protocol": strings.ToLower(*i.Listener.Protocol), +func flattenListenerDescriptions(apiObjects []awstypes.ListenerDescription) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + "instance_port": aws.ToInt32(apiObject.Listener.InstancePort), + "instance_protocol": strings.ToLower(aws.ToString(apiObject.Listener.InstanceProtocol)), + "lb_port": apiObject.Listener.LoadBalancerPort, + "lb_protocol": 
strings.ToLower(*apiObject.Listener.Protocol), } - // SSLCertificateID is optional, and may be nil - if i.Listener.SSLCertificateId != nil { - l["ssl_certificate_id"] = aws.StringValue(i.Listener.SSLCertificateId) + + if apiObject.Listener.SSLCertificateId != nil { + tfMap["ssl_certificate_id"] = aws.ToString(apiObject.Listener.SSLCertificateId) } - result = append(result, l) + + tfList = append(tfList, tfMap) } - return result + + return tfList } -// Takes the result of flatmap.Expand for an array of policy attributes and -// returns ELB API compatible objects -func ExpandPolicyAttributes(configured []interface{}) []*elb.PolicyAttribute { - attributes := make([]*elb.PolicyAttribute, 0, len(configured)) +func expandPolicyAttributes(tfList []interface{}) []awstypes.PolicyAttribute { + apiObjects := make([]awstypes.PolicyAttribute, 0) - // Loop over our configured attributes and create - // an array of aws-sdk-go compatible objects - for _, lRaw := range configured { - data := lRaw.(map[string]interface{}) + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } - a := &elb.PolicyAttribute{ - AttributeName: aws.String(data[names.AttrName].(string)), - AttributeValue: aws.String(data[names.AttrValue].(string)), + apiObject := awstypes.PolicyAttribute{ + AttributeName: aws.String(tfMap[names.AttrName].(string)), + AttributeValue: aws.String(tfMap[names.AttrValue].(string)), } - attributes = append(attributes, a) + apiObjects = append(apiObjects, apiObject) } - return attributes + return apiObjects } -// Flattens an array of PolicyAttributes into a []interface{} -func FlattenPolicyAttributes(list []*elb.PolicyAttributeDescription) []interface{} { - var attributes []interface{} - - for _, attrdef := range list { - if attrdef == nil { - continue - } +func flattenPolicyAttributeDescriptions(apiObjects []awstypes.PolicyAttributeDescription) []interface{} { + var tfList []interface{} - attribute := map[string]string{ - 
names.AttrName: aws.StringValue(attrdef.AttributeName), - names.AttrValue: aws.StringValue(attrdef.AttributeValue), + for _, apiObject := range apiObjects { + tfMap := map[string]string{ + names.AttrName: aws.ToString(apiObject.AttributeName), + names.AttrValue: aws.ToString(apiObject.AttributeValue), } - attributes = append(attributes, attribute) + tfList = append(tfList, tfMap) } - return attributes + return tfList } diff --git a/internal/service/elb/flex_test.go b/internal/service/elb/flex_test.go deleted file mode 100644 index 192ffbb9649..00000000000 --- a/internal/service/elb/flex_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package elb_test - -import ( - "reflect" - "strings" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform-provider-aws/internal/acctest" - tfelb "github.com/hashicorp/terraform-provider-aws/internal/service/elb" - "github.com/hashicorp/terraform-provider-aws/names" -) - -func TestExpandListeners(t *testing.T) { - t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - "instance_port": 8000, - "lb_port": 80, - "instance_protocol": "http", - "lb_protocol": "http", - }, - map[string]interface{}{ - "instance_port": 8000, - "lb_port": 80, - "instance_protocol": "https", - "lb_protocol": "https", - "ssl_certificate_id": "something", - }, - } - listeners, err := tfelb.ExpandListeners(expanded) - if err != nil { - t.Fatalf("bad: %#v", err) - } - - expected := &elb.Listener{ - InstancePort: aws.Int64(8000), - LoadBalancerPort: aws.Int64(80), - InstanceProtocol: aws.String("http"), - Protocol: aws.String("http"), - } - - if !reflect.DeepEqual(listeners[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - listeners[0], - expected) - } -} - -// this test should produce an error from expandlisteners on an invalid -// combination -func TestExpandListeners_invalid(t *testing.T) { - 
t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - "instance_port": 8000, - "lb_port": 80, - "instance_protocol": "http", - "lb_protocol": "http", - "ssl_certificate_id": "something", - }, - } - _, err := tfelb.ExpandListeners(expanded) - if err != nil { - // Check the error we got - if !strings.Contains(err.Error(), `"ssl_certificate_id" may be set only when "protocol"`) { - t.Fatalf("Got error in TestExpandListeners_invalid, but not what we expected: %s", err) - } - } - - if err == nil { - t.Fatalf("Expected TestExpandListeners_invalid to fail, but passed") - } -} - -func TestFlattenHealthCheck(t *testing.T) { - t.Parallel() - - cases := []struct { - Input *elb.HealthCheck - Output []map[string]interface{} - }{ - { - Input: &elb.HealthCheck{ - UnhealthyThreshold: aws.Int64(10), - HealthyThreshold: aws.Int64(10), - Target: aws.String("HTTP:80/"), - Timeout: aws.Int64(30), - Interval: aws.Int64(30), - }, - Output: []map[string]interface{}{ - { - "unhealthy_threshold": int64(10), - "healthy_threshold": int64(10), - names.AttrTarget: "HTTP:80/", - names.AttrTimeout: int64(30), - names.AttrInterval: int64(30), - }, - }, - }, - } - - for _, tc := range cases { - output := tfelb.FlattenHealthCheck(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} - -func TestExpandInstanceString(t *testing.T) { - t.Parallel() - - expected := []*elb.Instance{ - {InstanceId: aws.String("test-one")}, - {InstanceId: aws.String("test-two")}, - } - - ids := []interface{}{ - "test-one", - "test-two", - } - - expanded := tfelb.ExpandInstanceString(ids) - - if !reflect.DeepEqual(expanded, expected) { - t.Fatalf("Expand Instance String output did not match.\nGot:\n%#v\n\nexpected:\n%#v", expanded, expected) - } -} - -func TestExpandPolicyAttributes(t *testing.T) { - t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - names.AttrName: "Protocol-TLSv1", - names.AttrValue: 
acctest.CtFalse, - }, - map[string]interface{}{ - names.AttrName: "Protocol-TLSv1.1", - names.AttrValue: acctest.CtFalse, - }, - map[string]interface{}{ - names.AttrName: "Protocol-TLSv1.2", - names.AttrValue: acctest.CtTrue, - }, - } - attributes := tfelb.ExpandPolicyAttributes(expanded) - - if len(attributes) != 3 { - t.Fatalf("expected number of attributes to be 3, but got %d", len(attributes)) - } - - expected := &elb.PolicyAttribute{ - AttributeName: aws.String("Protocol-TLSv1.2"), - AttributeValue: aws.String(acctest.CtTrue), - } - - if !reflect.DeepEqual(attributes[2], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - attributes[2], - expected) - } -} - -func TestExpandPolicyAttributes_empty(t *testing.T) { - t.Parallel() - - var expanded []interface{} - - attributes := tfelb.ExpandPolicyAttributes(expanded) - - if len(attributes) != 0 { - t.Fatalf("expected number of attributes to be 0, but got %d", len(attributes)) - } -} - -func TestExpandPolicyAttributes_invalid(t *testing.T) { - t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - names.AttrName: "Protocol-TLSv1.2", - names.AttrValue: acctest.CtTrue, - }, - } - attributes := tfelb.ExpandPolicyAttributes(expanded) - - expected := &elb.PolicyAttribute{ - AttributeName: aws.String("Protocol-TLSv1.2"), - AttributeValue: aws.String(acctest.CtFalse), - } - - if reflect.DeepEqual(attributes[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - attributes[0], - expected) - } -} - -func TestFlattenPolicyAttributes(t *testing.T) { - t.Parallel() - - cases := []struct { - Input []*elb.PolicyAttributeDescription - Output []interface{} - }{ - { - Input: []*elb.PolicyAttributeDescription{ - { - AttributeName: aws.String("Protocol-TLSv1.2"), - AttributeValue: aws.String(acctest.CtTrue), - }, - }, - Output: []interface{}{ - map[string]string{ - names.AttrName: "Protocol-TLSv1.2", - names.AttrValue: acctest.CtTrue, - }, - }, - }, - } - - for _, tc := range cases { - 
output := tfelb.FlattenPolicyAttributes(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } -} diff --git a/internal/service/elb/generate.go b/internal/service/elb/generate.go index 00321adf62d..6cc54c3c562 100644 --- a/internal/service/elb/generate.go +++ b/internal/service/elb/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=DescribeTags -ListTagsInIDElem=LoadBalancerNames -ListTagsInIDNeedSlice=yes -ListTagsOutTagsElem=TagDescriptions[0].Tags -ServiceTagsSlice -TagOp=AddTags -TagInIDElem=LoadBalancerNames -TagInIDNeedSlice=yes -TagKeyType=TagKeyOnly -UntagOp=RemoveTags -UntagInNeedTagKeyType=yes -UntagInTagsElem=Tags -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ListTagsOp=DescribeTags -ListTagsInIDElem=LoadBalancerNames -ListTagsInIDNeedValueSlice=yes -ListTagsOutTagsElem=TagDescriptions[0].Tags -ServiceTagsSlice -TagOp=AddTags -TagInIDElem=LoadBalancerNames -TagInIDNeedValueSlice=yes -TagKeyType=TagKeyOnly -UntagOp=RemoveTags -UntagInNeedTagKeyType=yes -UntagInTagsElem=Tags -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/elb/hosted_zone_id_data_source.go b/internal/service/elb/hosted_zone_id_data_source.go index e78d6e03f4b..f8ccc2e945e 100644 --- a/internal/service/elb/hosted_zone_id_data_source.go +++ b/internal/service/elb/hosted_zone_id_data_source.go @@ -6,7 +6,6 @@ package elb import ( "context" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -14,46 +13,45 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// See https://docs.aws.amazon.com/general/latest/gr/elb.html#elb_region - -var HostedZoneIdPerRegionMap = map[string]string{ - endpoints.AfSouth1RegionID: "Z268VQBMOI5EKX", - endpoints.ApEast1RegionID: "Z3DQVH9N71FHZ0", - endpoints.ApNortheast1RegionID: "Z14GRHDCWA56QT", - endpoints.ApNortheast2RegionID: "ZWKZPGTI48KDX", - endpoints.ApNortheast3RegionID: "Z5LXEXXYW11ES", - endpoints.ApSouth1RegionID: "ZP97RAFLXTNZK", - endpoints.ApSouth2RegionID: "Z0173938T07WNTVAEPZN", - endpoints.ApSoutheast1RegionID: "Z1LMS91P8CMLE5", - endpoints.ApSoutheast2RegionID: "Z1GM3OXH4ZPM65", - endpoints.ApSoutheast3RegionID: "Z08888821HLRG5A9ZRTER", - endpoints.ApSoutheast4RegionID: "Z09517862IB2WZLPXG76F", - endpoints.CaCentral1RegionID: "ZQSVJUPU6J1EY", - endpoints.CaWest1RegionID: "Z06473681N0SF6OS049SD", - endpoints.CnNorth1RegionID: "Z1GDH35T77C1KE", - endpoints.CnNorthwest1RegionID: "ZM7IZAIOVVDZF", - endpoints.EuCentral1RegionID: "Z215JYRZR1TBD5", - endpoints.EuCentral2RegionID: "Z06391101F2ZOEP8P5EB3", - endpoints.EuNorth1RegionID: "Z23TAZ6LKFMNIO", - endpoints.EuSouth1RegionID: "Z3ULH7SSC9OV64", - endpoints.EuSouth2RegionID: "Z0956581394HF5D5LXGAP", - endpoints.EuWest1RegionID: "Z32O12XQLNTSW2", - endpoints.EuWest2RegionID: "ZHURV8PSTC4K8", - endpoints.EuWest3RegionID: "Z3Q77PNBQS71R4", - endpoints.IlCentral1RegionID: "Z09170902867EHPV2DABU", - 
endpoints.MeCentral1RegionID: "Z08230872XQRWHG2XF6I", - endpoints.MeSouth1RegionID: "ZS929ML54UICD", - endpoints.SaEast1RegionID: "Z2P70J7HTTTPLU", - endpoints.UsEast1RegionID: "Z35SXDOTRQ7X7K", - endpoints.UsEast2RegionID: "Z3AADJGX6KTTL2", - endpoints.UsGovEast1RegionID: "Z166TLBEWOO7G0", - endpoints.UsGovWest1RegionID: "Z33AYJ8TM3BH4J", - endpoints.UsWest1RegionID: "Z368ELLRRE2KJ0", - endpoints.UsWest2RegionID: "Z1H1FL5HABSF5", +// See https://docs.aws.amazon.com/general/latest/gr/elb.html#elb_region. +var hostedZoneIDPerRegionMap = map[string]string{ + names.AFSouth1RegionID: "Z268VQBMOI5EKX", + names.APEast1RegionID: "Z3DQVH9N71FHZ0", + names.APNortheast1RegionID: "Z14GRHDCWA56QT", + names.APNortheast2RegionID: "ZWKZPGTI48KDX", + names.APNortheast3RegionID: "Z5LXEXXYW11ES", + names.APSouth1RegionID: "ZP97RAFLXTNZK", + names.APSouth2RegionID: "Z0173938T07WNTVAEPZN", + names.APSoutheast1RegionID: "Z1LMS91P8CMLE5", + names.APSoutheast2RegionID: "Z1GM3OXH4ZPM65", + names.APSoutheast3RegionID: "Z08888821HLRG5A9ZRTER", + names.APSoutheast4RegionID: "Z09517862IB2WZLPXG76F", + names.CACentral1RegionID: "ZQSVJUPU6J1EY", + names.CAWest1RegionID: "Z06473681N0SF6OS049SD", + names.CNNorth1RegionID: "Z1GDH35T77C1KE", + names.CNNorthwest1RegionID: "ZM7IZAIOVVDZF", + names.EUCentral1RegionID: "Z215JYRZR1TBD5", + names.EUCentral2RegionID: "Z06391101F2ZOEP8P5EB3", + names.EUNorth1RegionID: "Z23TAZ6LKFMNIO", + names.EUSouth1RegionID: "Z3ULH7SSC9OV64", + names.EUSouth2RegionID: "Z0956581394HF5D5LXGAP", + names.EUWest1RegionID: "Z32O12XQLNTSW2", + names.EUWest2RegionID: "ZHURV8PSTC4K8", + names.EUWest3RegionID: "Z3Q77PNBQS71R4", + names.ILCentral1RegionID: "Z09170902867EHPV2DABU", + names.MECentral1RegionID: "Z08230872XQRWHG2XF6I", + names.MESouth1RegionID: "ZS929ML54UICD", + names.SAEast1RegionID: "Z2P70J7HTTTPLU", + names.USEast1RegionID: "Z35SXDOTRQ7X7K", + names.USEast2RegionID: "Z3AADJGX6KTTL2", + names.USGovEast1RegionID: "Z166TLBEWOO7G0", + names.USGovWest1RegionID: 
"Z33AYJ8TM3BH4J", + names.USWest1RegionID: "Z368ELLRRE2KJ0", + names.USWest2RegionID: "Z1H1FL5HABSF5", } -// @SDKDataSource("aws_elb_hosted_zone_id") -func DataSourceHostedZoneID() *schema.Resource { +// @SDKDataSource("aws_elb_hosted_zone_id", name="Hosted Zone ID") +func dataSourceHostedZoneID() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceHostedZoneIDRead, @@ -68,15 +66,16 @@ func DataSourceHostedZoneID() *schema.Resource { func dataSourceHostedZoneIDRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + region := meta.(*conns.AWSClient).Region if v, ok := d.GetOk(names.AttrRegion); ok { region = v.(string) } - if zoneId, ok := HostedZoneIdPerRegionMap[region]; ok { - d.SetId(zoneId) + if v, ok := hostedZoneIDPerRegionMap[region]; ok { + d.SetId(v) return diags } - return sdkdiag.AppendErrorf(diags, "Unknown region (%q)", region) + return sdkdiag.AppendErrorf(diags, "unsupported AWS Region: %s", region) } diff --git a/internal/service/elb/hosted_zone_id_data_source_test.go b/internal/service/elb/hosted_zone_id_data_source_test.go index 4c47eac14e8..2028b54da8f 100644 --- a/internal/service/elb/hosted_zone_id_data_source_test.go +++ b/internal/service/elb/hosted_zone_id_data_source_test.go @@ -22,7 +22,7 @@ func TestAccELBHostedZoneIDDataSource_basic(t *testing.T) { { Config: testAccHostedZoneIDDataSourceConfig_basic, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_elb_hosted_zone_id.main", names.AttrID, tfelb.HostedZoneIdPerRegionMap[acctest.Region()]), + resource.TestCheckResourceAttr("data.aws_elb_hosted_zone_id.main", names.AttrID, tfelb.HostedZoneIDPerRegionMap[acctest.Region()]), ), }, { diff --git a/internal/service/elb/lb_cookie_stickiness_policy.go b/internal/service/elb/lb_cookie_stickiness_policy.go index 105fce4de73..aa64a9b0095 100644 --- a/internal/service/elb/lb_cookie_stickiness_policy.go +++ 
b/internal/service/elb/lb_cookie_stickiness_policy.go @@ -10,19 +10,21 @@ import ( "strconv" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_lb_cookie_stickiness_policy") -func ResourceCookieStickinessPolicy() *schema.Resource { +// @SDKResource("aws_lb_cookie_stickiness_policy", name="LB Cookie Stickiness Policy") +func resourceCookieStickinessPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCookieStickinessPolicyCreate, ReadWithoutTimeout: resourceCookieStickinessPolicyRead, @@ -56,14 +58,14 @@ func ResourceCookieStickinessPolicy() *schema.Resource { func resourceCookieStickinessPolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) lbName := d.Get("load_balancer").(string) lbPort := d.Get("lb_port").(int) policyName := d.Get(names.AttrName).(string) - id := LBCookieStickinessPolicyCreateResourceID(lbName, lbPort, policyName) + id := lbCookieStickinessPolicyCreateResourceID(lbName, lbPort, policyName) { - input := &elb.CreateLBCookieStickinessPolicyInput{ + input := &elasticloadbalancing.CreateLBCookieStickinessPolicyInput{ LoadBalancerName: aws.String(lbName), PolicyName: 
aws.String(policyName), } @@ -72,7 +74,7 @@ func resourceCookieStickinessPolicyCreate(ctx context.Context, d *schema.Resourc input.CookieExpirationPeriod = aws.Int64(int64(v.(int))) } - _, err := conn.CreateLBCookieStickinessPolicyWithContext(ctx, input) + _, err := conn.CreateLBCookieStickinessPolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating ELB Classic LB Cookie Stickiness Policy (%s): %s", id, err) @@ -80,13 +82,13 @@ func resourceCookieStickinessPolicyCreate(ctx context.Context, d *schema.Resourc } { - input := &elb.SetLoadBalancerPoliciesOfListenerInput{ + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(int64(lbPort)), - PolicyNames: aws.StringSlice([]string{policyName}), + LoadBalancerPort: int32(lbPort), + PolicyNames: []string{policyName}, } - _, err := conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, input) + _, err := conn.SetLoadBalancerPoliciesOfListener(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic LB Cookie Stickiness Policy (%s): %s", id, err) @@ -100,15 +102,14 @@ func resourceCookieStickinessPolicyCreate(ctx context.Context, d *schema.Resourc func resourceCookieStickinessPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, lbPort, policyName, err := LBCookieStickinessPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, lbPort, policyName, err := lbCookieStickinessPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - policy, err := FindLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) + policy, err := findLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) if 
!d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELB Classic LB Cookie Stickiness Policy (%s) not found, removing from state", d.Id()) @@ -120,14 +121,11 @@ func resourceCookieStickinessPolicyRead(ctx context.Context, d *schema.ResourceD return sdkdiag.AppendErrorf(diags, "reading ELB Classic LB Cookie Stickiness Policy (%s): %s", d.Id(), err) } - if len(policy.PolicyAttributeDescriptions) != 1 || aws.StringValue(policy.PolicyAttributeDescriptions[0].AttributeName) != "CookieExpirationPeriod" { + if len(policy.PolicyAttributeDescriptions) != 1 || aws.ToString(policy.PolicyAttributeDescriptions[0].AttributeName) != "CookieExpirationPeriod" { return sdkdiag.AppendErrorf(diags, "cookie expiration period not found") } - if v, err := strconv.Atoi(aws.StringValue(policy.PolicyAttributeDescriptions[0].AttributeValue)); err != nil { - return sdkdiag.AppendErrorf(diags, "parsing cookie expiration period: %s", err) - } else { - d.Set("cookie_expiration_period", v) - } + + d.Set("cookie_expiration_period", flex.StringToIntValue(policy.PolicyAttributeDescriptions[0].AttributeValue)) d.Set("lb_port", lbPort) d.Set("load_balancer", lbName) d.Set(names.AttrName, policyName) @@ -137,35 +135,42 @@ func resourceCookieStickinessPolicyRead(ctx context.Context, d *schema.ResourceD func resourceCookieStickinessPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, lbPort, policyName, err := LBCookieStickinessPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, lbPort, policyName, err := lbCookieStickinessPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } // Perversely, if we Set an empty list of PolicyNames, we detach the // policies attached to a listener, which is required to delete the // policy 
itself. - input := &elb.SetLoadBalancerPoliciesOfListenerInput{ + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(int64(lbPort)), - PolicyNames: aws.StringSlice([]string{}), + LoadBalancerPort: int32(lbPort), + PolicyNames: []string{}, } - _, err = conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, input) + _, err = conn.SetLoadBalancerPoliciesOfListener(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeLoadBalancerNotFound) { + return diags + } if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic LB Cookie Stickiness Policy (%s): %s", d.Id(), err) } log.Printf("[DEBUG] Deleting ELB Classic LB Cookie Stickiness Policy: %s", d.Id()) - _, err = conn.DeleteLoadBalancerPolicyWithContext(ctx, &elb.DeleteLoadBalancerPolicyInput{ + _, err = conn.DeleteLoadBalancerPolicy(ctx, &elasticloadbalancing.DeleteLoadBalancerPolicyInput{ LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), }) + if tfawserr.ErrCodeEquals(err, errCodeLoadBalancerNotFound) { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ELB Classic LB Cookie Stickiness Policy (%s): %s", d.Id(), err) } @@ -175,14 +180,14 @@ func resourceCookieStickinessPolicyDelete(ctx context.Context, d *schema.Resourc const lbCookieStickinessPolicyResourceIDSeparator = ":" -func LBCookieStickinessPolicyCreateResourceID(lbName string, lbPort int, policyName string) string { +func lbCookieStickinessPolicyCreateResourceID(lbName string, lbPort int, policyName string) string { parts := []string{lbName, strconv.Itoa(lbPort), policyName} id := strings.Join(parts, lbCookieStickinessPolicyResourceIDSeparator) return id } -func LBCookieStickinessPolicyParseResourceID(id string) (string, int, string, error) { +func lbCookieStickinessPolicyParseResourceID(id string) (string, int, string, error) { parts := strings.Split(id, lbCookieStickinessPolicyResourceIDSeparator) if 
len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { diff --git a/internal/service/elb/lb_cookie_stickiness_policy_test.go b/internal/service/elb/lb_cookie_stickiness_policy_test.go index abcca5c86e4..3caec2d223d 100644 --- a/internal/service/elb/lb_cookie_stickiness_policy_test.go +++ b/internal/service/elb/lb_cookie_stickiness_policy_test.go @@ -98,7 +98,7 @@ func TestAccELBCookieStickinessPolicy_Disappears_elb(t *testing.T) { func testAccCheckLBCookieStickinessPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lb_cookie_stickiness_policy" { @@ -106,7 +106,6 @@ func testAccCheckLBCookieStickinessPolicyDestroy(ctx context.Context) resource.T } lbName, lbPort, policyName, err := tfelb.LBCookieStickinessPolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } @@ -135,17 +134,12 @@ func testAccCheckLBCookieStickinessPolicyExists(ctx context.Context, n string) r return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ELB Classic LB Cookie Stickiness Policy ID is set") - } - lbName, lbPort, policyName, err := tfelb.LBCookieStickinessPolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) _, err = tfelb.FindLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) diff --git a/internal/service/elb/lb_ssl_negotiation_policy.go b/internal/service/elb/lb_ssl_negotiation_policy.go index cd03716c743..c4511264e74 100644 --- a/internal/service/elb/lb_ssl_negotiation_policy.go +++ b/internal/service/elb/lb_ssl_negotiation_policy.go @@ -10,8 +10,9 @@ import ( "strconv" "strings" - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -20,8 +21,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_lb_ssl_negotiation_policy") -func ResourceSSLNegotiationPolicy() *schema.Resource { +// @SDKResource("aws_lb_ssl_negotiation_policy", name="SSL Negotiation Policy") +func resourceSSLNegotiationPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceSSLNegotiationPolicyCreate, ReadWithoutTimeout: resourceSSLNegotiationPolicyRead, @@ -72,25 +73,25 @@ func ResourceSSLNegotiationPolicy() *schema.Resource { func resourceSSLNegotiationPolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) lbName := d.Get("load_balancer").(string) lbPort := d.Get("lb_port").(int) policyName := d.Get(names.AttrName).(string) - id := SSLNegotiationPolicyCreateResourceID(lbName, lbPort, policyName) + id := sslNegotiationPolicyCreateResourceID(lbName, lbPort, policyName) { - input := &elb.CreateLoadBalancerPolicyInput{ + input := &elasticloadbalancing.CreateLoadBalancerPolicyInput{ LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), PolicyTypeName: aws.String("SSLNegotiationPolicyType"), } if v, ok := d.GetOk("attribute"); ok && v.(*schema.Set).Len() > 0 { - input.PolicyAttributes = ExpandPolicyAttributes(v.(*schema.Set).List()) + input.PolicyAttributes = expandPolicyAttributes(v.(*schema.Set).List()) } - _, err := conn.CreateLoadBalancerPolicyWithContext(ctx, input) + _, err := 
conn.CreateLoadBalancerPolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating ELB Classic SSL Negotiation Policy (%s): %s", id, err) @@ -98,13 +99,13 @@ func resourceSSLNegotiationPolicyCreate(ctx context.Context, d *schema.ResourceD } { - input := &elb.SetLoadBalancerPoliciesOfListenerInput{ + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(int64(lbPort)), - PolicyNames: aws.StringSlice([]string{policyName}), + LoadBalancerPort: int32(lbPort), + PolicyNames: []string{policyName}, } - _, err := conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, input) + _, err := conn.SetLoadBalancerPoliciesOfListener(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic SSL Negotiation Policy (%s): %s", id, err) @@ -118,15 +119,14 @@ func resourceSSLNegotiationPolicyCreate(ctx context.Context, d *schema.ResourceD func resourceSSLNegotiationPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, lbPort, policyName, err := SSLNegotiationPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, lbPort, policyName, err := sslNegotiationPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - _, err = FindLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) + _, err = findLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELB Classic SSL Negotiation Policy (%s) not found, removing from state", d.Id()) @@ -162,34 +162,41 @@ func resourceSSLNegotiationPolicyRead(ctx context.Context, d *schema.ResourceDat func resourceSSLNegotiationPolicyDelete(ctx 
context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, lbPort, policyName, err := SSLNegotiationPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, lbPort, policyName, err := sslNegotiationPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } // Perversely, if we Set an empty list of PolicyNames, we detach the // policies attached to a listener, which is required to delete the // policy itself. - input := &elb.SetLoadBalancerPoliciesOfListenerInput{ + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(int64(lbPort)), - PolicyNames: aws.StringSlice([]string{}), + LoadBalancerPort: int32(lbPort), + PolicyNames: []string{}, } - _, err = conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, input) + _, err = conn.SetLoadBalancerPoliciesOfListener(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeLoadBalancerNotFound) { + return diags + } if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic SSL Negotiation Policy (%s): %s", d.Id(), err) } - _, err = conn.DeleteLoadBalancerPolicyWithContext(ctx, &elb.DeleteLoadBalancerPolicyInput{ + _, err = conn.DeleteLoadBalancerPolicy(ctx, &elasticloadbalancing.DeleteLoadBalancerPolicyInput{ LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), }) + if tfawserr.ErrCodeEquals(err, errCodeLoadBalancerNotFound) { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ELB Classic SSL Negotiation Policy (%s): %s", d.Id(), err) } @@ -199,14 +206,14 @@ func resourceSSLNegotiationPolicyDelete(ctx context.Context, d *schema.ResourceD const sslNegotiationPolicyResourceIDSeparator = ":" -func SSLNegotiationPolicyCreateResourceID(lbName 
string, lbPort int, policyName string) string { +func sslNegotiationPolicyCreateResourceID(lbName string, lbPort int, policyName string) string { parts := []string{lbName, strconv.Itoa(lbPort), policyName} id := strings.Join(parts, sslNegotiationPolicyResourceIDSeparator) return id } -func SSLNegotiationPolicyParseResourceID(id string) (string, int, string, error) { +func sslNegotiationPolicyParseResourceID(id string) (string, int, string, error) { parts := strings.Split(id, sslNegotiationPolicyResourceIDSeparator) if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { diff --git a/internal/service/elb/lb_ssl_negotiation_policy_test.go b/internal/service/elb/lb_ssl_negotiation_policy_test.go index 449cfdbc2b6..ac47ccabbdc 100644 --- a/internal/service/elb/lb_ssl_negotiation_policy_test.go +++ b/internal/service/elb/lb_ssl_negotiation_policy_test.go @@ -104,7 +104,7 @@ func TestAccELBSSLNegotiationPolicy_disappears(t *testing.T) { func testAccCheckLBSSLNegotiationPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lb_ssl_negotiation_policy" { @@ -112,7 +112,6 @@ func testAccCheckLBSSLNegotiationPolicyDestroy(ctx context.Context) resource.Tes } lbName, lbPort, policyName, err := tfelb.SSLNegotiationPolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } @@ -141,17 +140,12 @@ func testAccCheckLBSSLNegotiationPolicy(ctx context.Context, n string) resource. 
return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ELB Classic SSL Negotiation Policy ID is set") - } - lbName, lbPort, policyName, err := tfelb.SSLNegotiationPolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) _, err = tfelb.FindLoadBalancerListenerPolicyByThreePartKey(ctx, conn, lbName, lbPort, policyName) @@ -165,6 +159,10 @@ resource "aws_iam_server_certificate" "test" { name = %[1]q certificate_body = "%[2]s" private_key = "%[3]s" + + timeouts { + delete = "30m" + } } resource "aws_elb" "test" { @@ -230,6 +228,10 @@ resource "aws_iam_server_certificate" "test" { name_prefix = %[1]q certificate_body = "%[2]s" private_key = "%[3]s" + + timeouts { + delete = "30m" + } } resource "aws_elb" "test" { diff --git a/internal/service/elb/listener_policy.go b/internal/service/elb/listener_policy.go index 8ff47c4f2dc..bde97f4c07f 100644 --- a/internal/service/elb/listener_policy.go +++ b/internal/service/elb/listener_policy.go @@ -10,8 +10,9 @@ import ( "strconv" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -21,8 +22,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_load_balancer_listener_policy") -func ResourceListenerPolicy() *schema.Resource { +// @SDKResource("aws_load_balancer_listener_policy", name="Listener Policy") +func resourceListenerPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceListenerPolicySet, ReadWithoutTimeout: 
resourceListenerPolicyRead, @@ -54,42 +55,43 @@ func ResourceListenerPolicy() *schema.Resource { func resourceListenerPolicySet(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) lbName := d.Get("load_balancer_name").(string) lbPort := d.Get("load_balancer_port").(int) - id := ListenerPolicyCreateResourceID(lbName, lbPort) - input := &elb.SetLoadBalancerPoliciesOfListenerInput{ + id := listenerPolicyCreateResourceID(lbName, lbPort) + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(int64(lbPort)), + LoadBalancerPort: int32(lbPort), } if v, ok := d.GetOk("policy_names"); ok && v.(*schema.Set).Len() > 0 { - input.PolicyNames = flex.ExpandStringSet(v.(*schema.Set)) + input.PolicyNames = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, input) + _, err := conn.SetLoadBalancerPoliciesOfListener(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic Listener Policy (%s): %s", id, err) } - d.SetId(id) + if d.IsNewResource() { + d.SetId(id) + } return append(diags, resourceListenerPolicyRead(ctx, d, meta)...) 
} func resourceListenerPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, lbPort, err := ListenerPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, lbPort, err := listenerPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - policyNames, err := FindLoadBalancerListenerPolicyByTwoPartKey(ctx, conn, lbName, lbPort) + policyNames, err := findLoadBalancerListenerPolicyByTwoPartKey(ctx, conn, lbName, lbPort) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELB Classic Listener Policy (%s) not found, removing from state", d.Id()) @@ -110,22 +112,25 @@ func resourceListenerPolicyRead(ctx context.Context, d *schema.ResourceData, met func resourceListenerPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, lbPort, err := ListenerPolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, lbPort, err := listenerPolicyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - input := &elb.SetLoadBalancerPoliciesOfListenerInput{ + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ LoadBalancerName: aws.String(lbName), - LoadBalancerPort: aws.Int64(int64(lbPort)), - PolicyNames: aws.StringSlice([]string{}), + LoadBalancerPort: int32(lbPort), + PolicyNames: []string{}, } log.Printf("[DEBUG] Deleting ELB Classic Listener Policy: %s", d.Id()) - _, err = conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, input) + _, err = conn.SetLoadBalancerPoliciesOfListener(ctx, input) + + if tfawserr.ErrCodeEquals(err, 
errCodeLoadBalancerNotFound) { + return diags + } if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELB Classic Listener Policy (%s): %s", d.Id(), err) @@ -134,8 +139,8 @@ func resourceListenerPolicyDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func FindLoadBalancerListenerPolicyByTwoPartKey(ctx context.Context, conn *elb.ELB, lbName string, lbPort int) ([]string, error) { - lb, err := FindLoadBalancerByName(ctx, conn, lbName) +func findLoadBalancerListenerPolicyByTwoPartKey(ctx context.Context, conn *elasticloadbalancing.Client, lbName string, lbPort int) ([]string, error) { + lb, err := findLoadBalancerByName(ctx, conn, lbName) if err != nil { return nil, err @@ -144,15 +149,11 @@ func FindLoadBalancerListenerPolicyByTwoPartKey(ctx context.Context, conn *elb.E var policyNames []string for _, v := range lb.ListenerDescriptions { - if v == nil { - continue - } - - if aws.Int64Value(v.Listener.LoadBalancerPort) != int64(lbPort) { + if v.Listener.LoadBalancerPort != int32(lbPort) { continue } - policyNames = append(policyNames, aws.StringValueSlice(v.PolicyNames)...) + policyNames = append(policyNames, v.PolicyNames...) 
} return policyNames, nil @@ -160,14 +161,14 @@ func FindLoadBalancerListenerPolicyByTwoPartKey(ctx context.Context, conn *elb.E const listenerPolicyResourceIDSeparator = ":" -func ListenerPolicyCreateResourceID(lbName string, lbPort int) string { +func listenerPolicyCreateResourceID(lbName string, lbPort int) string { parts := []string{lbName, strconv.Itoa(lbPort)} id := strings.Join(parts, listenerPolicyResourceIDSeparator) return id } -func ListenerPolicyParseResourceID(id string) (string, int, error) { +func listenerPolicyParseResourceID(id string) (string, int, error) { parts := strings.Split(id, listenerPolicyResourceIDSeparator) if len(parts) == 2 && parts[0] != "" && parts[1] != "" { diff --git a/internal/service/elb/listener_policy_test.go b/internal/service/elb/listener_policy_test.go index 1e6da4456d0..ba8090766a4 100644 --- a/internal/service/elb/listener_policy_test.go +++ b/internal/service/elb/listener_policy_test.go @@ -106,7 +106,7 @@ func TestAccELBListenerPolicy_disappears(t *testing.T) { func testAccCheckListenerPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_load_balancer_listener_policy" { @@ -153,7 +153,7 @@ func testAccCheckListenerPolicyExists(ctx context.Context, n string) resource.Te return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) _, err = tfelb.FindLoadBalancerListenerPolicyByTwoPartKey(ctx, conn, lbName, lbPort) @@ -204,6 +204,10 @@ resource "aws_iam_server_certificate" "test" { name_prefix = %[1]q certificate_body = "%[2]s" private_key = "%[3]s" + + timeouts { + delete = "30m" + } } resource "aws_elb" "test" { diff --git a/internal/service/elb/load_balancer.go 
b/internal/service/elb/load_balancer.go index 5fef81e88b4..3b7f02d9263 100644 --- a/internal/service/elb/load_balancer.go +++ b/internal/service/elb/load_balancer.go @@ -14,11 +14,11 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "time" "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -26,6 +26,7 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" @@ -37,7 +38,7 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports // @SDKResource("aws_elb", name="Classic Load Balancer") // @Tags(identifierAttribute="id") -func ResourceLoadBalancer() *schema.Resource { +func resourceLoadBalancer() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLoadBalancerCreate, ReadWithoutTimeout: resourceLoadBalancerRead, @@ -90,7 +91,7 @@ func ResourceLoadBalancer() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 60, - ValidateFunc: ValidAccessLogsInterval, + ValidateFunc: 
validAccessLogsInterval, }, }, }, @@ -154,7 +155,7 @@ func ResourceLoadBalancer() *schema.Resource { names.AttrTarget: { Type: schema.TypeString, Required: true, - ValidateFunc: ValidHeathCheckTarget, + ValidateFunc: validHeathCheckTarget, }, names.AttrTimeout: { Type: schema.TypeInt, @@ -219,7 +220,7 @@ func ResourceLoadBalancer() *schema.Resource { }, }, }, - Set: ListenerHash, + Set: listenerHash, }, names.AttrName: { Type: schema.TypeString, @@ -227,7 +228,7 @@ func ResourceLoadBalancer() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{names.AttrNamePrefix}, - ValidateFunc: ValidName, + ValidateFunc: validName, }, names.AttrNamePrefix: { Type: schema.TypeString, @@ -270,26 +271,26 @@ func ResourceLoadBalancer() *schema.Resource { func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) - listeners, err := ExpandListeners(d.Get("listener").(*schema.Set).List()) + listeners, err := expandListeners(d.Get("listener").(*schema.Set).List()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - elbName := create.NewNameGenerator( + lbName := create.NewNameGenerator( create.WithConfiguredName(d.Get(names.AttrName).(string)), create.WithConfiguredPrefix(d.Get(names.AttrNamePrefix).(string)), create.WithDefaultPrefix("tf-lb-"), ).Generate() - input := &elb.CreateLoadBalancerInput{ + input := &elasticloadbalancing.CreateLoadBalancerInput{ Listeners: listeners, - LoadBalancerName: aws.String(elbName), + LoadBalancerName: aws.String(lbName), Tags: getTagsIn(ctx), } if v, ok := d.GetOk(names.AttrAvailabilityZones); ok && v.(*schema.Set).Len() > 0 { - input.AvailabilityZones = flex.ExpandStringSet(v.(*schema.Set)) + input.AvailabilityZones = flex.ExpandStringValueSet(v.(*schema.Set)) } if _, ok := d.GetOk("internal"); ok { @@ -297,31 +298,31 @@ func 
resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met } if v, ok := d.GetOk(names.AttrSecurityGroups); ok && v.(*schema.Set).Len() > 0 { - input.SecurityGroups = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroups = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk(names.AttrSubnets); ok && v.(*schema.Set).Len() > 0 { - input.Subnets = flex.ExpandStringSet(v.(*schema.Set)) + input.Subnets = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.CreateLoadBalancerWithContext(ctx, input) - }, elb.ErrCodeCertificateNotFoundException) + _, err = tfresource.RetryWhenIsA[*awstypes.CertificateNotFoundException](ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { + return conn.CreateLoadBalancer(ctx, input) + }) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating ELB Classic Load Balancer (%s): %s", elbName, err) + return sdkdiag.AppendErrorf(diags, "creating ELB Classic Load Balancer (%s): %s", lbName, err) } - d.SetId(elbName) + d.SetId(lbName) return append(diags, resourceLoadBalancerUpdate(ctx, d, meta)...) 
} func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) - lb, err := FindLoadBalancerByName(ctx, conn, d.Id()) + lb, err := findLoadBalancerByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELB Classic Load Balancer (%s) not found, removing from state", d.Id()) @@ -344,10 +345,10 @@ func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta Region: meta.(*conns.AWSClient).Region, Service: "elasticloadbalancing", AccountID: meta.(*conns.AWSClient).AccountID, - Resource: fmt.Sprintf("loadbalancer/%s", d.Id()), + Resource: "loadbalancer/" + d.Id(), } d.Set(names.AttrARN, arn.String()) - d.Set(names.AttrAvailabilityZones, flex.FlattenStringList(lb.AvailabilityZones)) + d.Set(names.AttrAvailabilityZones, lb.AvailabilityZones) d.Set("connection_draining", lbAttrs.ConnectionDraining.Enabled) d.Set("connection_draining_timeout", lbAttrs.ConnectionDraining.Timeout) d.Set("cross_zone_load_balancing", lbAttrs.CrossZoneLoadBalancing.Enabled) @@ -358,26 +359,26 @@ func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta d.Set("instances", flattenInstances(lb.Instances)) var scheme bool if lb.Scheme != nil { - scheme = aws.StringValue(lb.Scheme) == "internal" + scheme = aws.ToString(lb.Scheme) == "internal" } d.Set("internal", scheme) - d.Set("listener", flattenListeners(lb.ListenerDescriptions)) + d.Set("listener", flattenListenerDescriptions(lb.ListenerDescriptions)) d.Set(names.AttrName, lb.LoadBalancerName) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.StringValue(lb.LoadBalancerName))) - d.Set(names.AttrSecurityGroups, flex.FlattenStringList(lb.SecurityGroups)) - d.Set(names.AttrSubnets, flex.FlattenStringList(lb.Subnets)) + d.Set(names.AttrNamePrefix, 
create.NamePrefixFromName(aws.ToString(lb.LoadBalancerName))) + d.Set(names.AttrSecurityGroups, lb.SecurityGroups) + d.Set(names.AttrSubnets, lb.Subnets) d.Set("zone_id", lb.CanonicalHostedZoneNameID) if lb.SourceSecurityGroup != nil { group := lb.SourceSecurityGroup.GroupName - if v := aws.StringValue(lb.SourceSecurityGroup.OwnerAlias); v != "" { - group = aws.String(v + "/" + aws.StringValue(lb.SourceSecurityGroup.GroupName)) + if v := aws.ToString(lb.SourceSecurityGroup.OwnerAlias); v != "" { + group = aws.String(v + "/" + aws.ToString(lb.SourceSecurityGroup.GroupName)) } d.Set("source_security_group", group) // Manually look up the ELB Security Group ID, since it's not provided if lb.VPCId != nil { - sg, err := tfec2.FindSecurityGroupByNameAndVPCIDAndOwnerID(ctx, meta.(*conns.AWSClient).EC2Conn(ctx), aws.StringValue(lb.SourceSecurityGroup.GroupName), aws.StringValue(lb.VPCId), aws.StringValue(lb.SourceSecurityGroup.OwnerAlias)) + sg, err := tfec2.FindSecurityGroupByNameAndVPCIDAndOwnerID(ctx, meta.(*conns.AWSClient).EC2Conn(ctx), aws.ToString(lb.SourceSecurityGroup.GroupName), aws.ToString(lb.VPCId), aws.ToString(lb.SourceSecurityGroup.OwnerAlias)) if err != nil { return sdkdiag.AppendErrorf(diags, "reading ELB Classic Load Balancer (%s) security group: %s", d.Id(), err) } else { @@ -401,27 +402,26 @@ func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta // API/state // See https://github.com/hashicorp/terraform/issues/10138 _, n := d.GetChange("access_logs") - elbal := lbAttrs.AccessLog - nl := n.([]interface{}) - if len(nl) == 0 && !aws.BoolValue(elbal.Enabled) { - elbal = nil + accessLog := lbAttrs.AccessLog + if len(n.([]interface{})) == 0 && !accessLog.Enabled { + accessLog = nil } - if err := d.Set("access_logs", flattenAccessLog(elbal)); err != nil { + if err := d.Set("access_logs", flattenAccessLog(accessLog)); err != nil { return sdkdiag.AppendErrorf(diags, "setting access_logs: %s", err) } } for _, attr := range 
lbAttrs.AdditionalAttributes { - switch aws.StringValue(attr.Key) { - case "elb.http.desyncmitigationmode": + switch aws.ToString(attr.Key) { + case loadBalancerAttributeDesyncMitigationMode: d.Set("desync_mitigation_mode", attr.Value) } } // There's only one health check, so save that to state as we // currently can - if aws.StringValue(lb.HealthCheck.Target) != "" { - if err := d.Set(names.AttrHealthCheck, FlattenHealthCheck(lb.HealthCheck)); err != nil { + if aws.ToString(lb.HealthCheck.Target) != "" { + if err := d.Set(names.AttrHealthCheck, flattenHealthCheck(lb.HealthCheck)); err != nil { return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) } } @@ -431,32 +431,30 @@ func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) if d.HasChange("listener") { o, n := d.GetChange("listener") - os := o.(*schema.Set) - ns := n.(*schema.Set) - - remove, _ := ExpandListeners(os.Difference(ns).List()) - add, err := ExpandListeners(ns.Difference(os).List()) + os, ns := o.(*schema.Set), n.(*schema.Set) + del, _ := expandListeners(os.Difference(ns).List()) + add, err := expandListeners(ns.Difference(os).List()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - if len(remove) > 0 { - ports := make([]*int64, 0, len(remove)) - for _, listener := range remove { + if len(del) > 0 { + ports := make([]int32, 0, len(del)) + for _, listener := range del { ports = append(ports, listener.LoadBalancerPort) } - input := &elb.DeleteLoadBalancerListenersInput{ + input := &elasticloadbalancing.DeleteLoadBalancerListenersInput{ LoadBalancerName: aws.String(d.Id()), LoadBalancerPorts: ports, } - _, err := conn.DeleteLoadBalancerListenersWithContext(ctx, input) + _, err := conn.DeleteLoadBalancerListeners(ctx, 
input) if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ELB Classic Load Balancer (%s) listeners: %s", d.Id(), err) @@ -464,7 +462,7 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met } if len(add) > 0 { - input := &elb.CreateLoadBalancerListenersInput{ + input := &elasticloadbalancing.CreateLoadBalancerListenersInput{ Listeners: add, LoadBalancerName: aws.String(d.Id()), } @@ -473,13 +471,13 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met // other listeners on the ELB. Retry here to eliminate that. _, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.CreateLoadBalancerListenersWithContext(ctx, input) + return conn.CreateLoadBalancerListeners(ctx, input) }, func(err error) (bool, error) { - if tfawserr.ErrCodeEquals(err, elb.ErrCodeDuplicateListenerException) { + if errs.IsA[*awstypes.DuplicateListenerException](err) { return true, err } - if tfawserr.ErrMessageContains(err, elb.ErrCodeCertificateNotFoundException, "Server Certificate not found for the key: arn") { + if errs.IsAErrorMessageContains[*awstypes.CertificateNotFoundException](err, "Server Certificate not found for the key: arn") { return true, err } @@ -497,31 +495,29 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met // balancer if d.HasChange("instances") { o, n := d.GetChange("instances") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := ExpandInstanceString(os.Difference(ns).List()) - add := ExpandInstanceString(ns.Difference(os).List()) + os, ns := o.(*schema.Set), n.(*schema.Set) + add, del := expandInstances(ns.Difference(os).List()), expandInstances(os.Difference(ns).List()) if len(add) > 0 { - input := &elb.RegisterInstancesWithLoadBalancerInput{ + input := &elasticloadbalancing.RegisterInstancesWithLoadBalancerInput{ Instances: add, LoadBalancerName: aws.String(d.Id()), } - _, err := 
conn.RegisterInstancesWithLoadBalancerWithContext(ctx, input) + _, err := conn.RegisterInstancesWithLoadBalancer(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "registering ELB Classic Load Balancer (%s) instances: %s", d.Id(), err) } } - if len(remove) > 0 { - input := &elb.DeregisterInstancesFromLoadBalancerInput{ - Instances: remove, + if len(del) > 0 { + input := &elasticloadbalancing.DeregisterInstancesFromLoadBalancerInput{ + Instances: del, LoadBalancerName: aws.String(d.Id()), } - _, err := conn.DeregisterInstancesFromLoadBalancerWithContext(ctx, input) + _, err := conn.DeregisterInstancesFromLoadBalancer(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "deregistering ELB Classic Load Balancer (%s) instances: %s", d.Id(), err) @@ -530,40 +526,40 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met } if d.HasChanges("cross_zone_load_balancing", "idle_timeout", "access_logs", "desync_mitigation_mode") { - input := &elb.ModifyLoadBalancerAttributesInput{ - LoadBalancerAttributes: &elb.LoadBalancerAttributes{ - AdditionalAttributes: []*elb.AdditionalAttribute{ + input := &elasticloadbalancing.ModifyLoadBalancerAttributesInput{ + LoadBalancerAttributes: &awstypes.LoadBalancerAttributes{ + AdditionalAttributes: []awstypes.AdditionalAttribute{ { - Key: aws.String("elb.http.desyncmitigationmode"), + Key: aws.String(loadBalancerAttributeDesyncMitigationMode), Value: aws.String(d.Get("desync_mitigation_mode").(string)), }, }, - CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{ - Enabled: aws.Bool(d.Get("cross_zone_load_balancing").(bool)), + CrossZoneLoadBalancing: &awstypes.CrossZoneLoadBalancing{ + Enabled: d.Get("cross_zone_load_balancing").(bool), }, - ConnectionSettings: &elb.ConnectionSettings{ - IdleTimeout: aws.Int64(int64(d.Get("idle_timeout").(int))), + ConnectionSettings: &awstypes.ConnectionSettings{ + IdleTimeout: aws.Int32(int32(d.Get("idle_timeout").(int))), }, }, LoadBalancerName: 
aws.String(d.Id()), } - if logs := d.Get("access_logs").([]interface{}); len(logs) == 1 { - l := logs[0].(map[string]interface{}) - input.LoadBalancerAttributes.AccessLog = &elb.AccessLog{ - Enabled: aws.Bool(l[names.AttrEnabled].(bool)), - EmitInterval: aws.Int64(int64(l[names.AttrInterval].(int))), - S3BucketName: aws.String(l[names.AttrBucket].(string)), - S3BucketPrefix: aws.String(l[names.AttrBucketPrefix].(string)), + if v := d.Get("access_logs").([]interface{}); len(v) == 1 { + tfMap := v[0].(map[string]interface{}) + input.LoadBalancerAttributes.AccessLog = &awstypes.AccessLog{ + Enabled: tfMap[names.AttrEnabled].(bool), + EmitInterval: aws.Int32(int32(tfMap[names.AttrInterval].(int))), + S3BucketName: aws.String(tfMap[names.AttrBucket].(string)), + S3BucketPrefix: aws.String(tfMap[names.AttrBucketPrefix].(string)), } - } else if len(logs) == 0 { + } else if len(v) == 0 { // disable access logs - input.LoadBalancerAttributes.AccessLog = &elb.AccessLog{ - Enabled: aws.Bool(false), + input.LoadBalancerAttributes.AccessLog = &awstypes.AccessLog{ + Enabled: false, } } - _, err := conn.ModifyLoadBalancerAttributesWithContext(ctx, input) + _, err := conn.ModifyLoadBalancerAttributes(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ELB Classic Load Balancer (%s) attributes: %s", d.Id(), err) @@ -578,17 +574,17 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met // We do timeout changes first since they require us to set draining // to true for a hot second. 
if d.HasChange("connection_draining_timeout") { - input := &elb.ModifyLoadBalancerAttributesInput{ - LoadBalancerAttributes: &elb.LoadBalancerAttributes{ - ConnectionDraining: &elb.ConnectionDraining{ - Enabled: aws.Bool(true), - Timeout: aws.Int64(int64(d.Get("connection_draining_timeout").(int))), + input := &elasticloadbalancing.ModifyLoadBalancerAttributesInput{ + LoadBalancerAttributes: &awstypes.LoadBalancerAttributes{ + ConnectionDraining: &awstypes.ConnectionDraining{ + Enabled: true, + Timeout: aws.Int32(int32(d.Get("connection_draining_timeout").(int))), }, }, LoadBalancerName: aws.String(d.Id()), } - _, err := conn.ModifyLoadBalancerAttributesWithContext(ctx, input) + _, err := conn.ModifyLoadBalancerAttributes(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ELB Classic Load Balancer (%s) attributes: %s", d.Id(), err) @@ -598,16 +594,16 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met // Then we always set connection draining even if there is no change. // This lets us reset to "false" if requested even with a timeout // change. 
- input := &elb.ModifyLoadBalancerAttributesInput{ - LoadBalancerAttributes: &elb.LoadBalancerAttributes{ - ConnectionDraining: &elb.ConnectionDraining{ - Enabled: aws.Bool(d.Get("connection_draining").(bool)), + input := &elasticloadbalancing.ModifyLoadBalancerAttributesInput{ + LoadBalancerAttributes: &awstypes.LoadBalancerAttributes{ + ConnectionDraining: &awstypes.ConnectionDraining{ + Enabled: d.Get("connection_draining").(bool), }, }, LoadBalancerName: aws.String(d.Id()), } - _, err := conn.ModifyLoadBalancerAttributesWithContext(ctx, input) + _, err := conn.ModifyLoadBalancerAttributes(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ELB Classic Load Balancer (%s) attributes: %s", d.Id(), err) @@ -615,19 +611,19 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met } if d.HasChange(names.AttrHealthCheck) { - if hc := d.Get(names.AttrHealthCheck).([]interface{}); len(hc) > 0 { - check := hc[0].(map[string]interface{}) - input := &elb.ConfigureHealthCheckInput{ - HealthCheck: &elb.HealthCheck{ - HealthyThreshold: aws.Int64(int64(check["healthy_threshold"].(int))), - Interval: aws.Int64(int64(check[names.AttrInterval].(int))), - Target: aws.String(check[names.AttrTarget].(string)), - Timeout: aws.Int64(int64(check[names.AttrTimeout].(int))), - UnhealthyThreshold: aws.Int64(int64(check["unhealthy_threshold"].(int))), + if v := d.Get(names.AttrHealthCheck).([]interface{}); len(v) > 0 { + tfMap := v[0].(map[string]interface{}) + input := &elasticloadbalancing.ConfigureHealthCheckInput{ + HealthCheck: &awstypes.HealthCheck{ + HealthyThreshold: aws.Int32(int32(tfMap["healthy_threshold"].(int))), + Interval: aws.Int32(int32(tfMap[names.AttrInterval].(int))), + Target: aws.String(tfMap[names.AttrTarget].(string)), + Timeout: aws.Int32(int32(tfMap[names.AttrTimeout].(int))), + UnhealthyThreshold: aws.Int32(int32(tfMap["unhealthy_threshold"].(int))), }, LoadBalancerName: aws.String(d.Id()), } - _, err := 
conn.ConfigureHealthCheckWithContext(ctx, input) + _, err := conn.ConfigureHealthCheck(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "configuring ELB Classic Load Balancer (%s) health check: %s", d.Id(), err) @@ -636,12 +632,12 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met } if d.HasChange(names.AttrSecurityGroups) { - input := &elb.ApplySecurityGroupsToLoadBalancerInput{ + input := &elasticloadbalancing.ApplySecurityGroupsToLoadBalancerInput{ LoadBalancerName: aws.String(d.Id()), - SecurityGroups: flex.ExpandStringSet(d.Get(names.AttrSecurityGroups).(*schema.Set)), + SecurityGroups: flex.ExpandStringValueSet(d.Get(names.AttrSecurityGroups).(*schema.Set)), } - _, err := conn.ApplySecurityGroupsToLoadBalancerWithContext(ctx, input) + _, err := conn.ApplySecurityGroupsToLoadBalancer(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "applying ELB Classic Load Balancer (%s) security groups: %s", d.Id(), err) @@ -650,32 +646,29 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met if d.HasChange(names.AttrAvailabilityZones) { o, n := d.GetChange(names.AttrAvailabilityZones) - os := o.(*schema.Set) - ns := n.(*schema.Set) + os, ns := o.(*schema.Set), n.(*schema.Set) + add, del := flex.ExpandStringValueSet(ns.Difference(os)), flex.ExpandStringValueSet(os.Difference(ns)) - removed := flex.ExpandStringSet(os.Difference(ns)) - added := flex.ExpandStringSet(ns.Difference(os)) - - if len(added) > 0 { - input := &elb.EnableAvailabilityZonesForLoadBalancerInput{ - AvailabilityZones: added, + if len(add) > 0 { + input := &elasticloadbalancing.EnableAvailabilityZonesForLoadBalancerInput{ + AvailabilityZones: add, LoadBalancerName: aws.String(d.Id()), } - _, err := conn.EnableAvailabilityZonesForLoadBalancerWithContext(ctx, input) + _, err := conn.EnableAvailabilityZonesForLoadBalancer(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "enabling ELB Classic Load Balancer 
(%s) Availability Zones: %s", d.Id(), err) } } - if len(removed) > 0 { - input := &elb.DisableAvailabilityZonesForLoadBalancerInput{ - AvailabilityZones: removed, + if len(del) > 0 { + input := &elasticloadbalancing.DisableAvailabilityZonesForLoadBalancerInput{ + AvailabilityZones: del, LoadBalancerName: aws.String(d.Id()), } - _, err := conn.DisableAvailabilityZonesForLoadBalancerWithContext(ctx, input) + _, err := conn.DisableAvailabilityZonesForLoadBalancer(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "enabling ELB Classic Load Balancer (%s) Availability Zones: %s", d.Id(), err) @@ -685,34 +678,31 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met if d.HasChange(names.AttrSubnets) { o, n := d.GetChange(names.AttrSubnets) - os := o.(*schema.Set) - ns := n.(*schema.Set) + os, ns := o.(*schema.Set), n.(*schema.Set) + add, del := flex.ExpandStringValueSet(ns.Difference(os)), flex.ExpandStringValueSet(os.Difference(ns)) - removed := flex.ExpandStringSet(os.Difference(ns)) - added := flex.ExpandStringSet(ns.Difference(os)) - - if len(removed) > 0 { - input := &elb.DetachLoadBalancerFromSubnetsInput{ + if len(del) > 0 { + input := &elasticloadbalancing.DetachLoadBalancerFromSubnetsInput{ LoadBalancerName: aws.String(d.Id()), - Subnets: removed, + Subnets: del, } - _, err := conn.DetachLoadBalancerFromSubnetsWithContext(ctx, input) + _, err := conn.DetachLoadBalancerFromSubnets(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "detaching ELB Classic Load Balancer (%s) from subnets: %s", d.Id(), err) } } - if len(added) > 0 { - input := &elb.AttachLoadBalancerToSubnetsInput{ + if len(add) > 0 { + input := &elasticloadbalancing.AttachLoadBalancerToSubnetsInput{ LoadBalancerName: aws.String(d.Id()), - Subnets: added, + Subnets: add, } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return 
conn.AttachLoadBalancerToSubnetsWithContext(ctx, input) - }, elb.ErrCodeInvalidConfigurationRequestException, "cannot be attached to multiple subnets in the same AZ") + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidConfigurationRequestException](ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.AttachLoadBalancerToSubnets(ctx, input) + }, "cannot be attached to multiple subnets in the same AZ") if err != nil { return sdkdiag.AppendErrorf(diags, "attaching ELB Classic Load Balancer (%s) to subnets: %s", d.Id(), err) @@ -725,10 +715,10 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met func resourceLoadBalancerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) log.Printf("[INFO] Deleting ELB Classic Load Balancer: %s", d.Id()) - _, err := conn.DeleteLoadBalancerWithContext(ctx, &elb.DeleteLoadBalancerInput{ + _, err := conn.DeleteLoadBalancer(ctx, &elasticloadbalancing.DeleteLoadBalancerInput{ LoadBalancerName: aws.String(d.Id()), }) @@ -745,14 +735,14 @@ func resourceLoadBalancerDelete(ctx context.Context, d *schema.ResourceData, met return diags } -func FindLoadBalancerByName(ctx context.Context, conn *elb.ELB, name string) (*elb.LoadBalancerDescription, error) { - input := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: aws.StringSlice([]string{name}), +func findLoadBalancerByName(ctx context.Context, conn *elasticloadbalancing.Client, name string) (*awstypes.LoadBalancerDescription, error) { + input := &elasticloadbalancing.DescribeLoadBalancersInput{ + LoadBalancerNames: []string{name}, } - output, err := conn.DescribeLoadBalancersWithContext(ctx, input) + output, err := conn.DescribeLoadBalancers(ctx, input) - if tfawserr.ErrCodeEquals(err, elb.ErrCodeAccessPointNotFoundException) { + if 
errs.IsA[*awstypes.AccessPointNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -763,32 +753,17 @@ func FindLoadBalancerByName(ctx context.Context, conn *elb.ELB, name string) (*e return nil, err } - if output == nil || len(output.LoadBalancerDescriptions) == 0 || output.LoadBalancerDescriptions[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output.LoadBalancerDescriptions); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - // Eventual consistency check. - if aws.StringValue(output.LoadBalancerDescriptions[0].LoadBalancerName) != name { - return nil, &retry.NotFoundError{ - LastRequest: input, - } - } - - return output.LoadBalancerDescriptions[0], nil + return tfresource.AssertSingleValueResult(output.LoadBalancerDescriptions) } -func findLoadBalancerAttributesByName(ctx context.Context, conn *elb.ELB, name string) (*elb.LoadBalancerAttributes, error) { - input := &elb.DescribeLoadBalancerAttributesInput{ +func findLoadBalancerAttributesByName(ctx context.Context, conn *elasticloadbalancing.Client, name string) (*awstypes.LoadBalancerAttributes, error) { + input := &elasticloadbalancing.DescribeLoadBalancerAttributesInput{ LoadBalancerName: aws.String(name), } - output, err := conn.DescribeLoadBalancerAttributesWithContext(ctx, input) + output, err := conn.DescribeLoadBalancerAttributes(ctx, input) - if tfawserr.ErrCodeEquals(err, elb.ErrCodeAccessPointNotFoundException) { + if errs.IsA[*awstypes.AccessPointNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -806,7 +781,7 @@ func findLoadBalancerAttributesByName(ctx context.Context, conn *elb.ELB, name s return output.LoadBalancerAttributes, nil } -func ListenerHash(v interface{}) int { +func listenerHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) buf.WriteString(fmt.Sprintf("%d-", m["instance_port"].(int))) @@ -821,7 
+796,7 @@ func ListenerHash(v interface{}) int { return create.StringHashcode(buf.String()) } -func ValidAccessLogsInterval(v interface{}, k string) (ws []string, errors []error) { +func validAccessLogsInterval(v interface{}, k string) (ws []string, errors []error) { value := v.(int) // Check if the value is either 5 or 60 (minutes). @@ -834,7 +809,7 @@ func ValidAccessLogsInterval(v interface{}, k string) (ws []string, errors []err return } -func ValidHeathCheckTarget(v interface{}, k string) (ws []string, errors []error) { +func validHeathCheckTarget(v interface{}, k string) (ws []string, errors []error) { value := v.(string) // Parse the Health Check target value. @@ -945,8 +920,8 @@ func deleteNetworkInterfaces(ctx context.Context, conn *ec2.Client, name string) continue } - attachmentID := aws.StringValue(networkInterface.Attachment.AttachmentId) - networkInterfaceID := aws.StringValue(networkInterface.NetworkInterfaceId) + attachmentID := aws.ToString(networkInterface.Attachment.AttachmentId) + networkInterfaceID := aws.ToString(networkInterface.NetworkInterfaceId) if err := tfec2.DetachNetworkInterface(ctx, conn, networkInterfaceID, attachmentID, tfec2.NetworkInterfaceDetachedTimeout); err != nil { errs = append(errs, err) diff --git a/internal/service/elb/load_balancer_data_source.go b/internal/service/elb/load_balancer_data_source.go index 398cdce4cda..7b716e00ff7 100644 --- a/internal/service/elb/load_balancer_data_source.go +++ b/internal/service/elb/load_balancer_data_source.go @@ -7,9 +7,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -20,8 +20,8 
@@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_elb") -func DataSourceLoadBalancer() *schema.Resource { +// @SDKDataSource("aws_elb", name="Classic Load Balancer") +func dataSourceLoadBalancer() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLoadBalancerRead, Schema: map[string]*schema.Schema{ @@ -144,29 +144,24 @@ func DataSourceLoadBalancer() *schema.Resource { Type: schema.TypeInt, Computed: true, }, - "instance_protocol": { Type: schema.TypeString, Computed: true, }, - "lb_port": { Type: schema.TypeInt, Computed: true, }, - "lb_protocol": { Type: schema.TypeString, Computed: true, }, - "ssl_certificate_id": { Type: schema.TypeString, Computed: true, }, }, }, - Set: ListenerHash, }, names.AttrSecurityGroups: { @@ -208,24 +203,24 @@ func DataSourceLoadBalancer() *schema.Resource { func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) ec2conn := meta.(*conns.AWSClient).EC2Conn(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig lbName := d.Get(names.AttrName).(string) - lb, err := FindLoadBalancerByName(ctx, conn, lbName) + lb, err := findLoadBalancerByName(ctx, conn, lbName) if err != nil { return sdkdiag.AppendErrorf(diags, "reading ELB Classic Load Balancer (%s): %s", lbName, err) } - d.SetId(aws.StringValue(lb.LoadBalancerName)) + d.SetId(aws.ToString(lb.LoadBalancerName)) - input := &elb.DescribeLoadBalancerAttributesInput{ + input := &elasticloadbalancing.DescribeLoadBalancerAttributesInput{ LoadBalancerName: aws.String(d.Id()), } - output, err := conn.DescribeLoadBalancerAttributesWithContext(ctx, input) + output, err := conn.DescribeLoadBalancerAttributes(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading ELB Classic Load Balancer (%s) attributes: %s", d.Id(), err) @@ 
-247,25 +242,25 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met var scheme bool if lb.Scheme != nil { - scheme = aws.StringValue(lb.Scheme) == "internal" + scheme = aws.ToString(lb.Scheme) == "internal" } d.Set("internal", scheme) - d.Set(names.AttrAvailabilityZones, flex.FlattenStringList(lb.AvailabilityZones)) + d.Set(names.AttrAvailabilityZones, flex.FlattenStringValueList(lb.AvailabilityZones)) d.Set("instances", flattenInstances(lb.Instances)) - d.Set("listener", flattenListeners(lb.ListenerDescriptions)) - d.Set(names.AttrSecurityGroups, flex.FlattenStringList(lb.SecurityGroups)) + d.Set("listener", flattenListenerDescriptions(lb.ListenerDescriptions)) + d.Set(names.AttrSecurityGroups, flex.FlattenStringValueList(lb.SecurityGroups)) if lb.SourceSecurityGroup != nil { group := lb.SourceSecurityGroup.GroupName - if lb.SourceSecurityGroup.OwnerAlias != nil && aws.StringValue(lb.SourceSecurityGroup.OwnerAlias) != "" { - group = aws.String(aws.StringValue(lb.SourceSecurityGroup.OwnerAlias) + "/" + aws.StringValue(lb.SourceSecurityGroup.GroupName)) + if lb.SourceSecurityGroup.OwnerAlias != nil && aws.ToString(lb.SourceSecurityGroup.OwnerAlias) != "" { + group = aws.String(aws.ToString(lb.SourceSecurityGroup.OwnerAlias) + "/" + aws.ToString(lb.SourceSecurityGroup.GroupName)) } d.Set("source_security_group", group) // Manually look up the ELB Security Group ID, since it's not provided var elbVpc string if lb.VPCId != nil { - elbVpc = aws.StringValue(lb.VPCId) - sg, err := tfec2.FindSecurityGroupByNameAndVPCIDAndOwnerID(ctx, ec2conn, aws.StringValue(lb.SourceSecurityGroup.GroupName), elbVpc, aws.StringValue(lb.SourceSecurityGroup.OwnerAlias)) + elbVpc = aws.ToString(lb.VPCId) + sg, err := tfec2.FindSecurityGroupByNameAndVPCIDAndOwnerID(ctx, ec2conn, aws.ToString(lb.SourceSecurityGroup.GroupName), elbVpc, aws.ToString(lb.SourceSecurityGroup.OwnerAlias)) if err != nil { return sdkdiag.AppendErrorf(diags, "looking up ELB Security Group 
ID: %s", err) } else { @@ -273,7 +268,7 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met } } } - d.Set(names.AttrSubnets, flex.FlattenStringList(lb.Subnets)) + d.Set(names.AttrSubnets, flex.FlattenStringValueList(lb.Subnets)) if lbAttrs.ConnectionSettings != nil { d.Set("idle_timeout", lbAttrs.ConnectionSettings.IdleTimeout) } @@ -297,7 +292,7 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met _, n := d.GetChange("access_logs") elbal := lbAttrs.AccessLog nl := n.([]interface{}) - if len(nl) == 0 && !aws.BoolValue(elbal.Enabled) { + if len(nl) == 0 && !elbal.Enabled { elbal = nil } if err := d.Set("access_logs", flattenAccessLog(elbal)); err != nil { @@ -306,8 +301,8 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met } for _, attr := range lbAttrs.AdditionalAttributes { - switch aws.StringValue(attr.Key) { - case "elb.http.desyncmitigationmode": + switch aws.ToString(attr.Key) { + case loadBalancerAttributeDesyncMitigationMode: d.Set("desync_mitigation_mode", attr.Value) } } @@ -324,8 +319,8 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met // There's only one health check, so save that to state as we // currently can - if aws.StringValue(lb.HealthCheck.Target) != "" { - d.Set(names.AttrHealthCheck, FlattenHealthCheck(lb.HealthCheck)) + if aws.ToString(lb.HealthCheck.Target) != "" { + d.Set(names.AttrHealthCheck, flattenHealthCheck(lb.HealthCheck)) } return diags diff --git a/internal/service/elb/load_balancer_test.go b/internal/service/elb/load_balancer_test.go index 6ccd0303d07..7328e5a9823 100644 --- a/internal/service/elb/load_balancer_test.go +++ b/internal/service/elb/load_balancer_test.go @@ -6,13 +6,13 @@ package elb_test import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "context" "fmt" - "math/rand" "reflect" "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -57,192 +57,9 @@ func TestLoadBalancerListenerHash(t *testing.T) { } } -func TestValidLoadBalancerNameCannotBeginWithHyphen(t *testing.T) { - t.Parallel() - - var n = "-Testing123" - _, errors := tfelb.ValidName(n, "SampleKey") - - if len(errors) != 1 { - t.Fatalf("Expected the ELB Name to trigger a validation error") - } -} - -func TestValidLoadBalancerNameCanBeAnEmptyString(t *testing.T) { - t.Parallel() - - var n = "" - _, errors := tfelb.ValidName(n, "SampleKey") - - if len(errors) != 0 { - t.Fatalf("Expected the ELB Name to pass validation") - } -} - -func TestValidLoadBalancerNameCannotBeLongerThan32Characters(t *testing.T) { - t.Parallel() - - var n = "Testing123dddddddddddddddddddvvvv" - _, errors := tfelb.ValidName(n, "SampleKey") - - if len(errors) != 1 { - t.Fatalf("Expected the ELB Name to trigger a validation error") - } -} - -func TestValidLoadBalancerNameCannotHaveSpecialCharacters(t *testing.T) { - t.Parallel() - - var n = "Testing123%%" - _, errors := tfelb.ValidName(n, "SampleKey") - - if len(errors) != 1 { - t.Fatalf("Expected the ELB Name to trigger a validation error") - } -} - -func TestValidLoadBalancerNameCannotEndWithHyphen(t *testing.T) { - t.Parallel() - - var n = "Testing123-" - _, errors := tfelb.ValidName(n, "SampleKey") - - if len(errors) != 1 { - t.Fatalf("Expected the ELB Name to trigger a validation error") - } -} - -func TestValidLoadBalancerAccessLogsInterval(t *testing.T) { - t.Parallel() - - type testCases struct { - Value int - ErrCount int - } - - invalidCases := []testCases{ - { - Value: 0, - ErrCount: 1, - }, - { - 
Value: 10, - ErrCount: 1, - }, - { - Value: -1, - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := tfelb.ValidAccessLogsInterval(tc.Value, names.AttrInterval) - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } -} - -func TestValidLoadBalancerHealthCheckTarget(t *testing.T) { - t.Parallel() - - type testCase struct { - Value string - ErrCount int - } - - randomRunes := func(n int) string { - // A complete set of modern Katakana characters. - runes := []rune("アイウエオ" + - "カキクケコガギグゲゴサシスセソザジズゼゾ" + - "タチツテトダヂヅデドナニヌネノハヒフヘホ" + - "バビブベボパピプペポマミムメモヤユヨラリ" + - "ルレロワヰヱヲン") - - s := make([]rune, n) - for i := range s { - s[i] = runes[rand.Intn(len(runes))] - } - return string(s) - } - - validCases := []testCase{ - { - Value: "TCP:1234", - ErrCount: 0, - }, - { - Value: "http:80/test", - ErrCount: 0, - }, - { - Value: fmt.Sprintf("HTTP:8080/%s", randomRunes(5)), - ErrCount: 0, - }, - { - Value: "SSL:8080", - ErrCount: 0, - }, - } - - for _, tc := range validCases { - _, errors := tfelb.ValidHeathCheckTarget(tc.Value, names.AttrTarget) - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) - } - } - - invalidCases := []testCase{ - { - Value: "", - ErrCount: 1, - }, - { - Value: "TCP:", - ErrCount: 1, - }, - { - Value: "TCP:1234/", - ErrCount: 1, - }, - { - Value: "SSL:8080/", - ErrCount: 1, - }, - { - Value: "HTTP:8080", - ErrCount: 1, - }, - { - Value: "incorrect-value", - ErrCount: 1, - }, - { - Value: "TCP:123456", - ErrCount: 1, - }, - { - Value: "incorrect:80/", - ErrCount: 1, - }, - { - Value: fmt.Sprintf("HTTP:8080/%s%s", - sdkacctest.RandStringFromCharSet(512, sdkacctest.CharSetAlpha), randomRunes(512)), - ErrCount: 1, - }, - } - - for _, tc := range invalidCases { - _, errors := tfelb.ValidHeathCheckTarget(tc.Value, names.AttrTarget) - if len(errors) != tc.ErrCount { - t.Fatalf("Expected %q to trigger a validation error.", tc.Value) - } - } 
-} - func TestAccELBLoadBalancer_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elb.test" @@ -296,7 +113,7 @@ func TestAccELBLoadBalancer_basic(t *testing.T) { func TestAccELBLoadBalancer_disappears(t *testing.T) { ctx := acctest.Context(t) - var loadBalancer elb.LoadBalancerDescription + var loadBalancer awstypes.LoadBalancerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elb.test" @@ -320,7 +137,7 @@ func TestAccELBLoadBalancer_disappears(t *testing.T) { func TestAccELBLoadBalancer_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription resourceName := "aws_elb.test" resource.ParallelTest(t, resource.TestCase{ @@ -348,7 +165,7 @@ func TestAccELBLoadBalancer_nameGenerated(t *testing.T) { func TestAccELBLoadBalancer_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription resourceName := "aws_elb.test" resource.ParallelTest(t, resource.TestCase{ @@ -376,7 +193,7 @@ func TestAccELBLoadBalancer_namePrefix(t *testing.T) { func TestAccELBLoadBalancer_tags(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elb.test" @@ -425,7 +242,7 @@ func TestAccELBLoadBalancer_tags(t *testing.T) { func TestAccELBLoadBalancer_fullCharacterRange(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription resourceName := "aws_elb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -448,7 +265,7 @@ func TestAccELBLoadBalancer_fullCharacterRange(t *testing.T) { func 
TestAccELBLoadBalancer_AccessLogs_enabled(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription resourceName := "aws_elb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -489,7 +306,7 @@ func TestAccELBLoadBalancer_AccessLogs_enabled(t *testing.T) { func TestAccELBLoadBalancer_AccessLogs_disabled(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription resourceName := "aws_elb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -528,7 +345,7 @@ func TestAccELBLoadBalancer_AccessLogs_disabled(t *testing.T) { func TestAccELBLoadBalancer_generatesNameForZeroValue(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription generatedNameRegexp := regexache.MustCompile("^tf-lb-") resourceName := "aws_elb.test" @@ -551,7 +368,7 @@ func TestAccELBLoadBalancer_generatesNameForZeroValue(t *testing.T) { func TestAccELBLoadBalancer_availabilityZones(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elb.test" @@ -582,7 +399,7 @@ func TestAccELBLoadBalancer_availabilityZones(t *testing.T) { func TestAccELBLoadBalancer_ListenerSSLCertificateID_iamServerCertificate(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription key := acctest.TLSRSAPrivateKeyPEM(t, 2048) certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -624,7 +441,7 @@ func TestAccELBLoadBalancer_ListenerSSLCertificateID_iamServerCertificate(t *tes func TestAccELBLoadBalancer_Swap_subnets(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription 
+ var conf awstypes.LoadBalancerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elb.test" @@ -644,15 +461,15 @@ func TestAccELBLoadBalancer_Swap_subnets(t *testing.T) { { Config: testAccLoadBalancerConfig_subnetSwap(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, "aws_elb.test", &conf), - resource.TestCheckResourceAttr("aws_elb.test", "subnets.#", acctest.Ct2), + testAccCheckLoadBalancerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "subnets.#", acctest.Ct2), ), }, { Config: testAccLoadBalancerConfig_subnetCompleteSwap(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckLoadBalancerExists(ctx, "aws_elb.test", &conf), - resource.TestCheckResourceAttr("aws_elb.test", "subnets.#", acctest.Ct2), + testAccCheckLoadBalancerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "subnets.#", acctest.Ct2), ), }, }, @@ -661,7 +478,7 @@ func TestAccELBLoadBalancer_Swap_subnets(t *testing.T) { func TestAccELBLoadBalancer_instanceAttaching(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elb.test" @@ -701,7 +518,7 @@ func TestAccELBLoadBalancer_instanceAttaching(t *testing.T) { func TestAccELBLoadBalancer_listener(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elb.test" @@ -772,12 +589,12 @@ func TestAccELBLoadBalancer_listener(t *testing.T) { { PreConfig: func() { // Simulate out of band listener removal - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) - input := &elb.DeleteLoadBalancerListenersInput{ + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) + input := 
&elasticloadbalancing.DeleteLoadBalancerListenersInput{ LoadBalancerName: conf.LoadBalancerName, - LoadBalancerPorts: []*int64{aws.Int64(80)}, + LoadBalancerPorts: []int32{80}, } - if _, err := conn.DeleteLoadBalancerListenersWithContext(ctx, input); err != nil { + if _, err := conn.DeleteLoadBalancerListeners(ctx, input); err != nil { t.Fatalf("Error deleting listener: %s", err) } }, @@ -796,19 +613,19 @@ func TestAccELBLoadBalancer_listener(t *testing.T) { { PreConfig: func() { // Simulate out of band listener addition - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) - input := &elb.CreateLoadBalancerListenersInput{ + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) + input := &elasticloadbalancing.CreateLoadBalancerListenersInput{ LoadBalancerName: conf.LoadBalancerName, - Listeners: []*elb.Listener{ + Listeners: []awstypes.Listener{ { - InstancePort: aws.Int64(22), + InstancePort: aws.Int32(22), InstanceProtocol: aws.String("tcp"), - LoadBalancerPort: aws.Int64(22), + LoadBalancerPort: int32(22), Protocol: aws.String("tcp"), }, }, } - if _, err := conn.CreateLoadBalancerListenersWithContext(ctx, input); err != nil { + if _, err := conn.CreateLoadBalancerListeners(ctx, input); err != nil { t.Fatalf("Error creating listener: %s", err) } }, @@ -830,7 +647,7 @@ func TestAccELBLoadBalancer_listener(t *testing.T) { func TestAccELBLoadBalancer_healthCheck(t *testing.T) { ctx := acctest.Context(t) - var conf elb.LoadBalancerDescription + var conf awstypes.LoadBalancerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_elb.test" @@ -1020,7 +837,7 @@ func TestAccELBLoadBalancer_desyncMitigationMode_update(t *testing.T) { func testAccCheckLoadBalancerDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) for _, rs := range 
s.RootModule().Resources { if rs.Type != "aws_elb" { @@ -1044,7 +861,7 @@ func testAccCheckLoadBalancerDestroy(ctx context.Context) resource.TestCheckFunc } } -func testAccCheckLoadBalancerExists(ctx context.Context, n string, v *elb.LoadBalancerDescription) resource.TestCheckFunc { +func testAccCheckLoadBalancerExists(ctx context.Context, n string, v *awstypes.LoadBalancerDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -1055,7 +872,7 @@ func testAccCheckLoadBalancerExists(ctx context.Context, n string, v *elb.LoadBa return fmt.Errorf("No ELB Classic Load Balancer ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) output, err := tfelb.FindLoadBalancerByName(ctx, conn, rs.Primary.ID) @@ -1069,12 +886,12 @@ func testAccCheckLoadBalancerExists(ctx context.Context, n string, v *elb.LoadBa } } -func testAccCheckLoadBalancerAttributes(conf *elb.LoadBalancerDescription) resource.TestCheckFunc { +func testAccCheckLoadBalancerAttributes(conf *awstypes.LoadBalancerDescription) resource.TestCheckFunc { return func(s *terraform.State) error { - l := elb.Listener{ - InstancePort: aws.Int64(8000), + l := awstypes.Listener{ + InstancePort: aws.Int32(8000), InstanceProtocol: aws.String("HTTP"), - LoadBalancerPort: aws.Int64(80), + LoadBalancerPort: int32(80), Protocol: aws.String("HTTP"), } @@ -1579,10 +1396,14 @@ resource "aws_security_group" "test" { func testAccLoadBalancerConfig_listenerIAMServerCertificate(rName, certificate, key, lbProtocol string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -resource "aws_iam_server_certificate" "test_cert" { +resource "aws_iam_server_certificate" "test" { name = %[1]q certificate_body = "%[2]s" private_key = "%[3]s" + + timeouts { + delete = "30m" + } } resource "aws_elb" "test" { @@ -1595,7 +1416,7 @@ resource "aws_elb" 
"test" { instance_protocol = %[4]q lb_port = 443 lb_protocol = %[4]q - ssl_certificate_id = aws_iam_server_certificate.test_cert.arn + ssl_certificate_id = aws_iam_server_certificate.test.arn } } `, rName, acctest.TLSPEMEscapeNewlines(certificate), acctest.TLSPEMEscapeNewlines(key), lbProtocol)) @@ -1603,10 +1424,14 @@ resource "aws_elb" "test" { func testAccLoadBalancerConfig_listenerIAMServerCertificateAddInvalidListener(rName, certificate, key string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -resource "aws_iam_server_certificate" "test_cert" { +resource "aws_iam_server_certificate" "test" { name = %[1]q certificate_body = "%[2]s" private_key = "%[3]s" + + timeouts { + delete = "30m" + } } resource "aws_elb" "test" { @@ -1619,7 +1444,7 @@ resource "aws_elb" "test" { instance_protocol = "https" lb_port = 443 lb_protocol = "https" - ssl_certificate_id = aws_iam_server_certificate.test_cert.arn + ssl_certificate_id = aws_iam_server_certificate.test.arn } # lb_protocol tcp and ssl_certificate_id is not valid @@ -1628,7 +1453,7 @@ resource "aws_elb" "test" { instance_protocol = "tcp" lb_port = 8443 lb_protocol = "tcp" - ssl_certificate_id = aws_iam_server_certificate.test_cert.arn + ssl_certificate_id = aws_iam_server_certificate.test.arn } } `, rName, acctest.TLSPEMEscapeNewlines(certificate), acctest.TLSPEMEscapeNewlines(key))) diff --git a/internal/service/elb/policy.go b/internal/service/elb/policy.go index c7266714b6e..fb54ae5117c 100644 --- a/internal/service/elb/policy.go +++ b/internal/service/elb/policy.go @@ -7,21 +7,25 @@ import ( "context" "fmt" "log" + "slices" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" + 
"github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_load_balancer_policy") -func ResourcePolicy() *schema.Resource { +// @SDKResource("aws_load_balancer_policy", name="Load Balancer Policy") +func resourcePolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePolicyCreate, ReadWithoutTimeout: resourcePolicyRead, @@ -74,22 +78,22 @@ func ResourcePolicy() *schema.Resource { func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) + conn := meta.(*conns.AWSClient).ELBClient(ctx) lbName := d.Get("load_balancer_name").(string) policyName := d.Get("policy_name").(string) - id := PolicyCreateResourceID(lbName, policyName) - input := &elb.CreateLoadBalancerPolicyInput{ + id := policyCreateResourceID(lbName, policyName) + input := &elasticloadbalancing.CreateLoadBalancerPolicyInput{ LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), PolicyTypeName: aws.String(d.Get("policy_type_name").(string)), } if v, ok := d.GetOk("policy_attribute"); ok && v.(*schema.Set).Len() > 0 { - input.PolicyAttributes = ExpandPolicyAttributes(v.(*schema.Set).List()) + input.PolicyAttributes = expandPolicyAttributes(v.(*schema.Set).List()) } - _, err := conn.CreateLoadBalancerPolicyWithContext(ctx, input) + _, err := conn.CreateLoadBalancerPolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, 
"creating ELB Classic Load Balancer Policy (%s): %s", id, err) @@ -102,15 +106,14 @@ func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta inte func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, policyName, err := PolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, policyName, err := policyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - policy, err := FindLoadBalancerPolicyByTwoPartKey(ctx, conn, lbName, policyName) + policy, err := findLoadBalancerPolicyByTwoPartKey(ctx, conn, lbName, policyName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELB Classic Load Balancer Policy (%s) not found, removing from state", d.Id()) @@ -123,7 +126,7 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interf } d.Set("load_balancer_name", lbName) - if err := d.Set("policy_attribute", FlattenPolicyAttributes(policy.PolicyAttributeDescriptions)); err != nil { + if err := d.Set("policy_attribute", flattenPolicyAttributeDescriptions(policy.PolicyAttributeDescriptions)); err != nil { return sdkdiag.AppendErrorf(diags, "setting policy_attribute: %s", err) } d.Set("policy_name", policyName) @@ -134,50 +137,59 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interf func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - reassignments := Reassignment{} - - lbName, policyName, err := PolicyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).ELBClient(ctx) + lbName, policyName, err := policyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing 
resource ID: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - assigned, err := resourcePolicyAssigned(ctx, policyName, lbName, conn) - if err != nil { - return sdkdiag.AppendErrorf(diags, "determining assignment status of Load Balancer Policy %s: %s", policyName, err) - } + reassignments := &policyReassignments{} + + err = findPolicyAttachmentByTwoPartKey(ctx, conn, lbName, policyName) + switch { + case tfresource.NotFound(err): + // Policy not attached. + case err != nil: + return sdkdiag.AppendErrorf(diags, "reading ELB Classic Load Balancer Policy Attachment (%s/%s): %s", lbName, policyName, err) + default: + reassignments, err = unassignPolicy(ctx, conn, lbName, policyName) - if assigned { - reassignments, err = resourcePolicyUnassign(ctx, policyName, lbName, conn) if err != nil { - return sdkdiag.AppendErrorf(diags, "unassigning Load Balancer Policy %s: %s", policyName, err) + return sdkdiag.AppendFromErr(diags, err) } } - request := &elb.DeleteLoadBalancerPolicyInput{ + input := &elasticloadbalancing.DeleteLoadBalancerPolicyInput{ LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), } - if _, err := conn.DeleteLoadBalancerPolicyWithContext(ctx, request); err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Load Balancer Policy %s: %s", d.Id(), err) + _, err = conn.DeleteLoadBalancerPolicy(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting ELB Classic Load Balancer Policy (%s): %s", d.Id(), err) } diags = append(diags, sdkdiag.WrapDiagsf(resourcePolicyCreate(ctx, d, meta), "updating ELB Classic Policy (%s)", d.Id())...) 
+ if diags.HasError() { return diags } - for _, listenerAssignment := range reassignments.listenerPolicies { - if _, err := conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, listenerAssignment); err != nil { - return sdkdiag.AppendErrorf(diags, "setting LoadBalancerPoliciesOfListener: %s", err) + for _, input := range reassignments.listenerPolicies { + _, err := conn.SetLoadBalancerPoliciesOfListener(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting ELB Classic Listener Policy (%s): %s", lbName, err) } } - for _, backendServerAssignment := range reassignments.backendServerPolicies { - if _, err := conn.SetLoadBalancerPoliciesForBackendServerWithContext(ctx, backendServerAssignment); err != nil { - return sdkdiag.AppendErrorf(diags, "setting LoadBalancerPoliciesForBackendServer: %s", err) + for _, input := range reassignments.backendServerPolicies { + _, err := conn.SetLoadBalancerPoliciesForBackendServer(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting ELB Classic Backend Server Policy (%s): %s", lbName, err) } } @@ -186,166 +198,134 @@ func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta inte func resourcePolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - - lbName, policyName, err := PolicyParseResourceID(d.Id()) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "parsing resource ID: %s", err) - } + conn := meta.(*conns.AWSClient).ELBClient(ctx) - assigned, err := resourcePolicyAssigned(ctx, policyName, lbName, conn) + lbName, policyName, err := policyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "determining assignment status of Load Balancer Policy %s: %s", policyName, err) - } - - if assigned { - _, err := resourcePolicyUnassign(ctx, policyName, lbName, conn) - if err != nil { - return sdkdiag.AppendErrorf(diags, 
"unassigning Load Balancer Policy %s: %s", policyName, err) + return sdkdiag.AppendFromErr(diags, err) + } + + err = findPolicyAttachmentByTwoPartKey(ctx, conn, lbName, policyName) + switch { + case tfresource.NotFound(err): + // Policy not attached. + case err != nil: + return sdkdiag.AppendErrorf(diags, "reading ELB Classic Load Balancer Policy Attachment (%s/%s): %s", lbName, policyName, err) + default: + if _, err := unassignPolicy(ctx, conn, lbName, policyName); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } - request := &elb.DeleteLoadBalancerPolicyInput{ + log.Printf("[DEBUG] Deleting ELB Classic Load Balancer Policy: %s", d.Id()) + _, err = conn.DeleteLoadBalancerPolicy(ctx, &elasticloadbalancing.DeleteLoadBalancerPolicyInput{ LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), + }) + + if tfawserr.ErrCodeEquals(err, errCodeLoadBalancerNotFound) { + return diags } - if _, err := conn.DeleteLoadBalancerPolicyWithContext(ctx, request); err != nil { + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ELB Classic Load Balancer Policy (%s): %s", d.Id(), err) } return diags } -func resourcePolicyAssigned(ctx context.Context, policyName, loadBalancerName string, conn *elb.ELB) (bool, error) { - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - } - - describeResp, err := conn.DescribeLoadBalancersWithContext(ctx, describeElbOpts) - - if tfawserr.ErrCodeEquals(err, elb.ErrCodeAccessPointNotFoundException) { - return false, nil - } +func findPolicyAttachmentByTwoPartKey(ctx context.Context, conn *elasticloadbalancing.Client, lbName, policyName string) error { + lb, err := findLoadBalancerByName(ctx, conn, lbName) if err != nil { - return false, fmt.Errorf("retrieving ELB description: %s", err) + return err } - if len(describeResp.LoadBalancerDescriptions) != 1 { - return false, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) - } 
+ attached := slices.ContainsFunc(lb.BackendServerDescriptions, func(v awstypes.BackendServerDescription) bool { + return slices.Contains(v.PolicyNames, policyName) + }) - lb := describeResp.LoadBalancerDescriptions[0] - assigned := false - for _, backendServer := range lb.BackendServerDescriptions { - for _, name := range backendServer.PolicyNames { - if policyName == aws.StringValue(name) { - assigned = true - break - } - } + if attached { + return nil } - for _, listener := range lb.ListenerDescriptions { - for _, name := range listener.PolicyNames { - if policyName == aws.StringValue(name) { - assigned = true - break - } - } + attached = slices.ContainsFunc(lb.ListenerDescriptions, func(v awstypes.ListenerDescription) bool { + return slices.Contains(v.PolicyNames, policyName) + }) + + if attached { + return nil } - return assigned, nil + return &retry.NotFoundError{} } -type Reassignment struct { - backendServerPolicies []*elb.SetLoadBalancerPoliciesForBackendServerInput - listenerPolicies []*elb.SetLoadBalancerPoliciesOfListenerInput +type policyReassignments struct { + backendServerPolicies []*elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput + listenerPolicies []*elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput } -func resourcePolicyUnassign(ctx context.Context, policyName, loadBalancerName string, conn *elb.ELB) (Reassignment, error) { - reassignments := Reassignment{} - - describeElbOpts := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(loadBalancerName)}, - } +func unassignPolicy(ctx context.Context, conn *elasticloadbalancing.Client, lbName, policyName string) (*policyReassignments, error) { + reassignments := &policyReassignments{} - describeResp, err := conn.DescribeLoadBalancersWithContext(ctx, describeElbOpts) + lb, err := findLoadBalancerByName(ctx, conn, lbName) - if tfawserr.ErrCodeEquals(err, elb.ErrCodeAccessPointNotFoundException) { + if tfresource.NotFound(err) { return reassignments, nil 
} if err != nil { - return reassignments, fmt.Errorf("retrieving ELB description: %s", err) + return nil, err } - if len(describeResp.LoadBalancerDescriptions) != 1 { - return reassignments, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) - } - - lb := describeResp.LoadBalancerDescriptions[0] - - for _, backendServer := range lb.BackendServerDescriptions { - policies := []*string{} + for _, v := range lb.BackendServerDescriptions { + policies := tfslices.Filter(v.PolicyNames, func(v string) bool { + return v != policyName + }) - for _, name := range backendServer.PolicyNames { - if policyName != aws.StringValue(name) { - policies = append(policies, name) - } - } + if len(v.PolicyNames) != len(policies) { + reassignments.backendServerPolicies = append(reassignments.backendServerPolicies, &elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: v.InstancePort, + LoadBalancerName: aws.String(lbName), + PolicyNames: v.PolicyNames, + }) - if len(backendServer.PolicyNames) != len(policies) { - setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ - LoadBalancerName: aws.String(loadBalancerName), - InstancePort: aws.Int64(*backendServer.InstancePort), + input := &elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: v.InstancePort, + LoadBalancerName: aws.String(lbName), PolicyNames: policies, } - reassignOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ - LoadBalancerName: aws.String(loadBalancerName), - InstancePort: aws.Int64(*backendServer.InstancePort), - PolicyNames: backendServer.PolicyNames, - } + _, err = conn.SetLoadBalancerPoliciesForBackendServer(ctx, input) - reassignments.backendServerPolicies = append(reassignments.backendServerPolicies, reassignOpts) - - _, err = conn.SetLoadBalancerPoliciesForBackendServerWithContext(ctx, setOpts) if err != nil { - return reassignments, fmt.Errorf("Setting Load Balancer Policies for Backend Server: %s", err) + return nil, 
fmt.Errorf("setting ELB Classic Backend Server Policy (%s): %w", lbName, err) } } } - for _, listener := range lb.ListenerDescriptions { - policies := []*string{} + for _, v := range lb.ListenerDescriptions { + policies := tfslices.Filter(v.PolicyNames, func(v string) bool { + return v != policyName + }) - for _, name := range listener.PolicyNames { - if policyName != aws.StringValue(name) { - policies = append(policies, name) - } - } + if len(v.PolicyNames) != len(policies) { + reassignments.listenerPolicies = append(reassignments.listenerPolicies, &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(lbName), + LoadBalancerPort: v.Listener.LoadBalancerPort, + PolicyNames: v.PolicyNames, + }) - if len(listener.PolicyNames) != len(policies) { - setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(loadBalancerName), - LoadBalancerPort: aws.Int64(*listener.Listener.LoadBalancerPort), + input := &elasticloadbalancing.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(lbName), + LoadBalancerPort: v.Listener.LoadBalancerPort, PolicyNames: policies, } - reassignOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ - LoadBalancerName: aws.String(loadBalancerName), - LoadBalancerPort: aws.Int64(*listener.Listener.LoadBalancerPort), - PolicyNames: listener.PolicyNames, - } - - reassignments.listenerPolicies = append(reassignments.listenerPolicies, reassignOpts) + _, err = conn.SetLoadBalancerPoliciesOfListener(ctx, input) - _, err = conn.SetLoadBalancerPoliciesOfListenerWithContext(ctx, setOpts) if err != nil { - return reassignments, fmt.Errorf("Setting Load Balancer Policies of Listener: %s", err) + return reassignments, fmt.Errorf("setting ELB Classic Listener Policy (%s): %w", lbName, err) } } } @@ -374,14 +354,14 @@ func suppressPolicyAttributeDiffs(k, old, new string, d *schema.ResourceData) bo const policyResourceIDSeparator = ":" -func PolicyCreateResourceID(lbName, 
policyName string) string { +func policyCreateResourceID(lbName, policyName string) string { parts := []string{lbName, policyName} id := strings.Join(parts, policyResourceIDSeparator) return id } -func PolicyParseResourceID(id string) (string, string, error) { +func policyParseResourceID(id string) (string, string, error) { parts := strings.Split(id, backendServerPolicyResourceIDSeparator) if len(parts) == 2 && parts[0] != "" && parts[1] != "" { diff --git a/internal/service/elb/policy_test.go b/internal/service/elb/policy_test.go index 0e8d3e6eacb..87053c4007e 100644 --- a/internal/service/elb/policy_test.go +++ b/internal/service/elb/policy_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/elb" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccELBPolicy_basic(t *testing.T) { ctx := acctest.Context(t) - var policy elb.PolicyDescription + var policy awstypes.PolicyDescription resourceName := "aws_load_balancer_policy.test-policy" rInt := sdkacctest.RandInt() @@ -44,7 +44,7 @@ func TestAccELBPolicy_basic(t *testing.T) { func TestAccELBPolicy_disappears(t *testing.T) { ctx := acctest.Context(t) - var policy elb.PolicyDescription + var policy awstypes.PolicyDescription resourceName := "aws_load_balancer_policy.test-policy" rInt := sdkacctest.RandInt() @@ -68,7 +68,7 @@ func TestAccELBPolicy_disappears(t *testing.T) { func TestAccELBPolicy_LBCookieStickinessPolicyType_computedAttributesOnly(t *testing.T) { ctx := acctest.Context(t) - var policy elb.PolicyDescription + var policy awstypes.PolicyDescription resourceName := "aws_load_balancer_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) policyTypeName := 
"LBCookieStickinessPolicyType" @@ -93,7 +93,7 @@ func TestAccELBPolicy_LBCookieStickinessPolicyType_computedAttributesOnly(t *tes func TestAccELBPolicy_SSLNegotiationPolicyType_computedAttributesOnly(t *testing.T) { ctx := acctest.Context(t) - var policy elb.PolicyDescription + var policy awstypes.PolicyDescription resourceName := "aws_load_balancer_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -104,10 +104,10 @@ func TestAccELBPolicy_SSLNegotiationPolicyType_computedAttributesOnly(t *testing CheckDestroy: testAccCheckPolicyDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccPolicyConfig_typeNameOnly(rName, tfelb.SSLNegotiationPolicyType), + Config: testAccPolicyConfig_typeNameOnly(rName, "SSLNegotiationPolicyType"), Check: resource.ComposeTestCheckFunc( testAccCheckPolicyExists(ctx, resourceName, &policy), - resource.TestCheckResourceAttr(resourceName, "policy_type_name", tfelb.SSLNegotiationPolicyType), + resource.TestCheckResourceAttr(resourceName, "policy_type_name", "SSLNegotiationPolicyType"), resource.TestMatchResourceAttr(resourceName, "policy_attribute.#", regexache.MustCompile(`[^0]+`)), ), }, @@ -117,7 +117,7 @@ func TestAccELBPolicy_SSLNegotiationPolicyType_computedAttributesOnly(t *testing func TestAccELBPolicy_SSLNegotiationPolicyType_customPolicy(t *testing.T) { ctx := acctest.Context(t) - var policy elb.PolicyDescription + var policy awstypes.PolicyDescription resourceName := "aws_load_balancer_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -132,7 +132,7 @@ func TestAccELBPolicy_SSLNegotiationPolicyType_customPolicy(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckPolicyExists(ctx, resourceName, &policy), resource.TestCheckResourceAttr(resourceName, "policy_name", rName), - resource.TestCheckResourceAttr(resourceName, "policy_type_name", tfelb.SSLNegotiationPolicyType), + resource.TestCheckResourceAttr(resourceName, "policy_type_name", "SSLNegotiationPolicyType"), 
resource.TestCheckResourceAttr(resourceName, "policy_attribute.#", acctest.Ct2), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "policy_attribute.*", map[string]string{ names.AttrName: "Protocol-TLSv1.1", @@ -149,7 +149,7 @@ func TestAccELBPolicy_SSLNegotiationPolicyType_customPolicy(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckPolicyExists(ctx, resourceName, &policy), resource.TestCheckResourceAttr(resourceName, "policy_name", rName), - resource.TestCheckResourceAttr(resourceName, "policy_type_name", tfelb.SSLNegotiationPolicyType), + resource.TestCheckResourceAttr(resourceName, "policy_type_name", "SSLNegotiationPolicyType"), resource.TestCheckResourceAttr(resourceName, "policy_attribute.#", acctest.Ct2), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "policy_attribute.*", map[string]string{ names.AttrName: "Protocol-TLSv1.2", @@ -167,7 +167,7 @@ func TestAccELBPolicy_SSLNegotiationPolicyType_customPolicy(t *testing.T) { func TestAccELBPolicy_SSLSecurityPolicy_predefined(t *testing.T) { ctx := acctest.Context(t) - var policy elb.PolicyDescription + var policy awstypes.PolicyDescription resourceName := "aws_load_balancer_policy.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) predefinedSecurityPolicy := "ELBSecurityPolicy-TLS-1-2-2017-01" @@ -185,10 +185,10 @@ func TestAccELBPolicy_SSLSecurityPolicy_predefined(t *testing.T) { testAccCheckPolicyExists(ctx, resourceName, &policy), resource.TestCheckResourceAttr(resourceName, "policy_attribute.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "policy_attribute.*", map[string]string{ - names.AttrName: tfelb.ReferenceSecurityPolicy, + names.AttrName: "Reference-Security-Policy", names.AttrValue: predefinedSecurityPolicy, }), - resource.TestCheckResourceAttr(resourceName, "policy_type_name", tfelb.SSLNegotiationPolicyType), + resource.TestCheckResourceAttr(resourceName, "policy_type_name", "SSLNegotiationPolicyType"), ), }, { @@ -197,10 
+197,10 @@ func TestAccELBPolicy_SSLSecurityPolicy_predefined(t *testing.T) { testAccCheckPolicyExists(ctx, resourceName, &policy), resource.TestCheckResourceAttr(resourceName, "policy_attribute.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "policy_attribute.*", map[string]string{ - names.AttrName: tfelb.ReferenceSecurityPolicy, + names.AttrName: "Reference-Security-Policy", names.AttrValue: predefinedSecurityPolicyUpdated, }), - resource.TestCheckResourceAttr(resourceName, "policy_type_name", tfelb.SSLNegotiationPolicyType), + resource.TestCheckResourceAttr(resourceName, "policy_type_name", "SSLNegotiationPolicyType"), ), }, }, @@ -209,7 +209,7 @@ func TestAccELBPolicy_SSLSecurityPolicy_predefined(t *testing.T) { func TestAccELBPolicy_updateWhileAssigned(t *testing.T) { ctx := acctest.Context(t) - var policy elb.PolicyDescription + var policy awstypes.PolicyDescription resourceName := "aws_load_balancer_policy.test-policy" rInt := sdkacctest.RandInt() @@ -235,24 +235,19 @@ func TestAccELBPolicy_updateWhileAssigned(t *testing.T) { }) } -func testAccCheckPolicyExists(ctx context.Context, n string, v *elb.PolicyDescription) resource.TestCheckFunc { +func testAccCheckPolicyExists(ctx context.Context, n string, v *awstypes.PolicyDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ELB Classic Load Balancer Policy is set") - } - lbName, policyName, err := tfelb.PolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) output, err := tfelb.FindLoadBalancerPolicyByTwoPartKey(ctx, conn, lbName, policyName) @@ -268,7 +263,7 @@ func testAccCheckPolicyExists(ctx context.Context, n string, v *elb.PolicyDescri func testAccCheckPolicyDestroy(ctx 
context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_load_balancer_policy" { @@ -276,7 +271,6 @@ func testAccCheckPolicyDestroy(ctx context.Context) resource.TestCheckFunc { } lbName, policyName, err := tfelb.PolicyParseResourceID(rs.Primary.ID) - if err != nil { return err } diff --git a/internal/service/elb/proxy_protocol_policy.go b/internal/service/elb/proxy_protocol_policy.go index 91aab9a4223..7de74b0f3db 100644 --- a/internal/service/elb/proxy_protocol_policy.go +++ b/internal/service/elb/proxy_protocol_policy.go @@ -7,20 +7,24 @@ import ( "context" "fmt" "log" - "strconv" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -// @SDKResource("aws_proxy_protocol_policy") -func ResourceProxyProtocolPolicy() *schema.Resource { +// @SDKResource("aws_proxy_protocol_policy", name="Proxy Protocol Policy") +func resourceProxyProtocolPolicy() *schema.Resource { return &schema.Resource{ 
CreateWithoutTimeout: resourceProxyProtocolPolicyCreate, ReadWithoutTimeout: resourceProxyProtocolPolicyRead, @@ -28,16 +32,17 @@ func ResourceProxyProtocolPolicy() *schema.Resource { DeleteWithoutTimeout: resourceProxyProtocolPolicyDelete, Schema: map[string]*schema.Schema{ - "load_balancer": { - Type: schema.TypeString, - Required: true, - }, - "instance_ports": { Type: schema.TypeSet, - Elem: &schema.Schema{Type: schema.TypeString}, Required: true, - Set: schema.HashString, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.StringIsInt32, + }, + }, + "load_balancer": { + Type: schema.TypeString, + Required: true, }, }, } @@ -45,12 +50,12 @@ func ResourceProxyProtocolPolicy() *schema.Resource { func resourceProxyProtocolPolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - elbname := aws.String(d.Get("load_balancer").(string)) + conn := meta.(*conns.AWSClient).ELBClient(ctx) - input := &elb.CreateLoadBalancerPolicyInput{ - LoadBalancerName: elbname, - PolicyAttributes: []*elb.PolicyAttribute{ + lbName := d.Get("load_balancer").(string) + input := &elasticloadbalancing.CreateLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(lbName), + PolicyAttributes: []awstypes.PolicyAttribute{ { AttributeName: aws.String("ProxyProtocol"), AttributeValue: aws.String("True"), @@ -60,92 +65,78 @@ func resourceProxyProtocolPolicyCreate(ctx context.Context, d *schema.ResourceDa PolicyTypeName: aws.String("ProxyProtocolPolicyType"), } - // Create a policy - log.Printf("[DEBUG] ELB create a policy %s from policy type %s", - *input.PolicyName, *input.PolicyTypeName) + _, err := conn.CreateLoadBalancerPolicy(ctx, input) - if _, err := conn.CreateLoadBalancerPolicyWithContext(ctx, input); err != nil { - return sdkdiag.AppendErrorf(diags, "creating a policy %s: %s", aws.StringValue(input.PolicyName), err) + if err != nil { + return 
sdkdiag.AppendErrorf(diags, "creating ELB Classic Proxy Protocol Policy (%s): %s", lbName, err) } - d.SetId(fmt.Sprintf("%s:%s", *elbname, *input.PolicyName)) + d.SetId(proxyProtocolPolicyCreateResourceID(lbName, aws.ToString(input.PolicyName))) return append(diags, resourceProxyProtocolPolicyUpdate(ctx, d, meta)...) } func resourceProxyProtocolPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - elbname := d.Get("load_balancer").(string) + conn := meta.(*conns.AWSClient).ELBClient(ctx) - // Retrieve the current ELB policies for updating the state - req := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{aws.String(elbname)}, - } - resp, err := conn.DescribeLoadBalancersWithContext(ctx, req) + lbName, _, err := proxyProtocolPolicyParseResourceID(d.Id()) if err != nil { - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, elb.ErrCodeAccessPointNotFoundException) { - log.Printf("[WARN] ELB Classic Proxy Protocol Policy (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } - return sdkdiag.AppendErrorf(diags, "retrieving ELB attributes: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) + lb, err := findLoadBalancerByName(ctx, conn, lbName) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] ELB Classic Proxy Protocol Policy (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } - ports := []*string{} - for ip := range backends { - ipstr := strconv.Itoa(int(ip)) - ports = append(ports, &ipstr) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ELB Classic Load Balancer (%s): %s", lbName, err) } + + ports := tfslices.ApplyToAll(tfmaps.Keys(flattenBackendServerDescriptionPolicies(lb.BackendServerDescriptions)), flex.Int32ValueToStringValue) 
d.Set("instance_ports", ports) - d.Set("load_balancer", elbname) + d.Set("load_balancer", lbName) + return diags } func resourceProxyProtocolPolicyUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - elbname := aws.String(d.Get("load_balancer").(string)) + conn := meta.(*conns.AWSClient).ELBClient(ctx) - // Retrieve the current ELB policies for updating the state - req := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{elbname}, + lbName, policyName, err := proxyProtocolPolicyParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) } - resp, err := conn.DescribeLoadBalancersWithContext(ctx, req) + + lb, err := findLoadBalancerByName(ctx, conn, lbName) + if err != nil { - return sdkdiag.AppendErrorf(diags, "retrieving ELB attributes: %s", err) + return sdkdiag.AppendErrorf(diags, "reading ELB Classic Load Balancer (%s): %s", lbName, err) } - backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) - policyName := resourceProxyProtocolPolicyParseID(d.Id()) + backendPolicies := flattenBackendServerDescriptionPolicies(lb.BackendServerDescriptions) if d.HasChange("instance_ports") { o, n := d.GetChange("instance_ports") - os := o.(*schema.Set) - ns := n.(*schema.Set) - remove := os.Difference(ns).List() - add := ns.Difference(os).List() + os, ns := o.(*schema.Set), n.(*schema.Set) + add, del := ns.Difference(os), os.Difference(ns) - inputs := []*elb.SetLoadBalancerPoliciesForBackendServerInput{} + var inputs []*elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput + inputs = append(inputs, expandRemoveProxyProtocolPolicyInputs(policyName, flex.ExpandStringValueSet(del), backendPolicies)...) + inputs = append(inputs, expandAddProxyProtocolPolicyInputs(policyName, flex.ExpandStringValueSet(add), backendPolicies)...) 
- i, err := resourceProxyProtocolPolicyRemove(policyName, remove, backends) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating ELB Classic Proxy Protocol Policy (%s): %s", d.Id(), err) - } - inputs = append(inputs, i...) + for _, input := range inputs { + input.LoadBalancerName = aws.String(lbName) - i, err = resourceProxyProtocolPolicyAdd(policyName, add, backends) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating ELB Classic Proxy Protocol Policy (%s): %s", d.Id(), err) - } - inputs = append(inputs, i...) + _, err := conn.SetLoadBalancerPoliciesForBackendServer(ctx, input) - for _, input := range inputs { - input.LoadBalancerName = elbname - if _, err := conn.SetLoadBalancerPoliciesForBackendServerWithContext(ctx, input); err != nil { - return sdkdiag.AppendErrorf(diags, "setting policy for backend: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting ELB Classic Backend Server Policy (%s): %s", lbName, err) } } } @@ -155,110 +146,119 @@ func resourceProxyProtocolPolicyUpdate(ctx context.Context, d *schema.ResourceDa func resourceProxyProtocolPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBConn(ctx) - elbname := aws.String(d.Get("load_balancer").(string)) + conn := meta.(*conns.AWSClient).ELBClient(ctx) - // Retrieve the current ELB policies for updating the state - req := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{elbname}, - } - resp, err := conn.DescribeLoadBalancersWithContext(ctx, req) + lbName, policyName, err := proxyProtocolPolicyParseResourceID(d.Id()) if err != nil { - if tfawserr.ErrCodeEquals(err, elb.ErrCodeAccessPointNotFoundException) { - return diags - } - return sdkdiag.AppendErrorf(diags, "retrieving ELB attributes: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - backends := flattenBackendPolicies(resp.LoadBalancerDescriptions[0].BackendServerDescriptions) - 
ports := d.Get("instance_ports").(*schema.Set).List() - policyName := resourceProxyProtocolPolicyParseID(d.Id()) + lb, err := findLoadBalancerByName(ctx, conn, lbName) + + if tfresource.NotFound(err) { + return diags + } - inputs, err := resourceProxyProtocolPolicyRemove(policyName, ports, backends) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting ELB Classic Proxy Protocol Policy (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading ELB Classic Load Balancer (%s): %s", lbName, err) } - for _, input := range inputs { - input.LoadBalancerName = elbname - if _, err := conn.SetLoadBalancerPoliciesForBackendServerWithContext(ctx, input); err != nil { - return sdkdiag.AppendErrorf(diags, "setting policy for backend: %s", err) + + backendPolicies := flattenBackendServerDescriptionPolicies(lb.BackendServerDescriptions) + ports := flex.ExpandStringValueSet(d.Get("instance_ports").(*schema.Set)) + + for _, input := range expandRemoveProxyProtocolPolicyInputs(policyName, ports, backendPolicies) { + input.LoadBalancerName = aws.String(lbName) + + _, err := conn.SetLoadBalancerPoliciesForBackendServer(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting ELB Classic Backend Server Policy (%s): %s", lbName, err) } } - pOpt := &elb.DeleteLoadBalancerPolicyInput{ - LoadBalancerName: elbname, + _, err = conn.DeleteLoadBalancerPolicy(ctx, &elasticloadbalancing.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(lbName), PolicyName: aws.String(policyName), - } - if _, err := conn.DeleteLoadBalancerPolicyWithContext(ctx, pOpt); err != nil { - return sdkdiag.AppendErrorf(diags, "removing a policy from load balancer: %s", err) + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting ELB Classic Proxy Protocol Policy (%s): %s", lbName, err) } return diags } -func resourceProxyProtocolPolicyRemove(policyName string, ports []interface{}, backends map[int64][]string) 
([]*elb.SetLoadBalancerPoliciesForBackendServerInput, error) { - inputs := make([]*elb.SetLoadBalancerPoliciesForBackendServerInput, 0, len(ports)) - for _, p := range ports { - ip, err := strconv.ParseInt(p.(string), 10, 64) - if err != nil { - return nil, fmt.Errorf("detaching the policy: %s", err) - } +func expandAddProxyProtocolPolicyInputs(policyName string, ports []string, backendPolicies map[int32][]string) []*elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput { + apiObjects := make([]*elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput, 0, len(ports)) - newPolicies := []*string{} - curPolicies, found := backends[ip] - if !found { - // No policy for this instance port found, just skip it. - continue - } + for _, p := range ports { + port := flex.StringValueToInt32Value(p) + newPolicies := []string{} + curPolicies := backendPolicies[port] for _, p := range curPolicies { if p == policyName { - // remove the policy + // Just remove it for now. It will be back later. 
continue } - newPolicies = append(newPolicies, aws.String(p)) + + newPolicies = append(newPolicies, p) } + newPolicies = append(newPolicies, policyName) - inputs = append(inputs, &elb.SetLoadBalancerPoliciesForBackendServerInput{ - InstancePort: &ip, + apiObjects = append(apiObjects, &elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: aws.Int32(port), PolicyNames: newPolicies, }) } - return inputs, nil + return apiObjects } -func resourceProxyProtocolPolicyAdd(policyName string, ports []interface{}, backends map[int64][]string) ([]*elb.SetLoadBalancerPoliciesForBackendServerInput, error) { - inputs := make([]*elb.SetLoadBalancerPoliciesForBackendServerInput, 0, len(ports)) +func expandRemoveProxyProtocolPolicyInputs(policyName string, ports []string, backendPolicies map[int32][]string) []*elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput { + apiObjects := make([]*elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput, 0, len(ports)) + for _, p := range ports { - ip, err := strconv.ParseInt(p.(string), 10, 64) - if err != nil { - return nil, fmt.Errorf("attaching the policy: %s", err) + port := flex.StringValueToInt32Value(p) + + newPolicies := []string{} + curPolicies, found := backendPolicies[port] + if !found { + // No policy for this instance port found, just skip it. + continue } - newPolicies := []*string{} - curPolicies := backends[ip] for _, p := range curPolicies { if p == policyName { - // Just remove it for now. It will be back later. 
+ // remove the policy continue } - newPolicies = append(newPolicies, aws.String(p)) + newPolicies = append(newPolicies, p) } - newPolicies = append(newPolicies, aws.String(policyName)) - inputs = append(inputs, &elb.SetLoadBalancerPoliciesForBackendServerInput{ - InstancePort: &ip, + apiObjects = append(apiObjects, &elasticloadbalancing.SetLoadBalancerPoliciesForBackendServerInput{ + InstancePort: aws.Int32(port), PolicyNames: newPolicies, }) } - return inputs, nil + + return apiObjects +} + +const proxyProtocolPolicyResourceIDSeparator = ":" + +func proxyProtocolPolicyCreateResourceID(lbName, policyName string) string { + parts := []string{lbName, policyName} + id := strings.Join(parts, proxyProtocolPolicyResourceIDSeparator) + + return id } -// resourceProxyProtocolPolicyParseID takes an ID and parses it into -// it's constituent parts. You need two axes (LB name, policy name) -// to create or identify a proxy protocol policy in AWS's API. -func resourceProxyProtocolPolicyParseID(id string) string { - parts := strings.SplitN(id, ":", 2) - // We currently omit the ELB name as it is not currently used anywhere - return parts[1] +func proxyProtocolPolicyParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, proxyProtocolPolicyResourceIDSeparator, 2) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected LBNAME%[2]sPOLICYNAME", id, proxyProtocolPolicyResourceIDSeparator) } diff --git a/internal/service/elb/proxy_protocol_policy_test.go b/internal/service/elb/proxy_protocol_policy_test.go index f996a058df4..4ad7fd71e24 100644 --- a/internal/service/elb/proxy_protocol_policy_test.go +++ b/internal/service/elb/proxy_protocol_policy_test.go @@ -4,85 +4,51 @@ package elb_test import ( - "context" "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - 
"github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" - "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/names" ) func TestAccELBProxyProtocolPolicy_basic(t *testing.T) { ctx := acctest.Context(t) lbName := fmt.Sprintf("tf-test-lb-%s", sdkacctest.RandString(5)) + resourceName := "aws_proxy_protocol_policy.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.ELBServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckProxyProtocolPolicyDestroy(ctx), + CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { Config: testAccProxyProtocolPolicyConfig_basic(lbName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "load_balancer", lbName), - resource.TestCheckResourceAttr( - "aws_proxy_protocol_policy.smtp", "instance_ports.#", acctest.Ct1), - resource.TestCheckTypeSetElemAttr("aws_proxy_protocol_policy.smtp", "instance_ports.*", "25"), + resource.TestCheckResourceAttr(resourceName, "load_balancer", lbName), + resource.TestCheckResourceAttr(resourceName, "instance_ports.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttr(resourceName, "instance_ports.*", "25"), ), }, { Config: testAccProxyProtocolPolicyConfig_update(lbName), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("aws_proxy_protocol_policy.smtp", "load_balancer", lbName), - resource.TestCheckResourceAttr("aws_proxy_protocol_policy.smtp", "instance_ports.#", acctest.Ct2), - resource.TestCheckTypeSetElemAttr("aws_proxy_protocol_policy.smtp", "instance_ports.*", "25"), - 
resource.TestCheckTypeSetElemAttr("aws_proxy_protocol_policy.smtp", "instance_ports.*", "587"), + resource.TestCheckResourceAttr(resourceName, "load_balancer", lbName), + resource.TestCheckResourceAttr(resourceName, "instance_ports.#", acctest.Ct2), + resource.TestCheckTypeSetElemAttr(resourceName, "instance_ports.*", "25"), + resource.TestCheckTypeSetElemAttr(resourceName, "instance_ports.*", "587"), ), }, }, }) } -func testAccCheckProxyProtocolPolicyDestroy(ctx context.Context) resource.TestCheckFunc { - return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBConn(ctx) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_placement_group" { - continue - } - - req := &elb.DescribeLoadBalancersInput{ - LoadBalancerNames: []*string{ - aws.String(rs.Primary.Attributes["load_balancer"])}, - } - _, err := conn.DescribeLoadBalancersWithContext(ctx, req) - if err != nil { - // Verify the error is what we want - if tfawserr.ErrCodeEquals(err, elb.ErrCodeAccessPointNotFoundException) { - continue - } - return err - } - - return fmt.Errorf("still exists") - } - return nil - } -} - func testAccProxyProtocolPolicyConfig_basic(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" +resource "aws_elb" "test" { + name = %[1]q availability_zones = [data.aws_availability_zones.available.names[0]] listener { @@ -100,8 +66,8 @@ resource "aws_elb" "lb" { } } -resource "aws_proxy_protocol_policy" "smtp" { - load_balancer = aws_elb.lb.name +resource "aws_proxy_protocol_policy" "test" { + load_balancer = aws_elb.test.name instance_ports = ["25"] } `, rName)) @@ -109,8 +75,8 @@ resource "aws_proxy_protocol_policy" "smtp" { func testAccProxyProtocolPolicyConfig_update(rName string) string { return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -resource "aws_elb" "lb" { - name = "%s" +resource "aws_elb" "test" { + name 
= %[1]q availability_zones = [data.aws_availability_zones.available.names[0]] listener { @@ -128,8 +94,8 @@ resource "aws_elb" "lb" { } } -resource "aws_proxy_protocol_policy" "smtp" { - load_balancer = aws_elb.lb.name +resource "aws_proxy_protocol_policy" "test" { + load_balancer = aws_elb.test.name instance_ports = ["25", "587"] } `, rName)) diff --git a/internal/service/elb/service_account_data_source.go b/internal/service/elb/service_account_data_source.go index 2fcbe067740..5a82eac5192 100644 --- a/internal/service/elb/service_account_data_source.go +++ b/internal/service/elb/service_account_data_source.go @@ -6,8 +6,7 @@ package elb import ( "context" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -17,68 +16,68 @@ import ( // See http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy // See https://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-access-logs.html#access-logging-bucket-permissions - -var AccountIdPerRegionMap = map[string]string{ - endpoints.AfSouth1RegionID: "098369216593", - endpoints.ApEast1RegionID: "754344448648", - endpoints.ApNortheast1RegionID: "582318560864", - endpoints.ApNortheast2RegionID: "600734575887", - endpoints.ApNortheast3RegionID: "383597477331", - endpoints.ApSouth1RegionID: "718504428378", - endpoints.ApSoutheast1RegionID: "114774131450", - endpoints.ApSoutheast2RegionID: "783225319266", - endpoints.ApSoutheast3RegionID: "589379963580", - endpoints.CaCentral1RegionID: "985666609251", - endpoints.CnNorth1RegionID: "638102146993", - endpoints.CnNorthwest1RegionID: "037604701340", - endpoints.EuCentral1RegionID: "054676820928", - endpoints.EuNorth1RegionID: "897822967062", - 
endpoints.EuSouth1RegionID: "635631232127", - endpoints.EuWest1RegionID: "156460612806", - endpoints.EuWest2RegionID: "652711504416", - endpoints.EuWest3RegionID: "009996457667", - // endpoints.MeCentral1RegionID: "", - endpoints.MeSouth1RegionID: "076674570225", - endpoints.SaEast1RegionID: "507241528517", - endpoints.UsEast1RegionID: "127311923021", - endpoints.UsEast2RegionID: "033677994240", - endpoints.UsGovEast1RegionID: "190560391635", - endpoints.UsGovWest1RegionID: "048591011584", - endpoints.UsWest1RegionID: "027434742980", - endpoints.UsWest2RegionID: "797873946194", +var accountIDPerRegionMap = map[string]string{ + names.AFSouth1RegionID: "098369216593", + names.APEast1RegionID: "754344448648", + names.APNortheast1RegionID: "582318560864", + names.APNortheast2RegionID: "600734575887", + names.APNortheast3RegionID: "383597477331", + names.APSouth1RegionID: "718504428378", + names.APSoutheast1RegionID: "114774131450", + names.APSoutheast2RegionID: "783225319266", + names.APSoutheast3RegionID: "589379963580", + names.CACentral1RegionID: "985666609251", + names.CNNorth1RegionID: "638102146993", + names.CNNorthwest1RegionID: "037604701340", + names.EUCentral1RegionID: "054676820928", + names.EUNorth1RegionID: "897822967062", + names.EUSouth1RegionID: "635631232127", + names.EUWest1RegionID: "156460612806", + names.EUWest2RegionID: "652711504416", + names.EUWest3RegionID: "009996457667", + // names.MECentral1RegionID: "", + names.MESouth1RegionID: "076674570225", + names.SAEast1RegionID: "507241528517", + names.USEast1RegionID: "127311923021", + names.USEast2RegionID: "033677994240", + names.USGovEast1RegionID: "190560391635", + names.USGovWest1RegionID: "048591011584", + names.USWest1RegionID: "027434742980", + names.USWest2RegionID: "797873946194", } -// @SDKDataSource("aws_elb_service_account") -func DataSourceServiceAccount() *schema.Resource { +// @SDKDataSource("aws_elb_service_account", name="Service Account") +func dataSourceServiceAccount() 
*schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceServiceAccountRead, Schema: map[string]*schema.Schema{ - names.AttrRegion: { - Type: schema.TypeString, - Optional: true, - }, names.AttrARN: { Type: schema.TypeString, Computed: true, }, + names.AttrRegion: { + Type: schema.TypeString, + Optional: true, + }, }, } } func dataSourceServiceAccountRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + region := meta.(*conns.AWSClient).Region if v, ok := d.GetOk(names.AttrRegion); ok { region = v.(string) } - if accid, ok := AccountIdPerRegionMap[region]; ok { - d.SetId(accid) + if v, ok := accountIDPerRegionMap[region]; ok { + d.SetId(v) arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, Service: "iam", - AccountID: accid, + AccountID: v, Resource: "root", }.String() d.Set(names.AttrARN, arn) @@ -86,5 +85,5 @@ func dataSourceServiceAccountRead(ctx context.Context, d *schema.ResourceData, m return diags } - return sdkdiag.AppendErrorf(diags, "Unknown region (%q)", region) + return sdkdiag.AppendErrorf(diags, "unsupported AWS Region: %s", region) } diff --git a/internal/service/elb/service_account_data_source_test.go b/internal/service/elb/service_account_data_source_test.go index ebc5f1065e0..a08dfffacaa 100644 --- a/internal/service/elb/service_account_data_source_test.go +++ b/internal/service/elb/service_account_data_source_test.go @@ -14,8 +14,7 @@ import ( func TestAccELBServiceAccountDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - expectedAccountID := tfelb.AccountIdPerRegionMap[acctest.Region()] - + expectedAccountID := tfelb.AccountIDPerRegionMap[acctest.Region()] dataSourceName := "data.aws_elb_service_account.main" resource.ParallelTest(t, resource.TestCase{ @@ -36,8 +35,7 @@ func TestAccELBServiceAccountDataSource_basic(t *testing.T) { func TestAccELBServiceAccountDataSource_region(t *testing.T) { ctx := acctest.Context(t) - expectedAccountID := 
tfelb.AccountIdPerRegionMap[acctest.Region()] - + expectedAccountID := tfelb.AccountIDPerRegionMap[acctest.Region()] dataSourceName := "data.aws_elb_service_account.regional" resource.ParallelTest(t, resource.TestCase{ diff --git a/internal/service/elb/service_endpoint_resolver_gen.go b/internal/service/elb/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..941c5efd895 --- /dev/null +++ b/internal/service/elb/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package elb + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + elasticloadbalancing_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ elasticloadbalancing_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver elasticloadbalancing_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: elasticloadbalancing_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params elasticloadbalancing_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err 
!= nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up elasticloadbalancing endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*elasticloadbalancing_sdkv2.Options) { + return func(o *elasticloadbalancing_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/elb/service_endpoints_gen_test.go b/internal/service/elb/service_endpoints_gen_test.go index 39f797de522..f050e7d99d0 100644 --- a/internal/service/elb/service_endpoints_gen_test.go +++ b/internal/service/elb/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package elb_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - elb_sdkv1 "github.com/aws/aws-sdk-go/service/elb" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + elasticloadbalancing_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/go-cty/cty" @@ -88,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := 
map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -271,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -292,55 +297,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := elasticloadbalancing_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(elb_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), elasticloadbalancing_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := elasticloadbalancing_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(elb_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), elasticloadbalancing_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + 
ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.ELBConn(ctx) - - req, _ := client.DescribeLoadBalancersRequest(&elb_sdkv1.DescribeLoadBalancersInput{}) + client := meta.ELBClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.DescribeLoadBalancers(ctx, &elasticloadbalancing_sdkv2.DescribeLoadBalancersInput{}, + func(opts *elasticloadbalancing_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -396,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && 
dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -523,6 +559,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + 
+func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/elb/service_package_gen.go b/internal/service/elb/service_package_gen.go index 00111dd9313..b4a6313e238 100644 --- a/internal/service/elb/service_package_gen.go +++ b/internal/service/elb/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package elb import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - elb_sdkv1 "github.com/aws/aws-sdk-go/service/elb" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + elasticloadbalancing_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -28,16 +25,19 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceLoadBalancer, + Factory: dataSourceLoadBalancer, TypeName: "aws_elb", + Name: "Classic Load Balancer", }, { - Factory: DataSourceHostedZoneID, + Factory: dataSourceHostedZoneID, TypeName: "aws_elb_hosted_zone_id", + Name: "Hosted Zone ID", }, { - Factory: DataSourceServiceAccount, + Factory: dataSourceServiceAccount, TypeName: "aws_elb_service_account", + Name: "Service Account", }, } } @@ -45,11 +45,12 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceAppCookieStickinessPolicy, + Factory: resourceAppCookieStickinessPolicy, TypeName: "aws_app_cookie_stickiness_policy", + Name: "App Cookie Stickiness Policy", }, { - Factory: ResourceLoadBalancer, + Factory: resourceLoadBalancer, TypeName: "aws_elb", Name: "Classic Load Balancer", Tags: &types.ServicePackageResourceTags{ @@ -57,32 +58,39 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceAttachment, + Factory: 
resourceAttachment, TypeName: "aws_elb_attachment", + Name: "Attachment", }, { - Factory: ResourceCookieStickinessPolicy, + Factory: resourceCookieStickinessPolicy, TypeName: "aws_lb_cookie_stickiness_policy", + Name: "LB Cookie Stickiness Policy", }, { - Factory: ResourceSSLNegotiationPolicy, + Factory: resourceSSLNegotiationPolicy, TypeName: "aws_lb_ssl_negotiation_policy", + Name: "SSL Negotiation Policy", }, { - Factory: ResourceBackendServerPolicy, + Factory: resourceBackendServerPolicy, TypeName: "aws_load_balancer_backend_server_policy", + Name: "Backend Server Policy", }, { - Factory: ResourceListenerPolicy, + Factory: resourceListenerPolicy, TypeName: "aws_load_balancer_listener_policy", + Name: "Listener Policy", }, { - Factory: ResourcePolicy, + Factory: resourcePolicy, TypeName: "aws_load_balancer_policy", + Name: "Load Balancer Policy", }, { - Factory: ResourceProxyProtocolPolicy, + Factory: resourceProxyProtocolPolicy, TypeName: "aws_proxy_protocol_policy", + Name: "Proxy Protocol Policy", }, } } @@ -91,25 +99,14 @@ func (p *servicePackage) ServicePackageName() string { return names.ELB } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*elb_sdkv1.ELB, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*elasticloadbalancing_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return elb_sdkv1.New(sess.Copy(&cfg)), nil + return elasticloadbalancing_sdkv2.NewFromConfig(cfg, + elasticloadbalancing_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/elb/sweep.go b/internal/service/elb/sweep.go index b36331b761a..eb8a3acdb3a 100644 --- a/internal/service/elb/sweep.go +++ b/internal/service/elb/sweep.go @@ -7,11 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -27,33 +27,30 @@ func sweepLoadBalancers(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.ELBConn(ctx) - input := &elb.DescribeLoadBalancersInput{} + conn := client.ELBClient(ctx) + input := &elasticloadbalancing.DescribeLoadBalancersInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.DescribeLoadBalancersPagesWithContext(ctx, input, func(page *elb.DescribeLoadBalancersOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := elasticloadbalancing.NewDescribeLoadBalancersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ELB Classic Load Balancer sweep for %s: %s", region, err) + return nil + } + + if err 
!= nil { + return fmt.Errorf("error listing ELB Classic Load Balancers (%s): %w", region, err) } for _, v := range page.LoadBalancerDescriptions { - r := ResourceLoadBalancer() + r := resourceLoadBalancer() d := r.Data(nil) - d.SetId(aws.StringValue(v.LoadBalancerName)) + d.SetId(aws.ToString(v.LoadBalancerName)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping ELB Classic Load Balancer sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing ELB Classic Load Balancers (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) diff --git a/internal/service/elb/tags_gen.go b/internal/service/elb/tags_gen.go index 57d0b4e9a54..acc1ad09771 100644 --- a/internal/service/elb/tags_gen.go +++ b/internal/service/elb/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elb" - "github.com/aws/aws-sdk-go/service/elb/elbiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists elb service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn elbiface.ELBAPI, identifier string) (tftags.KeyValueTags, error) { - input := &elb.DescribeTagsInput{ - LoadBalancerNames: aws.StringSlice([]string{identifier}), +func listTags(ctx context.Context, conn *elasticloadbalancing.Client, identifier string, optFns ...func(*elasticloadbalancing.Options)) (tftags.KeyValueTags, error) { + input := &elasticloadbalancing.DescribeTagsInput{ + LoadBalancerNames: []string{identifier}, } - output, err := conn.DescribeTagsWithContext(ctx, input) + output, err := conn.DescribeTags(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn elbiface.ELBAPI, identifier string) (tft // ListTags lists elb service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).ELBConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).ELBClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // TagKeys returns elb service tag keys. -func TagKeys(tags tftags.KeyValueTags) []*elb.TagKeyOnly { - result := make([]*elb.TagKeyOnly, 0, len(tags)) +func TagKeys(tags tftags.KeyValueTags) []awstypes.TagKeyOnly { + result := make([]awstypes.TagKeyOnly, 0, len(tags)) for k := range tags.Map() { - tagKey := &elb.TagKeyOnly{ + tagKey := awstypes.TagKeyOnly{ Key: aws.String(k), } @@ -67,11 +67,11 @@ func TagKeys(tags tftags.KeyValueTags) []*elb.TagKeyOnly { } // Tags returns elb service tags. 
-func Tags(tags tftags.KeyValueTags) []*elb.Tag { - result := make([]*elb.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &elb.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -82,12 +82,12 @@ func Tags(tags tftags.KeyValueTags) []*elb.Tag { return result } -// KeyValueTags creates tftags.KeyValueTags from elb service tags. -func KeyValueTags(ctx context.Context, tags []*elb.Tag) tftags.KeyValueTags { +// KeyValueTags creates tftags.KeyValueTags from elasticloadbalancing service tags. +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -95,7 +95,7 @@ func KeyValueTags(ctx context.Context, tags []*elb.Tag) tftags.KeyValueTags { // getTagsIn returns elb service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*elb.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -106,7 +106,7 @@ func getTagsIn(ctx context.Context) []*elb.Tag { } // setTagsOut sets elb service tags in Context. -func setTagsOut(ctx context.Context, tags []*elb.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -115,7 +115,7 @@ func setTagsOut(ctx context.Context, tags []*elb.Tag) { // updateTags updates elb service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn elbiface.ELBAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *elasticloadbalancing.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*elasticloadbalancing.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -124,12 +124,12 @@ func updateTags(ctx context.Context, conn elbiface.ELBAPI, identifier string, ol removedTags := oldTags.Removed(newTags) removedTags = removedTags.IgnoreSystem(names.ELB) if len(removedTags) > 0 { - input := &elb.RemoveTagsInput{ - LoadBalancerNames: aws.StringSlice([]string{identifier}), + input := &elasticloadbalancing.RemoveTagsInput{ + LoadBalancerNames: []string{identifier}, Tags: TagKeys(removedTags), } - _, err := conn.RemoveTagsWithContext(ctx, input) + _, err := conn.RemoveTags(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -139,12 +139,12 @@ func updateTags(ctx context.Context, conn elbiface.ELBAPI, identifier string, ol updatedTags := oldTags.Updated(newTags) updatedTags = updatedTags.IgnoreSystem(names.ELB) if len(updatedTags) > 0 { - input := &elb.AddTagsInput{ - LoadBalancerNames: aws.StringSlice([]string{identifier}), + input := &elasticloadbalancing.AddTagsInput{ + LoadBalancerNames: []string{identifier}, Tags: Tags(updatedTags), } - _, err := conn.AddTagsWithContext(ctx, input) + _, err := conn.AddTags(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -157,5 +157,5 @@ func updateTags(ctx context.Context, conn elbiface.ELBAPI, identifier string, ol // UpdateTags updates elb service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).ELBConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).ELBClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/elb/validate.go b/internal/service/elb/validate.go index 6210dd5c40d..c185199c44f 100644 --- a/internal/service/elb/validate.go +++ b/internal/service/elb/validate.go @@ -9,7 +9,7 @@ import ( "github.com/YakDriver/regexache" ) -func ValidName(v interface{}, k string) (ws []string, errors []error) { +func validName(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if len(value) == 0 { return // short-circuit diff --git a/internal/service/elb/validate_test.go b/internal/service/elb/validate_test.go index 588063915ae..9bc9607d87b 100644 --- a/internal/service/elb/validate_test.go +++ b/internal/service/elb/validate_test.go @@ -4,8 +4,11 @@ package elb import ( + "fmt" + "math/rand" "testing" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -17,7 +20,7 @@ func TestValidName(t *testing.T) { } for _, s := range validNames { - _, errors := ValidName(s, names.AttrName) + _, errors := validName(s, names.AttrName) if len(errors) > 0 { t.Fatalf("%q should be a valid ELB name: %v", s, errors) } @@ -31,13 +34,68 @@ func TestValidName(t *testing.T) { } for _, s := range invalidNames { - _, errors := ValidName(s, names.AttrName) + _, errors := validName(s, names.AttrName) if len(errors) == 0 { t.Fatalf("%q should not be a valid ELB name: %v", s, errors) } } } +func TestValidLoadBalancerNameCannotBeginWithHyphen(t *testing.T) { + t.Parallel() + + var n = "-Testing123" + _, errors := validName(n, "SampleKey") + + if len(errors) != 1 { + t.Fatalf("Expected the ELB Name to trigger a validation error") + } +} + +func 
TestValidLoadBalancerNameCanBeAnEmptyString(t *testing.T) { + t.Parallel() + + var n = "" + _, errors := validName(n, "SampleKey") + + if len(errors) != 0 { + t.Fatalf("Expected the ELB Name to pass validation") + } +} + +func TestValidLoadBalancerNameCannotBeLongerThan32Characters(t *testing.T) { + t.Parallel() + + var n = "Testing123dddddddddddddddddddvvvv" + _, errors := validName(n, "SampleKey") + + if len(errors) != 1 { + t.Fatalf("Expected the ELB Name to trigger a validation error") + } +} + +func TestValidLoadBalancerNameCannotHaveSpecialCharacters(t *testing.T) { + t.Parallel() + + var n = "Testing123%%" + _, errors := validName(n, "SampleKey") + + if len(errors) != 1 { + t.Fatalf("Expected the ELB Name to trigger a validation error") + } +} + +func TestValidLoadBalancerNameCannotEndWithHyphen(t *testing.T) { + t.Parallel() + + var n = "Testing123-" + _, errors := validName(n, "SampleKey") + + if len(errors) != 1 { + t.Fatalf("Expected the ELB Name to trigger a validation error") + } +} + func TestValidNamePrefix(t *testing.T) { t.Parallel() @@ -65,3 +123,131 @@ func TestValidNamePrefix(t *testing.T) { } } } + +func TestValidLoadBalancerAccessLogsInterval(t *testing.T) { + t.Parallel() + + type testCases struct { + Value int + ErrCount int + } + + invalidCases := []testCases{ + { + Value: 0, + ErrCount: 1, + }, + { + Value: 10, + ErrCount: 1, + }, + { + Value: -1, + ErrCount: 1, + }, + } + + for _, tc := range invalidCases { + _, errors := validAccessLogsInterval(tc.Value, names.AttrInterval) + if len(errors) != tc.ErrCount { + t.Fatalf("Expected %q to trigger a validation error.", tc.Value) + } + } +} + +func TestValidLoadBalancerHealthCheckTarget(t *testing.T) { + t.Parallel() + + type testCase struct { + Value string + ErrCount int + } + + randomRunes := func(n int) string { + // A complete set of modern Katakana characters. 
+ runes := []rune("アイウエオ" + + "カキクケコガギグゲゴサシスセソザジズゼゾ" + + "タチツテトダヂヅデドナニヌネノハヒフヘホ" + + "バビブベボパピプペポマミムメモヤユヨラリ" + + "ルレロワヰヱヲン") + + s := make([]rune, n) + for i := range s { + s[i] = runes[rand.Intn(len(runes))] + } + return string(s) + } + + validCases := []testCase{ + { + Value: "TCP:1234", + ErrCount: 0, + }, + { + Value: "http:80/test", + ErrCount: 0, + }, + { + Value: fmt.Sprintf("HTTP:8080/%s", randomRunes(5)), + ErrCount: 0, + }, + { + Value: "SSL:8080", + ErrCount: 0, + }, + } + + for _, tc := range validCases { + _, errors := validHeathCheckTarget(tc.Value, names.AttrTarget) + if len(errors) != tc.ErrCount { + t.Fatalf("Expected %q not to trigger a validation error.", tc.Value) + } + } + + invalidCases := []testCase{ + { + Value: "", + ErrCount: 1, + }, + { + Value: "TCP:", + ErrCount: 1, + }, + { + Value: "TCP:1234/", + ErrCount: 1, + }, + { + Value: "SSL:8080/", + ErrCount: 1, + }, + { + Value: "HTTP:8080", + ErrCount: 1, + }, + { + Value: "incorrect-value", + ErrCount: 1, + }, + { + Value: "TCP:123456", + ErrCount: 1, + }, + { + Value: "incorrect:80/", + ErrCount: 1, + }, + { + Value: fmt.Sprintf("HTTP:8080/%s%s", + sdkacctest.RandStringFromCharSet(512, sdkacctest.CharSetAlpha), randomRunes(512)), + ErrCount: 1, + }, + } + + for _, tc := range invalidCases { + _, errors := validHeathCheckTarget(tc.Value, names.AttrTarget) + if len(errors) != tc.ErrCount { + t.Fatalf("Expected %q to trigger a validation error.", tc.Value) + } + } +} diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index e9b9455766f..47396ec9773 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -6,17 +6,13 @@ package elbv2 import ( "time" - "github.com/aws/aws-sdk-go/service/elbv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/hashicorp/terraform-provider-aws/internal/enum" ) const ( - propagationTimeout = 2 * time.Minute -) - -const ( - errCodeValidationError = "ValidationError" - 
- tagsOnCreationErrMessage = "cannot specify tags on creation" + iamPropagationTimeout = 2 * time.Minute + elbv2PropagationTimeout = 5 * time.Minute // nosemgrep:ci.elbv2-in-const-name, ci.elbv2-in-var-name ) // See https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_LoadBalancerAttribute.html#API_LoadBalancerAttribute_Contents. @@ -203,11 +199,11 @@ const ( ) func healthCheckProtocolEnumValues() []string { - return []string{ - elbv2.ProtocolEnumHttp, - elbv2.ProtocolEnumHttps, - elbv2.ProtocolEnumTcp, - } + return enum.Slice[awstypes.ProtocolEnum]( + awstypes.ProtocolEnumHttp, + awstypes.ProtocolEnumHttps, + awstypes.ProtocolEnumTcp, + ) } const ( diff --git a/internal/service/elbv2/errors.go b/internal/service/elbv2/errors.go new file mode 100644 index 00000000000..ffcdda2ee02 --- /dev/null +++ b/internal/service/elbv2/errors.go @@ -0,0 +1,12 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package elbv2 + +const ( + errCodeValidationError = "ValidationError" +) + +const ( + tagsOnCreationErrMessage = "cannot specify tags on creation" +) diff --git a/internal/service/elbv2/exports.go b/internal/service/elbv2/exports.go new file mode 100644 index 00000000000..8b6b1c43e92 --- /dev/null +++ b/internal/service/elbv2/exports.go @@ -0,0 +1,9 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package elbv2 + +// Exports for use in other modules. +var ( + FindTargetGroupByARN = findTargetGroupByARN +) diff --git a/internal/service/elbv2/exports_test.go b/internal/service/elbv2/exports_test.go index 68c67d7103f..f8581971082 100644 --- a/internal/service/elbv2/exports_test.go +++ b/internal/service/elbv2/exports_test.go @@ -5,15 +5,29 @@ package elbv2 // Exports for use in tests only. 
var ( - FindListenerByARN = findListenerByARN - HealthCheckProtocolEnumValues = healthCheckProtocolEnumValues - ProtocolVersionEnumValues = protocolVersionEnumValues -) + ResourceListener = resourceListener + ResourceListenerCertificate = resourceListenerCertificate + ResourceListenerRule = resourceListenerRule + ResourceLoadBalancer = resourceLoadBalancer + ResourceTargetGroup = resourceTargetGroup + ResourceTargetGroupAttachment = resourceTargetGroupAttachment + ResourceTrustStore = resourceTrustStore + ResourceTrustStoreRevocation = resourceTrustStoreRevocation -const ( - MutualAuthenticationOff = mutualAuthenticationOff - MutualAuthenticationVerify = mutualAuthenticationVerify - MutualAuthenticationPassthrough = mutualAuthenticationPassthrough + FindListenerByARN = findListenerByARN + FindListenerCertificateByTwoPartKey = findListenerCertificateByTwoPartKey + FindListenerRuleByARN = findListenerRuleByARN + FindLoadBalancerAttributesByARN = findLoadBalancerAttributesByARN + FindLoadBalancerByARN = findLoadBalancerByARN + FindTargetHealthDescription = findTargetHealthDescription + FindTrustStoreByARN = findTrustStoreByARN + FindTrustStoreRevocationByTwoPartKey = findTrustStoreRevocationByTwoPartKey + HealthCheckProtocolEnumValues = healthCheckProtocolEnumValues + HostedZoneIDPerRegionALBMap = hostedZoneIDPerRegionALBMap + HostedZoneIDPerRegionNLBMap = hostedZoneIDPerRegionNLBMap + ListenerARNFromRuleARN = listenerARNFromRuleARN + ProtocolVersionEnumValues = protocolVersionEnumValues + SuffixFromARN = suffixFromARN ) const ( @@ -24,4 +38,8 @@ const ( AlpnPolicyNone = alpnPolicyNone LoadBalancerAttributeClientKeepAliveSeconds = loadBalancerAttributeClientKeepAliveSeconds + + MutualAuthenticationOff = mutualAuthenticationOff + MutualAuthenticationVerify = mutualAuthenticationVerify + MutualAuthenticationPassthrough = mutualAuthenticationPassthrough ) diff --git a/internal/service/elbv2/generate.go b/internal/service/elbv2/generate.go index ac0bbf62e77..05269535b1e 
100644 --- a/internal/service/elbv2/generate.go +++ b/internal/service/elbv2/generate.go @@ -1,10 +1,11 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=DescribeTags -ListTagsInIDElem=ResourceArns -ListTagsInIDNeedSlice=yes -ListTagsOutTagsElem=TagDescriptions[0].Tags -ServiceTagsSlice -TagOp=AddTags -TagInIDElem=ResourceArns -TagInIDNeedSlice=yes -UntagOp=RemoveTags -UpdateTags -CreateTags -TagsFunc=tags -KeyValueTagsFunc=keyValueTags -//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=DescribeTags -ListTagsInIDElem=ResourceArns -ListTagsInIDNeedValueSlice=yes -ListTagsOutTagsElem=TagDescriptions[0].Tags -ServiceTagsSlice -TagOp=AddTags -TagInIDElem=ResourceArns -TagInIDNeedValueSlice=yes -UntagOp=RemoveTags -UpdateTags -CreateTags -TagsFunc=tagsV2 -KeyValueTagsFunc=keyValueTagsV2 -ListTagsFunc=listTagsV2 -GetTagsInFunc=getTagsInV2 -SetTagsOutFunc=setTagsOutV2 -UpdateTagsFunc=updateTagsV2 -CreateTagsFunc=createTagsV2 -AWSSDKVersion=2 -KVTValues -- tagsv2_gen.go +//go:generate go run ../../generate/listpages/main.go -AWSSDKVersion=2 -ListOps=DescribeListenerCertificates -InputPaginator=Marker -OutputPaginator=NextMarker -- list_listener_certificates_pages_gen.go +//go:generate go run ../../generate/listpages/main.go -AWSSDKVersion=2 -ListOps=DescribeRules -InputPaginator=Marker -OutputPaginator=NextMarker -- list_rules_pages_gen.go +//go:generate go run ../../generate/tags/main.go -ListTags -ListTagsOp=DescribeTags -ListTagsInIDElem=ResourceArns -ListTagsInIDNeedValueSlice=yes -ListTagsOutTagsElem=TagDescriptions[0].Tags -ServiceTagsSlice -TagOp=AddTags -TagInIDElem=ResourceArns -TagInIDNeedValueSlice=yes -UntagOp=RemoveTags -UpdateTags -CreateTags -AWSSDKVersion=2 -KVTValues //go:generate go run ../../generate/servicepackage/main.go -//go:generate go run ../../generate/tagstests/main.go +//go:generate go run ../../generate/tagstests/main.go 
-AWSSDKVersion=2 // ONLY generate directives and package declaration! Do not add anything else to this file. package elbv2 diff --git a/internal/service/elbv2/hosted_zone_id_data_source.go b/internal/service/elbv2/hosted_zone_id_data_source.go index 147dcb3a6b8..a4d666f13f9 100644 --- a/internal/service/elbv2/hosted_zone_id_data_source.go +++ b/internal/service/elbv2/hosted_zone_id_data_source.go @@ -6,95 +6,93 @@ package elbv2 import ( "context" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/service/elbv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // See https://docs.aws.amazon.com/general/latest/gr/elb.html#elb_region - -var HostedZoneIdPerRegionALBMap = map[string]string{ - endpoints.AfSouth1RegionID: "Z268VQBMOI5EKX", - endpoints.ApEast1RegionID: "Z3DQVH9N71FHZ0", - endpoints.ApNortheast1RegionID: "Z14GRHDCWA56QT", - endpoints.ApNortheast2RegionID: "ZWKZPGTI48KDX", - endpoints.ApNortheast3RegionID: "Z5LXEXXYW11ES", - endpoints.ApSouth1RegionID: "ZP97RAFLXTNZK", - endpoints.ApSouth2RegionID: "Z0173938T07WNTVAEPZN", - endpoints.ApSoutheast1RegionID: "Z1LMS91P8CMLE5", - endpoints.ApSoutheast2RegionID: "Z1GM3OXH4ZPM65", - endpoints.ApSoutheast3RegionID: "Z08888821HLRG5A9ZRTER", - endpoints.ApSoutheast4RegionID: "Z09517862IB2WZLPXG76F", - endpoints.CaCentral1RegionID: "ZQSVJUPU6J1EY", - endpoints.CaWest1RegionID: "Z06473681N0SF6OS049SD", - endpoints.CnNorth1RegionID: "Z1GDH35T77C1KE", - endpoints.CnNorthwest1RegionID: 
"ZM7IZAIOVVDZF", - endpoints.EuCentral1RegionID: "Z215JYRZR1TBD5", - endpoints.EuCentral2RegionID: "Z06391101F2ZOEP8P5EB3", - endpoints.EuNorth1RegionID: "Z23TAZ6LKFMNIO", - endpoints.EuSouth1RegionID: "Z3ULH7SSC9OV64", - endpoints.EuSouth2RegionID: "Z0956581394HF5D5LXGAP", - endpoints.EuWest1RegionID: "Z32O12XQLNTSW2", - endpoints.EuWest2RegionID: "ZHURV8PSTC4K8", - endpoints.EuWest3RegionID: "Z3Q77PNBQS71R4", - endpoints.IlCentral1RegionID: "Z09170902867EHPV2DABU", - endpoints.MeCentral1RegionID: "Z08230872XQRWHG2XF6I", - endpoints.MeSouth1RegionID: "ZS929ML54UICD", - endpoints.SaEast1RegionID: "Z2P70J7HTTTPLU", - endpoints.UsEast1RegionID: "Z35SXDOTRQ7X7K", - endpoints.UsEast2RegionID: "Z3AADJGX6KTTL2", - endpoints.UsGovEast1RegionID: "Z166TLBEWOO7G0", - endpoints.UsGovWest1RegionID: "Z33AYJ8TM3BH4J", - endpoints.UsWest1RegionID: "Z368ELLRRE2KJ0", - endpoints.UsWest2RegionID: "Z1H1FL5HABSF5", +var hostedZoneIDPerRegionALBMap = map[string]string{ + names.AFSouth1RegionID: "Z268VQBMOI5EKX", + names.APEast1RegionID: "Z3DQVH9N71FHZ0", + names.APNortheast1RegionID: "Z14GRHDCWA56QT", + names.APNortheast2RegionID: "ZWKZPGTI48KDX", + names.APNortheast3RegionID: "Z5LXEXXYW11ES", + names.APSouth1RegionID: "ZP97RAFLXTNZK", + names.APSouth2RegionID: "Z0173938T07WNTVAEPZN", + names.APSoutheast1RegionID: "Z1LMS91P8CMLE5", + names.APSoutheast2RegionID: "Z1GM3OXH4ZPM65", + names.APSoutheast3RegionID: "Z08888821HLRG5A9ZRTER", + names.APSoutheast4RegionID: "Z09517862IB2WZLPXG76F", + names.CACentral1RegionID: "ZQSVJUPU6J1EY", + names.CAWest1RegionID: "Z06473681N0SF6OS049SD", + names.CNNorth1RegionID: "Z1GDH35T77C1KE", + names.CNNorthwest1RegionID: "ZM7IZAIOVVDZF", + names.EUCentral1RegionID: "Z215JYRZR1TBD5", + names.EUCentral2RegionID: "Z06391101F2ZOEP8P5EB3", + names.EUNorth1RegionID: "Z23TAZ6LKFMNIO", + names.EUSouth1RegionID: "Z3ULH7SSC9OV64", + names.EUSouth2RegionID: "Z0956581394HF5D5LXGAP", + names.EUWest1RegionID: "Z32O12XQLNTSW2", + names.EUWest2RegionID: "ZHURV8PSTC4K8", 
+ names.EUWest3RegionID: "Z3Q77PNBQS71R4", + names.ILCentral1RegionID: "Z09170902867EHPV2DABU", + names.MECentral1RegionID: "Z08230872XQRWHG2XF6I", + names.MESouth1RegionID: "ZS929ML54UICD", + names.SAEast1RegionID: "Z2P70J7HTTTPLU", + names.USEast1RegionID: "Z35SXDOTRQ7X7K", + names.USEast2RegionID: "Z3AADJGX6KTTL2", + names.USGovEast1RegionID: "Z166TLBEWOO7G0", + names.USGovWest1RegionID: "Z33AYJ8TM3BH4J", + names.USWest1RegionID: "Z368ELLRRE2KJ0", + names.USWest2RegionID: "Z1H1FL5HABSF5", } // See https://docs.aws.amazon.com/general/latest/gr/elb.html#elb_region - -var HostedZoneIdPerRegionNLBMap = map[string]string{ - endpoints.AfSouth1RegionID: "Z203XCE67M25HM", - endpoints.ApEast1RegionID: "Z12Y7K3UBGUAD1", - endpoints.ApNortheast1RegionID: "Z31USIVHYNEOWT", - endpoints.ApNortheast2RegionID: "ZIBE1TIR4HY56", - endpoints.ApNortheast3RegionID: "Z1GWIQ4HH19I5X", - endpoints.ApSouth1RegionID: "ZVDDRBQ08TROA", - endpoints.ApSouth2RegionID: "Z0711778386UTO08407HT", - endpoints.ApSoutheast1RegionID: "ZKVM4W9LS7TM", - endpoints.ApSoutheast2RegionID: "ZCT6FZBF4DROD", - endpoints.ApSoutheast3RegionID: "Z01971771FYVNCOVWJU1G", - endpoints.ApSoutheast4RegionID: "Z01156963G8MIIL7X90IV", - endpoints.CaCentral1RegionID: "Z2EPGBW3API2WT", - endpoints.CaWest1RegionID: "Z02754302KBB00W2LKWZ9", - endpoints.CnNorth1RegionID: "Z3QFB96KMJ7ED6", - endpoints.CnNorthwest1RegionID: "ZQEIKTCZ8352D", - endpoints.EuCentral1RegionID: "Z3F0SRJ5LGBH90", - endpoints.EuCentral2RegionID: "Z02239872DOALSIDCX66S", - endpoints.EuNorth1RegionID: "Z1UDT6IFJ4EJM", - endpoints.EuSouth1RegionID: "Z23146JA1KNAFP", - endpoints.EuSouth2RegionID: "Z1011216NVTVYADP1SSV", - endpoints.EuWest1RegionID: "Z2IFOLAFXWLO4F", - endpoints.EuWest2RegionID: "ZD4D7Y8KGAS4G", - endpoints.EuWest3RegionID: "Z1CMS0P5QUZ6D5", - endpoints.IlCentral1RegionID: "Z0313266YDI6ZRHTGQY4", - endpoints.MeCentral1RegionID: "Z00282643NTTLPANJJG2P", - endpoints.MeSouth1RegionID: "Z3QSRYVP46NYYV", - endpoints.SaEast1RegionID: 
"ZTK26PT1VY4CU", - endpoints.UsEast1RegionID: "Z26RNL4JYFTOTI", - endpoints.UsEast2RegionID: "ZLMOA37VPKANP", - endpoints.UsGovEast1RegionID: "Z1ZSMQQ6Q24QQ8", - endpoints.UsGovWest1RegionID: "ZMG1MZ2THAWF1", - endpoints.UsWest1RegionID: "Z24FKFUX50B4VW", - endpoints.UsWest2RegionID: "Z18D5FSROUN65G", +var hostedZoneIDPerRegionNLBMap = map[string]string{ + names.AFSouth1RegionID: "Z203XCE67M25HM", + names.APEast1RegionID: "Z12Y7K3UBGUAD1", + names.APNortheast1RegionID: "Z31USIVHYNEOWT", + names.APNortheast2RegionID: "ZIBE1TIR4HY56", + names.APNortheast3RegionID: "Z1GWIQ4HH19I5X", + names.APSouth1RegionID: "ZVDDRBQ08TROA", + names.APSouth2RegionID: "Z0711778386UTO08407HT", + names.APSoutheast1RegionID: "ZKVM4W9LS7TM", + names.APSoutheast2RegionID: "ZCT6FZBF4DROD", + names.APSoutheast3RegionID: "Z01971771FYVNCOVWJU1G", + names.APSoutheast4RegionID: "Z01156963G8MIIL7X90IV", + names.CACentral1RegionID: "Z2EPGBW3API2WT", + names.CAWest1RegionID: "Z02754302KBB00W2LKWZ9", + names.CNNorth1RegionID: "Z3QFB96KMJ7ED6", + names.CNNorthwest1RegionID: "ZQEIKTCZ8352D", + names.EUCentral1RegionID: "Z3F0SRJ5LGBH90", + names.EUCentral2RegionID: "Z02239872DOALSIDCX66S", + names.EUNorth1RegionID: "Z1UDT6IFJ4EJM", + names.EUSouth1RegionID: "Z23146JA1KNAFP", + names.EUSouth2RegionID: "Z1011216NVTVYADP1SSV", + names.EUWest1RegionID: "Z2IFOLAFXWLO4F", + names.EUWest2RegionID: "ZD4D7Y8KGAS4G", + names.EUWest3RegionID: "Z1CMS0P5QUZ6D5", + names.ILCentral1RegionID: "Z0313266YDI6ZRHTGQY4", + names.MECentral1RegionID: "Z00282643NTTLPANJJG2P", + names.MESouth1RegionID: "Z3QSRYVP46NYYV", + names.SAEast1RegionID: "ZTK26PT1VY4CU", + names.USEast1RegionID: "Z26RNL4JYFTOTI", + names.USEast2RegionID: "ZLMOA37VPKANP", + names.USGovEast1RegionID: "Z1ZSMQQ6Q24QQ8", + names.USGovWest1RegionID: "ZMG1MZ2THAWF1", + names.USWest1RegionID: "Z24FKFUX50B4VW", + names.USWest2RegionID: "Z18D5FSROUN65G", } -// @SDKDataSource("aws_lb_hosted_zone_id") -func DataSourceHostedZoneID() *schema.Resource { +// 
@SDKDataSource("aws_lb_hosted_zone_id", name="Hosted Zone ID") +func dataSourceHostedZoneID() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceHostedZoneIDRead, @@ -107,8 +105,8 @@ func DataSourceHostedZoneID() *schema.Resource { "load_balancer_type": { Type: schema.TypeString, Optional: true, - Default: elbv2.LoadBalancerTypeEnumApplication, - ValidateFunc: validation.StringInSlice([]string{elbv2.LoadBalancerTypeEnumApplication, elbv2.LoadBalancerTypeEnumNetwork}, false), + Default: awstypes.LoadBalancerTypeEnumApplication, + ValidateFunc: validation.StringInSlice(enum.Slice[awstypes.LoadBalancerTypeEnum](awstypes.LoadBalancerTypeEnumApplication, awstypes.LoadBalancerTypeEnumNetwork), false), }, }, } @@ -116,25 +114,27 @@ func DataSourceHostedZoneID() *schema.Resource { func dataSourceHostedZoneIDRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + region := meta.(*conns.AWSClient).Region if v, ok := d.GetOk(names.AttrRegion); ok { region = v.(string) } - lbType := elbv2.LoadBalancerTypeEnumApplication + lbType := awstypes.LoadBalancerTypeEnumApplication if v, ok := d.GetOk("load_balancer_type"); ok { - lbType = v.(string) + lbType = awstypes.LoadBalancerTypeEnum(v.(string)) } - if lbType == elbv2.LoadBalancerTypeEnumApplication { - if zoneId, ok := HostedZoneIdPerRegionALBMap[region]; ok { - d.SetId(zoneId) + switch lbType { + case awstypes.LoadBalancerTypeEnumApplication: + if v, ok := hostedZoneIDPerRegionALBMap[region]; ok { + d.SetId(v) } else { return sdkdiag.AppendErrorf(diags, "unsupported AWS Region: %s", region) } - } else if lbType == elbv2.LoadBalancerTypeEnumNetwork { - if zoneId, ok := HostedZoneIdPerRegionNLBMap[region]; ok { - d.SetId(zoneId) + case awstypes.LoadBalancerTypeEnumNetwork: + if v, ok := hostedZoneIDPerRegionNLBMap[region]; ok { + d.SetId(v) } else { return sdkdiag.AppendErrorf(diags, "unsupported AWS Region: %s", region) } diff --git 
a/internal/service/elbv2/hosted_zone_id_data_source_test.go b/internal/service/elbv2/hosted_zone_id_data_source_test.go index 7de885682ea..7d4439743b4 100644 --- a/internal/service/elbv2/hosted_zone_id_data_source_test.go +++ b/internal/service/elbv2/hosted_zone_id_data_source_test.go @@ -22,7 +22,7 @@ func TestAccELBV2HostedZoneIDDataSource_basic(t *testing.T) { { Config: testAccHostedZoneIDDataSourceConfig_basic, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_lb_hosted_zone_id.main", names.AttrID, tfelbv2.HostedZoneIdPerRegionALBMap[acctest.Region()]), + resource.TestCheckResourceAttr("data.aws_lb_hosted_zone_id.main", names.AttrID, tfelbv2.HostedZoneIDPerRegionALBMap[acctest.Region()]), ), }, { @@ -34,7 +34,7 @@ func TestAccELBV2HostedZoneIDDataSource_basic(t *testing.T) { { Config: testAccHostedZoneIDDataSourceConfig_explicitNetwork, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_lb_hosted_zone_id.network", names.AttrID, tfelbv2.HostedZoneIdPerRegionNLBMap[acctest.Region()]), + resource.TestCheckResourceAttr("data.aws_lb_hosted_zone_id.network", names.AttrID, tfelbv2.HostedZoneIDPerRegionNLBMap[acctest.Region()]), ), }, { diff --git a/internal/service/elbv2/id.go b/internal/service/elbv2/id.go deleted file mode 100644 index 74338265616..00000000000 --- a/internal/service/elbv2/id.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package elbv2 - -import ( - "fmt" - "strings" -) - -const listenerCertificateIDSeparator = "_" - -func listenerCertificateParseID(id string) (string, string, error) { - parts := strings.SplitN(id, listenerCertificateIDSeparator, 2) - if len(parts) == 2 && parts[0] != "" && parts[1] != "" { - return parts[0], parts[1], nil - } - - return "", "", - fmt.Errorf("unexpected format for ID (%q), expected listener-arn"+listenerCertificateIDSeparator+ - "certificate-arn", id) -} - -func listenerCertificateCreateID(listenerArn, certificateArn string) string { - return strings.Join([]string{listenerArn, listenerCertificateIDSeparator, certificateArn}, "") -} diff --git a/internal/service/elbv2/list_listener_certificates_pages_gen.go b/internal/service/elbv2/list_listener_certificates_pages_gen.go new file mode 100644 index 00000000000..3f84830cf75 --- /dev/null +++ b/internal/service/elbv2/list_listener_certificates_pages_gen.go @@ -0,0 +1,27 @@ +// Code generated by "internal/generate/listpages/main.go -AWSSDKVersion=2 -ListOps=DescribeListenerCertificates -InputPaginator=Marker -OutputPaginator=NextMarker -- list_listener_certificates_pages_gen.go"; DO NOT EDIT. 
+ +package elbv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" +) + +func describeListenerCertificatesPages(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeListenerCertificatesInput, fn func(*elasticloadbalancingv2.DescribeListenerCertificatesOutput, bool) bool) error { + for { + output, err := conn.DescribeListenerCertificates(ctx, input) + if err != nil { + return err + } + + lastPage := aws.ToString(output.NextMarker) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.Marker = output.NextMarker + } + return nil +} diff --git a/internal/service/elbv2/list_rules_pages_gen.go b/internal/service/elbv2/list_rules_pages_gen.go new file mode 100644 index 00000000000..b259c54ec4c --- /dev/null +++ b/internal/service/elbv2/list_rules_pages_gen.go @@ -0,0 +1,27 @@ +// Code generated by "internal/generate/listpages/main.go -AWSSDKVersion=2 -ListOps=DescribeRules -InputPaginator=Marker -OutputPaginator=NextMarker -- list_rules_pages_gen.go"; DO NOT EDIT. 
+ +package elbv2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" +) + +func describeRulesPages(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeRulesInput, fn func(*elasticloadbalancingv2.DescribeRulesOutput, bool) bool) error { + for { + output, err := conn.DescribeRules(ctx, input) + if err != nil { + return err + } + + lastPage := aws.ToString(output.NextMarker) == "" + if !fn(output, lastPage) || lastPage { + break + } + + input.Marker = output.NextMarker + } + return nil +} diff --git a/internal/service/elbv2/listener.go b/internal/service/elbv2/listener.go index 0f5b63d259c..3646685ddab 100644 --- a/internal/service/elbv2/listener.go +++ b/internal/service/elbv2/listener.go @@ -16,7 +16,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -40,7 +40,7 @@ import ( // @Tags(identifierAttribute="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;awstypes;awstypes.Listener") // @Testing(importIgnore="default_action.0.forward") -func ResourceListener() *schema.Resource { +func resourceListener() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceListenerCreate, ReadWithoutTimeout: resourceListenerRead, @@ -368,7 +368,6 @@ func ResourceListener() *schema.Resource { }, }, }, - names.AttrPort: { Type: schema.TypeInt, Optional: true, @@ -421,7 +420,7 @@ func resourceListenerCreate(ctx context.Context, d *schema.ResourceData, meta in lbARN := d.Get("load_balancer_arn").(string) 
input := &elasticloadbalancingv2.CreateListenerInput{ LoadBalancerArn: aws.String(lbARN), - Tags: getTagsInV2(ctx), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("alpn_policy"); ok { @@ -492,8 +491,8 @@ func resourceListenerCreate(ctx context.Context, d *schema.ResourceData, meta in } // For partitions not supporting tag-on-create, attempt tag after create. - if tags := getTagsInV2(ctx); input.Tags == nil && len(tags) > 0 { - err := createTagsV2(ctx, conn, d.Id(), tags) + if tags := getTagsIn(ctx); input.Tags == nil && len(tags) > 0 { + err := createTags(ctx, conn, d.Id(), tags) // If default tags only, continue. Otherwise, error. if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition, err) { @@ -663,15 +662,17 @@ func findListener(ctx context.Context, conn *elasticloadbalancingv2.Client, inpu func findListeners(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeListenersInput, filter tfslices.Predicate[*awstypes.Listener]) ([]awstypes.Listener, error) { var output []awstypes.Listener - paginator := elasticloadbalancingv2.NewDescribeListenersPaginator(conn, input) - for paginator.HasMorePages() { - page, err := paginator.NextPage(ctx) + pages := elasticloadbalancingv2.NewDescribeListenersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if errs.IsA[*awstypes.ListenerNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, } } + if err != nil { return nil, err } @@ -963,31 +964,31 @@ func expandLbListenerActionForwardConfigTargetGroupStickinessConfig(l []interfac } } -func flattenLbListenerActions(d *schema.ResourceData, attrName string, actions []awstypes.Action) []interface{} { - if len(actions) == 0 { +func flattenLbListenerActions(d *schema.ResourceData, attrName string, apiObjects []awstypes.Action) []interface{} { + if 
len(apiObjects) == 0 { return []interface{}{} } - var vActions []interface{} + var tfList []interface{} - for i, action := range actions { - m := map[string]interface{}{ - names.AttrType: string(action.Type), - "order": aws.ToInt32(action.Order), + for i, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + names.AttrType: apiObject.Type, + "order": aws.ToInt32(apiObject.Order), } - switch action.Type { + switch apiObject.Type { case awstypes.ActionTypeEnumForward: - flattenLbForwardAction(d, attrName, i, action, m) + flattenLbForwardAction(d, attrName, i, apiObject, tfMap) case awstypes.ActionTypeEnumRedirect: - m["redirect"] = flattenLbListenerActionRedirectConfig(action.RedirectConfig) + tfMap["redirect"] = flattenLbListenerActionRedirectConfig(apiObject.RedirectConfig) case awstypes.ActionTypeEnumFixedResponse: - m["fixed_response"] = flattenLbListenerActionFixedResponseConfig(action.FixedResponseConfig) + tfMap["fixed_response"] = flattenLbListenerActionFixedResponseConfig(apiObject.FixedResponseConfig) case awstypes.ActionTypeEnumAuthenticateCognito: - m["authenticate_cognito"] = flattenLbListenerActionAuthenticateCognitoConfig(action.AuthenticateCognitoConfig) + tfMap["authenticate_cognito"] = flattenLbListenerActionAuthenticateCognitoConfig(apiObject.AuthenticateCognitoConfig) case awstypes.ActionTypeEnumAuthenticateOidc: // The LB API currently provides no way to read the ClientSecret @@ -997,13 +998,13 @@ func flattenLbListenerActions(d *schema.ResourceData, attrName string, actions [ clientSecret = v.(string) } - m["authenticate_oidc"] = flattenAuthenticateOIDCActionConfig(action.AuthenticateOidcConfig, clientSecret) + tfMap["authenticate_oidc"] = flattenAuthenticateOIDCActionConfig(apiObject.AuthenticateOidcConfig, clientSecret) } - vActions = append(vActions, m) + tfList = append(tfList, tfMap) } - return vActions + return tfList } func flattenLbForwardAction(d *schema.ResourceData, attrName string, i int, awsAction awstypes.Action, 
actionMap map[string]any) { @@ -1012,17 +1013,17 @@ func flattenLbForwardAction(d *schema.ResourceData, attrName string, i int, awsA // On import, we have an empty State and empty Config if rawConfig := d.GetRawConfig(); rawConfig.IsKnown() && !rawConfig.IsNull() { - actions := rawConfig.GetAttr(attrName) - flattenLbForwardActionOneOf(actions, i, awsAction, actionMap) - return + if actions := rawConfig.GetAttr(attrName); actions.IsKnown() && !actions.IsNull() { + flattenLbForwardActionOneOf(actions, i, awsAction, actionMap) + return + } } - rawState := d.GetRawState() - defaultActions := rawState.GetAttr(attrName) - - if defaultActions.LengthInt() > 0 { - flattenLbForwardActionOneOf(defaultActions, i, awsAction, actionMap) - return + if rawState := d.GetRawState(); rawState.IsKnown() && !rawState.IsNull() { + if defaultActions := rawState.GetAttr(attrName); defaultActions.LengthInt() > 0 { + flattenLbForwardActionOneOf(defaultActions, i, awsAction, actionMap) + return + } } flattenLbForwardActionBoth(awsAction, actionMap) @@ -1073,45 +1074,45 @@ func flattenMutualAuthenticationAttributes(description *awstypes.MutualAuthentic return []interface{}{m} } -func flattenAuthenticateOIDCActionConfig(config *awstypes.AuthenticateOidcActionConfig, clientSecret string) []interface{} { - if config == nil { +func flattenAuthenticateOIDCActionConfig(apiObject *awstypes.AuthenticateOidcActionConfig, clientSecret string) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "authentication_request_extra_params": config.AuthenticationRequestExtraParams, - "authorization_endpoint": aws.ToString(config.AuthorizationEndpoint), - names.AttrClientID: aws.ToString(config.ClientId), + tfMap := map[string]interface{}{ + "authentication_request_extra_params": apiObject.AuthenticationRequestExtraParams, + "authorization_endpoint": aws.ToString(apiObject.AuthorizationEndpoint), + names.AttrClientID: aws.ToString(apiObject.ClientId), 
names.AttrClientSecret: clientSecret, - names.AttrIssuer: aws.ToString(config.Issuer), - "on_unauthenticated_request": string(config.OnUnauthenticatedRequest), - names.AttrScope: aws.ToString(config.Scope), - "session_cookie_name": aws.ToString(config.SessionCookieName), - "session_timeout": aws.ToInt64(config.SessionTimeout), - "token_endpoint": aws.ToString(config.TokenEndpoint), - "user_info_endpoint": aws.ToString(config.UserInfoEndpoint), + names.AttrIssuer: aws.ToString(apiObject.Issuer), + "on_unauthenticated_request": apiObject.OnUnauthenticatedRequest, + names.AttrScope: aws.ToString(apiObject.Scope), + "session_cookie_name": aws.ToString(apiObject.SessionCookieName), + "session_timeout": aws.ToInt64(apiObject.SessionTimeout), + "token_endpoint": aws.ToString(apiObject.TokenEndpoint), + "user_info_endpoint": aws.ToString(apiObject.UserInfoEndpoint), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenLbListenerActionAuthenticateCognitoConfig(config *awstypes.AuthenticateCognitoActionConfig) []interface{} { - if config == nil { +func flattenLbListenerActionAuthenticateCognitoConfig(apiObject *awstypes.AuthenticateCognitoActionConfig) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "authentication_request_extra_params": config.AuthenticationRequestExtraParams, - "on_unauthenticated_request": string(config.OnUnauthenticatedRequest), - names.AttrScope: aws.ToString(config.Scope), - "session_cookie_name": aws.ToString(config.SessionCookieName), - "session_timeout": aws.ToInt64(config.SessionTimeout), - "user_pool_arn": aws.ToString(config.UserPoolArn), - "user_pool_client_id": aws.ToString(config.UserPoolClientId), - "user_pool_domain": aws.ToString(config.UserPoolDomain), + tfMap := map[string]interface{}{ + "authentication_request_extra_params": apiObject.AuthenticationRequestExtraParams, + "on_unauthenticated_request": apiObject.OnUnauthenticatedRequest, + names.AttrScope: 
aws.ToString(apiObject.Scope), + "session_cookie_name": aws.ToString(apiObject.SessionCookieName), + "session_timeout": aws.ToInt64(apiObject.SessionTimeout), + "user_pool_arn": aws.ToString(apiObject.UserPoolArn), + "user_pool_client_id": aws.ToString(apiObject.UserPoolClientId), + "user_pool_domain": aws.ToString(apiObject.UserPoolDomain), } - return []interface{}{m} + return []interface{}{tfMap} } func flattenLbListenerActionFixedResponseConfig(config *awstypes.FixedResponseActionConfig) []interface{} { @@ -1173,21 +1174,21 @@ func flattenLbListenerActionForwardConfigTargetGroupStickinessConfig(config *aws return []interface{}{m} } -func flattenLbListenerActionRedirectConfig(config *awstypes.RedirectActionConfig) []interface{} { - if config == nil { +func flattenLbListenerActionRedirectConfig(apiObject *awstypes.RedirectActionConfig) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "host": aws.ToString(config.Host), - names.AttrPath: aws.ToString(config.Path), - names.AttrPort: aws.ToString(config.Port), - names.AttrProtocol: aws.ToString(config.Protocol), - "query": aws.ToString(config.Query), - names.AttrStatusCode: string(config.StatusCode), + tfMap := map[string]interface{}{ + "host": aws.ToString(apiObject.Host), + names.AttrPath: aws.ToString(apiObject.Path), + names.AttrPort: aws.ToString(apiObject.Port), + names.AttrProtocol: aws.ToString(apiObject.Protocol), + "query": aws.ToString(apiObject.Query), + names.AttrStatusCode: apiObject.StatusCode, } - return []interface{}{m} + return []interface{}{tfMap} } const ( @@ -1275,7 +1276,7 @@ func listenerActionPlantimeValidate(actionPath cty.Path, action cty.Value, diags )) } - switch awstypes.ActionTypeEnum(actionType.AsString()) { + switch actionType := awstypes.ActionTypeEnum(actionType.AsString()); actionType { case awstypes.ActionTypeEnumForward: if tga.IsNull() && (f.IsNull() || f.LengthInt() == 0) { typePath := actionPath.GetAttr(names.AttrType) @@ -1284,7 
+1285,7 @@ func listenerActionPlantimeValidate(actionPath cty.Path, action cty.Value, diags fmt.Sprintf("Either %q or %q must be specified when %q is %q.", errs.PathString(actionPath.GetAttr("target_group_arn")), errs.PathString(actionPath.GetAttr("forward")), errs.PathString(typePath), - awstypes.ActionTypeEnumForward, + actionType, ), )) } @@ -1294,7 +1295,7 @@ func listenerActionPlantimeValidate(actionPath cty.Path, action cty.Value, diags *diags = append(*diags, errs.NewAttributeRequiredWhenError( actionPath.GetAttr("redirect"), actionPath.GetAttr(names.AttrType), - string(awstypes.ActionTypeEnumRedirect), + string(actionType), )) } @@ -1303,7 +1304,7 @@ func listenerActionPlantimeValidate(actionPath cty.Path, action cty.Value, diags *diags = append(*diags, errs.NewAttributeRequiredWhenError( actionPath.GetAttr("fixed_response"), actionPath.GetAttr(names.AttrType), - string(awstypes.ActionTypeEnumFixedResponse), + string(actionType), )) } @@ -1312,7 +1313,7 @@ func listenerActionPlantimeValidate(actionPath cty.Path, action cty.Value, diags *diags = append(*diags, errs.NewAttributeRequiredWhenError( actionPath.GetAttr("authenticate_cognito"), actionPath.GetAttr(names.AttrType), - string(awstypes.ActionTypeEnumAuthenticateCognito), + string(actionType), )) } @@ -1321,7 +1322,7 @@ func listenerActionPlantimeValidate(actionPath cty.Path, action cty.Value, diags *diags = append(*diags, errs.NewAttributeRequiredWhenError( actionPath.GetAttr("authenticate_oidc"), actionPath.GetAttr(names.AttrType), - string(awstypes.ActionTypeEnumAuthenticateOidc), + string(actionType), )) } } diff --git a/internal/service/elbv2/listener_certificate.go b/internal/service/elbv2/listener_certificate.go index 7cf9850ee0b..7303e575057 100644 --- a/internal/service/elbv2/listener_certificate.go +++ b/internal/service/elbv2/listener_certificate.go @@ -7,7 +7,7 @@ import ( "context" "fmt" "log" - "time" + "strings" "github.com/aws/aws-sdk-go-v2/aws" 
"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" @@ -16,32 +16,34 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_alb_listener_certificate") -// @SDKResource("aws_lb_listener_certificate") -func ResourceListenerCertificate() *schema.Resource { +// @SDKResource("aws_alb_listener_certificate", name="Listener Certificate") +// @SDKResource("aws_lb_listener_certificate", name="Listener Certificate") +func resourceListenerCertificate() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceListenerCertificateCreate, ReadWithoutTimeout: resourceListenerCertificateRead, DeleteWithoutTimeout: resourceListenerCertificateDelete, + Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, Schema: map[string]*schema.Schema{ - "listener_arn": { + names.AttrCertificateARN: { Type: schema.TypeString, Required: true, ForceNew: true, ValidateFunc: verify.ValidARN, }, - names.AttrCertificateARN: { + "listener_arn": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -51,51 +53,29 @@ func ResourceListenerCertificate() *schema.Resource { } } -const ( - ResNameListenerCertificate = "Listener Certificate" - ListenerCertificateNotFound = "ListenerCertificateNotFound" -) - func resourceListenerCertificateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags 
diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - listenerArn := d.Get("listener_arn").(string) - certificateArn := d.Get(names.AttrCertificateARN).(string) - - params := &elasticloadbalancingv2.AddListenerCertificatesInput{ - ListenerArn: aws.String(listenerArn), + listenerARN := d.Get("listener_arn").(string) + certificateARN := d.Get(names.AttrCertificateARN).(string) + id := listenerCertificateCreateResourceID(listenerARN, certificateARN) + input := &elasticloadbalancingv2.AddListenerCertificatesInput{ Certificates: []awstypes.Certificate{{ - CertificateArn: aws.String(certificateArn), + CertificateArn: aws.String(certificateARN), }}, + ListenerArn: aws.String(listenerARN), } - log.Printf("[DEBUG] Adding certificate: %s of listener: %s", certificateArn, listenerArn) - - err := retry.RetryContext(ctx, 1*time.Minute, func() *retry.RetryError { - _, err := conn.AddListenerCertificates(ctx, params) - - // Retry for IAM Server Certificate eventual consistency - if errs.IsA[*awstypes.CertificateNotFoundException](err) { - return retry.RetryableError(err) - } - - if err != nil { - return retry.NonRetryableError(err) - } - - return nil + _, err := tfresource.RetryWhenIsA[*awstypes.CertificateNotFoundException](ctx, iamPropagationTimeout, func() (interface{}, error) { + return conn.AddListenerCertificates(ctx, input) }) - if tfresource.TimedOut(err) { - _, err = conn.AddListenerCertificates(ctx, params) - } - if err != nil { - return create.AppendDiagError(diags, names.ELBV2, create.ErrActionCreating, ResNameListenerCertificate, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "creating ELBv2 Listener Certificate (%s): %s", id, err) } - d.SetId(listenerCertificateCreateID(listenerArn, certificateArn)) + d.SetId(id) return append(diags, resourceListenerCertificateRead(ctx, d, meta)...) 
} @@ -104,42 +84,27 @@ func resourceListenerCertificateRead(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - listenerArn, certificateArn, err := listenerCertificateParseID(d.Id()) + listenerARN, certificateARN, err := listenerCertificateParseResourceID(d.Id()) if err != nil { - return create.AppendDiagError(diags, names.ELBV2, create.ErrActionReading, ResNameListenerCertificate, d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - log.Printf("[DEBUG] Reading certificate: %s of listener: %s", certificateArn, listenerArn) - - err = retry.RetryContext(ctx, 1*time.Minute, func() *retry.RetryError { - err := findListenerCertificate(ctx, conn, certificateArn, listenerArn, true, nil) - if tfresource.NotFound(err) && d.IsNewResource() { - return retry.RetryableError(err) - } - - if err != nil { - return retry.NonRetryableError(err) - } - - return nil - }) - - if tfresource.TimedOut(err) { - err = findListenerCertificate(ctx, conn, certificateArn, listenerArn, true, nil) - } + _, err = tfresource.RetryWhenNewResourceNotFound(ctx, elbv2PropagationTimeout, func() (interface{}, error) { + return findListenerCertificateByTwoPartKey(ctx, conn, listenerARN, certificateARN) + }, d.IsNewResource()) if !d.IsNewResource() && tfresource.NotFound(err) { - create.LogNotFoundRemoveState(names.ELBV2, create.ErrActionReading, ResNameListenerCertificate, d.Id()) + log.Printf("[WARN] ELBv2 Listener Certificate (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return create.AppendDiagError(diags, names.ELBV2, create.ErrActionReading, ResNameListenerCertificate, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading ELBv2 Listener Certificate (%s): %s", d.Id(), err) } - d.Set(names.AttrCertificateARN, certificateArn) - d.Set("listener_arn", listenerArn) + d.Set(names.AttrCertificateARN, certificateARN) + d.Set("listener_arn", listenerARN) return diags } @@ -148,72 
+113,102 @@ func resourceListenerCertificateDelete(ctx context.Context, d *schema.ResourceDa var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - certificateArn := d.Get(names.AttrCertificateARN).(string) - listenerArn := d.Get("listener_arn").(string) - - log.Printf("[DEBUG] Deleting certificate: %s of listener: %s", certificateArn, listenerArn) + listenerARN, certificateARN, err := listenerCertificateParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } - params := &elasticloadbalancingv2.RemoveListenerCertificatesInput{ - ListenerArn: aws.String(listenerArn), + log.Printf("[INFO] Deleting ELBv2 Listener Certificate: %s", d.Id()) + _, err = conn.RemoveListenerCertificates(ctx, &elasticloadbalancingv2.RemoveListenerCertificatesInput{ Certificates: []awstypes.Certificate{{ - CertificateArn: aws.String(certificateArn), + CertificateArn: aws.String(certificateARN), }}, + ListenerArn: aws.String(listenerARN), + }) + + if errs.IsA[*awstypes.CertificateNotFoundException](err) || errs.IsA[*awstypes.ListenerNotFoundException](err) { + return diags } - _, err := conn.RemoveListenerCertificates(ctx, params) - if err != nil { - if errs.IsA[*awstypes.CertificateNotFoundException](err) { - return diags - } else if errs.IsA[*awstypes.ListenerNotFoundException](err) { - return diags - } - // Even though we're not trying to remove the default certificate, AWS started returning this error around 2023-12-09 - if errs.IsAErrorMessageContains[*awstypes.OperationNotPermittedException](err, "Default certificate cannot be removed") { - return diags - } + // Even though we're not trying to remove the default certificate, AWS started returning this error around 2023-12-09. 
+ if errs.IsAErrorMessageContains[*awstypes.OperationNotPermittedException](err, "Default certificate cannot be removed") { + return diags + } - return create.AppendDiagError(diags, names.ELBV2, create.ErrActionDeleting, ResNameListenerCertificate, d.Id(), err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting ELBv2 Listener Certificate (%s): %s", d.Id(), err) } return diags } -func findListenerCertificate(ctx context.Context, conn *elasticloadbalancingv2.Client, certificateArn, listenerArn string, skipDefault bool, nextMarker *string) error { - params := &elasticloadbalancingv2.DescribeListenerCertificatesInput{ - ListenerArn: aws.String(listenerArn), - PageSize: aws.Int32(400), - } - if nextMarker != nil { - params.Marker = nextMarker - } +const listenerCertificateResourceIDSeparator = "_" - resp, err := conn.DescribeListenerCertificates(ctx, params) - if errs.IsA[*awstypes.ListenerNotFoundException](err) { - return &retry.NotFoundError{ - LastRequest: params, - LastError: err, - } +func listenerCertificateCreateResourceID(listenerARN, certificateARN string) string { + parts := []string{listenerARN, certificateARN} + id := strings.Join(parts, listenerCertificateResourceIDSeparator) + + return id +} + +func listenerCertificateParseResourceID(id string) (string, string, error) { + parts := strings.SplitN(id, listenerCertificateResourceIDSeparator, 2) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected LISTENER_ARN%[2]sCERTIFICATE_ARN", id, listenerCertificateResourceIDSeparator) +} + +func findListenerCertificate(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeListenerCertificatesInput, filter tfslices.Predicate[*awstypes.Certificate]) (*awstypes.Certificate, error) { + output, err := findListenerCertificates(ctx, conn, input, filter) + if err != nil { - return err + return nil, err } - 
for _, cert := range resp.Certificates { - if skipDefault && aws.ToBool(cert.IsDefault) { - continue + return tfresource.AssertSingleValueResult(output) +} + +func findListenerCertificates(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeListenerCertificatesInput, filter tfslices.Predicate[*awstypes.Certificate]) ([]awstypes.Certificate, error) { + var output []awstypes.Certificate + + err := describeListenerCertificatesPages(ctx, conn, input, func(page *elasticloadbalancingv2.DescribeListenerCertificatesOutput, lastPage bool) bool { + if page == nil { + return !lastPage } - if aws.ToString(cert.CertificateArn) == certificateArn { - return nil + for _, v := range page.Certificates { + if filter(&v) { + output = append(output, v) + } + } + + return !lastPage + }) + + if errs.IsA[*awstypes.ListenerNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - if resp.NextMarker != nil { - return findListenerCertificate(ctx, conn, certificateArn, listenerArn, skipDefault, resp.NextMarker) + if err != nil { + return nil, err } - return &retry.NotFoundError{ - LastRequest: params, - Message: fmt.Sprintf("%s: certificate %s for listener %s not found", ListenerCertificateNotFound, certificateArn, listenerArn), + return output, nil +} + +func findListenerCertificateByTwoPartKey(ctx context.Context, conn *elasticloadbalancingv2.Client, listenerARN, certificateARN string) (*awstypes.Certificate, error) { + input := &elasticloadbalancingv2.DescribeListenerCertificatesInput{ + ListenerArn: aws.String(listenerARN), + PageSize: aws.Int32(400), } + + return findListenerCertificate(ctx, conn, input, func(v *awstypes.Certificate) bool { + return !aws.ToBool(v.IsDefault) && aws.ToString(v.CertificateArn) == certificateARN + }) } diff --git a/internal/service/elbv2/listener_certificate_test.go b/internal/service/elbv2/listener_certificate_test.go index 81ba2c5e393..06c19cf55e0 100644 --- 
a/internal/service/elbv2/listener_certificate_test.go +++ b/internal/service/elbv2/listener_certificate_test.go @@ -5,20 +5,16 @@ package elbv2_test import ( "context" - "errors" "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" - awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/errs" tfelbv2 "github.com/hashicorp/terraform-provider-aws/internal/service/elbv2" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -40,7 +36,7 @@ func TestAccELBV2ListenerCertificate_basic(t *testing.T) { { Config: testAccListenerCertificateConfig_basic(rName, key, certificate), Check: resource.ComposeTestCheckFunc( - testAccCheckListenerCertificateExists(resourceName), + testAccCheckListenerCertificateExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, names.AttrCertificateARN, iamServerCertificateResourceName, names.AttrARN), resource.TestCheckResourceAttrPair(resourceName, "listener_arn", lbListenerResourceName, names.AttrARN), ), @@ -73,7 +69,7 @@ func TestAccELBV2ListenerCertificate_CertificateARN_underscores(t *testing.T) { { Config: testAccListenerCertificateConfig_arnUnderscores(rName, key, certificate), Check: resource.ComposeTestCheckFunc( - testAccCheckListenerCertificateExists(resourceName), + testAccCheckListenerCertificateExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, names.AttrCertificateARN, iamServerCertificateResourceName, names.AttrARN), 
resource.TestCheckResourceAttrPair(resourceName, "listener_arn", lbListenerResourceName, names.AttrARN), ), @@ -108,9 +104,9 @@ func TestAccELBV2ListenerCertificate_multiple(t *testing.T) { { Config: testAccListenerCertificateConfig_multiple(rName, keys, certificates), Check: resource.ComposeTestCheckFunc( - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.default"), - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.additional_1"), - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.additional_2"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.default"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.additional_1"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.additional_2"), resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.default", "listener_arn"), resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.default", names.AttrCertificateARN), resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.additional_1", "listener_arn"), @@ -127,10 +123,10 @@ func TestAccELBV2ListenerCertificate_multiple(t *testing.T) { { Config: testAccListenerCertificateConfig_multipleAddNew(rName, keys, certificates), Check: resource.ComposeTestCheckFunc( - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.default"), - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.additional_1"), - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.additional_2"), - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.additional_3"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.default"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.additional_1"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.additional_2"), + testAccCheckListenerCertificateExists(ctx, 
"aws_lb_listener_certificate.additional_3"), resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.default", "listener_arn"), resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.default", names.AttrCertificateARN), resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.additional_1", "listener_arn"), @@ -144,9 +140,9 @@ func TestAccELBV2ListenerCertificate_multiple(t *testing.T) { { Config: testAccListenerCertificateConfig_multiple(rName, keys, certificates), Check: resource.ComposeTestCheckFunc( - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.default"), - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.additional_1"), - testAccCheckListenerCertificateExists("aws_lb_listener_certificate.additional_2"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.default"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.additional_1"), + testAccCheckListenerCertificateExists(ctx, "aws_lb_listener_certificate.additional_2"), testAccCheckListenerCertificateNotExists("aws_lb_listener_certificate.additional_3"), resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.default", "listener_arn"), resource.TestCheckResourceAttrSet("aws_lb_listener_certificate.default", names.AttrCertificateARN), @@ -176,7 +172,7 @@ func TestAccELBV2ListenerCertificate_disappears(t *testing.T) { { Config: testAccListenerCertificateConfig_basic(rName, key, certificate), Check: resource.ComposeTestCheckFunc( - testAccCheckListenerCertificateExists(resourceName), + testAccCheckListenerCertificateExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelbv2.ResourceListenerCertificate(), resourceName), ), ExpectNonEmptyPlan: true, @@ -202,7 +198,7 @@ func TestAccELBV2ListenerCertificate_disappears_Listener(t *testing.T) { { Config: testAccListenerCertificateConfig_basic(rName, key, certificate), Check: resource.ComposeTestCheckFunc( - 
testAccCheckListenerCertificateExists(resourceName), + testAccCheckListenerCertificateExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelbv2.ResourceListener(), listenerResourceName), ), ExpectNonEmptyPlan: true, @@ -216,47 +212,39 @@ func testAccCheckListenerCertificateDestroy(ctx context.Context) resource.TestCh conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_lb_listener_certificate" { + if rs.Type != "aws_lb_listener_certificate" && rs.Type != "aws_alb_listener_certificate" { continue } - input := &elasticloadbalancingv2.DescribeListenerCertificatesInput{ - ListenerArn: aws.String(rs.Primary.Attributes["listener_arn"]), - PageSize: aws.Int32(400), + _, err := tfelbv2.FindListenerCertificateByTwoPartKey(ctx, conn, rs.Primary.Attributes["listener_arn"], rs.Primary.Attributes[names.AttrCertificateARN]) + + if tfresource.NotFound(err) { + continue } - resp, err := conn.DescribeListenerCertificates(ctx, input) if err != nil { - if errs.IsA[*awstypes.ListenerNotFoundException](err) { - return nil - } return err } - for _, cert := range resp.Certificates { - // We only care about additional certificates. 
- if aws.ToBool(cert.IsDefault) { - continue - } - - if aws.ToString(cert.CertificateArn) == rs.Primary.Attributes[names.AttrCertificateARN] { - return errors.New("LB listener certificate not destroyed") - } - } + return fmt.Errorf("ELBv2 Listener Certificate %s still exists", rs.Primary.ID) } return nil } } -func testAccCheckListenerCertificateExists(name string) resource.TestCheckFunc { +func testAccCheckListenerCertificateExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { - _, ok := s.RootModule().Resources[name] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", name) + return fmt.Errorf("Not found: %s", n) } - return nil + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) + + _, err := tfelbv2.FindListenerCertificateByTwoPartKey(ctx, conn, rs.Primary.Attributes["listener_arn"], rs.Primary.Attributes[names.AttrCertificateARN]) + + return err } } diff --git a/internal/service/elbv2/listener_data_source.go b/internal/service/elbv2/listener_data_source.go index dc7221f1638..828348abfd0 100644 --- a/internal/service/elbv2/listener_data_source.go +++ b/internal/service/elbv2/listener_data_source.go @@ -23,10 +23,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_alb_listener") -// @SDKDataSource("aws_lb_listener") +// @SDKDataSource("aws_alb_listener", name="Listener") +// @SDKDataSource("aws_lb_listener", name="Listener") // @Testing(tagsTest=true) -func DataSourceListener() *schema.Resource { +func dataSourceListener() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceListenerRead, @@ -351,7 +351,7 @@ func dataSourceListenerRead(ctx context.Context, d *schema.ResourceData, meta in d.Set(names.AttrProtocol, listener.Protocol) d.Set("ssl_policy", listener.SslPolicy) - tags, err := listTagsV2(ctx, conn, d.Id()) + tags, err := listTags(ctx, conn, d.Id()) if 
errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition, err) { log.Printf("[WARN] Unable to list tags for ELBv2 Listener %s: %s", d.Id(), err) diff --git a/internal/service/elbv2/listener_data_source_test.go b/internal/service/elbv2/listener_data_source_test.go index f0a0c1aed53..5eb1369b277 100644 --- a/internal/service/elbv2/listener_data_source_test.go +++ b/internal/service/elbv2/listener_data_source_test.go @@ -32,6 +32,7 @@ func TestAccELBV2ListenerDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrCertificateARN, resourceName, names.AttrCertificateARN), resource.TestCheckResourceAttrPair(dataSourceName, "default_action.#", resourceName, "default_action.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "default_action.0.target_group_arn", resourceName, "default_action.0.target_group_arn"), resource.TestCheckResourceAttrPair(dataSourceName, "load_balancer_arn", resourceName, "load_balancer_arn"), resource.TestCheckResourceAttrPair(dataSourceName, "mutual_authentication.#", resourceName, "mutual_authentication.#"), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrPort, resourceName, names.AttrPort), @@ -42,6 +43,7 @@ func TestAccELBV2ListenerDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName2, names.AttrARN, dataSourceName, names.AttrARN), resource.TestCheckResourceAttrPair(dataSourceName2, names.AttrCertificateARN, dataSourceName, names.AttrCertificateARN), resource.TestCheckResourceAttrPair(dataSourceName2, "default_action.#", dataSourceName, "default_action.#"), + resource.TestCheckResourceAttrPair(dataSourceName2, "default_action.0.target_group_arn", dataSourceName, "default_action.0.target_group_arn"), resource.TestCheckResourceAttrPair(dataSourceName2, "load_balancer_arn", dataSourceName, "load_balancer_arn"), 
resource.TestCheckResourceAttrPair(dataSourceName2, "mutual_authentication.#", dataSourceName, "mutual_authentication.#"), resource.TestCheckResourceAttrPair(dataSourceName2, names.AttrPort, dataSourceName, names.AttrPort), diff --git a/internal/service/elbv2/listener_rule.go b/internal/service/elbv2/listener_rule.go index e66bfd96839..b1705884570 100644 --- a/internal/service/elbv2/listener_rule.go +++ b/internal/service/elbv2/listener_rule.go @@ -29,6 +29,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -49,7 +50,7 @@ const ( // @Tags(identifierAttribute="id") // @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;awstypes;awstypes.Rule") // @Testing(importIgnore="action.0.forward") -func ResourceListenerRule() *schema.Resource { +func resourceListenerRule() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceListenerRuleCreate, ReadWithoutTimeout: resourceListenerRuleRead, @@ -65,185 +66,11 @@ func ResourceListenerRule() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "listener_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrPriority: { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ForceNew: false, - ValidateFunc: validListenerRulePriority, - }, names.AttrAction: { Type: schema.TypeList, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - names.AttrType: { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: 
enum.ValidateIgnoreCase[awstypes.ActionTypeEnum](), - }, - "order": { - Type: schema.TypeInt, - Optional: true, - Computed: true, - ValidateFunc: validation.IntBetween(listenerActionOrderMin, listenerActionOrderMax), - }, - - "target_group_arn": { - Type: schema.TypeString, - Optional: true, - DiffSuppressFunc: suppressIfActionTypeNot(awstypes.ActionTypeEnumForward), - ValidateFunc: verify.ValidARN, - }, - - "forward": { - Type: schema.TypeList, - Optional: true, - DiffSuppressOnRefresh: true, - DiffSuppressFunc: diffSuppressMissingForward(names.AttrAction), - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "target_group": { - Type: schema.TypeSet, - MinItems: 1, - MaxItems: 5, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrWeight: { - Type: schema.TypeInt, - ValidateFunc: validation.IntBetween(0, 999), - Default: 1, - Optional: true, - }, - }, - }, - }, - "stickiness": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrEnabled: { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - names.AttrDuration: { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, 604800), - }, - }, - }, - }, - }, - }, - }, - - "redirect": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: suppressIfActionTypeNot(awstypes.ActionTypeEnumRedirect), - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "host": { - Type: schema.TypeString, - Optional: true, - Default: "#{host}", - ValidateFunc: validation.StringLenBetween(1, 128), - }, - - names.AttrPath: { - Type: schema.TypeString, - Optional: true, - Default: "/#{path}", - ValidateFunc: validation.StringLenBetween(1, 128), - }, - - 
names.AttrPort: { - Type: schema.TypeString, - Optional: true, - Default: "#{port}", - }, - - names.AttrProtocol: { - Type: schema.TypeString, - Optional: true, - Default: "#{protocol}", - ValidateFunc: validation.StringInSlice([]string{ - "#{protocol}", - "HTTP", - "HTTPS", - }, false), - }, - - "query": { - Type: schema.TypeString, - Optional: true, - Default: "#{query}", - ValidateFunc: validation.StringLenBetween(0, 128), - }, - - names.AttrStatusCode: { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[awstypes.RedirectActionStatusCodeEnum](), - }, - }, - }, - }, - - "fixed_response": { - Type: schema.TypeList, - Optional: true, - DiffSuppressFunc: suppressIfActionTypeNot(awstypes.ActionTypeEnumFixedResponse), - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrContentType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "text/plain", - "text/css", - "text/html", - "application/javascript", - "application/json", - }, false), - }, - - "message_body": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(0, 1024), - }, - - names.AttrStatusCode: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[245]\d\d$`), ""), - }, - }, - }, - }, - "authenticate_cognito": { Type: schema.TypeList, Optional: true, @@ -293,7 +120,6 @@ func ResourceListenerRule() *schema.Resource { }, }, }, - "authenticate_oidc": { Type: schema.TypeList, Optional: true, @@ -355,6 +181,155 @@ func ResourceListenerRule() *schema.Resource { }, }, }, + "fixed_response": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressIfActionTypeNot(awstypes.ActionTypeEnumFixedResponse), + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrContentType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: 
validation.StringInSlice([]string{ + "text/plain", + "text/css", + "text/html", + "application/javascript", + "application/json", + }, false), + }, + "message_body": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + }, + names.AttrStatusCode: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[245]\d\d$`), ""), + }, + }, + }, + }, + "forward": { + Type: schema.TypeList, + Optional: true, + DiffSuppressOnRefresh: true, + DiffSuppressFunc: diffSuppressMissingForward(names.AttrAction), + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "stickiness": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDuration: { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 604800), + }, + names.AttrEnabled: { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + }, + }, + "target_group": { + Type: schema.TypeSet, + MinItems: 1, + MaxItems: 5, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrWeight: { + Type: schema.TypeInt, + ValidateFunc: validation.IntBetween(0, 999), + Default: 1, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "order": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(listenerActionOrderMin, listenerActionOrderMax), + }, + "redirect": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: suppressIfActionTypeNot(awstypes.ActionTypeEnumRedirect), + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "host": { + Type: schema.TypeString, + Optional: true, + Default: 
"#{host}", + ValidateFunc: validation.StringLenBetween(1, 128), + }, + names.AttrPath: { + Type: schema.TypeString, + Optional: true, + Default: "/#{path}", + ValidateFunc: validation.StringLenBetween(1, 128), + }, + names.AttrPort: { + Type: schema.TypeString, + Optional: true, + Default: "#{port}", + }, + names.AttrProtocol: { + Type: schema.TypeString, + Optional: true, + Default: "#{protocol}", + ValidateFunc: validation.StringInSlice([]string{ + "#{protocol}", + "HTTP", + "HTTPS", + }, false), + }, + "query": { + Type: schema.TypeString, + Optional: true, + Default: "#{query}", + ValidateFunc: validation.StringLenBetween(0, 128), + }, + names.AttrStatusCode: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.RedirectActionStatusCodeEnum](), + }, + }, + }, + }, + "target_group_arn": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: suppressIfActionTypeNot(awstypes.ActionTypeEnumForward), + ValidateFunc: verify.ValidARN, + }, + names.AttrType: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.ValidateIgnoreCase[awstypes.ActionTypeEnum](), + }, }, }, }, @@ -377,7 +352,6 @@ func ResourceListenerRule() *schema.Resource { Type: schema.TypeString, ValidateFunc: validation.StringLenBetween(1, 128), }, - Set: schema.HashString, }, }, }, @@ -400,7 +374,6 @@ func ResourceListenerRule() *schema.Resource { ValidateFunc: validation.StringLenBetween(1, 128), }, Required: true, - Set: schema.HashString, }, }, }, @@ -418,7 +391,6 @@ func ResourceListenerRule() *schema.Resource { ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z-_]{1,40}$`), ""), }, Required: true, - Set: schema.HashString, }, }, }, @@ -437,7 +409,6 @@ func ResourceListenerRule() *schema.Resource { Type: schema.TypeString, ValidateFunc: validation.StringLenBetween(1, 128), }, - Set: schema.HashString, }, }, }, @@ -471,7 +442,6 @@ func ResourceListenerRule() *schema.Resource { ValidateFunc: 
verify.ValidCIDRNetworkAddress, }, Required: true, - Set: schema.HashString, }, }, }, @@ -479,6 +449,19 @@ func ResourceListenerRule() *schema.Resource { }, }, }, + "listener_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrPriority: { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: false, + ValidateFunc: validListenerRulePriority, + }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), }, @@ -512,7 +495,7 @@ func resourceListenerRuleCreate(ctx context.Context, d *schema.ResourceData, met listenerARN := d.Get("listener_arn").(string) input := &elasticloadbalancingv2.CreateRuleInput{ ListenerArn: aws.String(listenerARN), - Tags: getTagsInV2(ctx), + Tags: getTagsIn(ctx), } input.Actions = expandLbListenerActions(cty.GetAttrPath(names.AttrAction), d.Get(names.AttrAction).([]any), &diags) @@ -522,7 +505,7 @@ func resourceListenerRuleCreate(ctx context.Context, d *schema.ResourceData, met var err error - input.Conditions, err = lbListenerRuleConditions(d.Get(names.AttrCondition).(*schema.Set).List()) + input.Conditions, err = expandRuleConditions(d.Get(names.AttrCondition).(*schema.Set).List()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -543,8 +526,8 @@ func resourceListenerRuleCreate(ctx context.Context, d *schema.ResourceData, met d.SetId(aws.ToString(output.Rules[0].RuleArn)) // Post-create tagging supported in some partitions - if tags := getTagsInV2(ctx); input.Tags == nil && len(tags) > 0 { - err := createTagsV2(ctx, conn, d.Id(), tags) + if tags := getTagsIn(ctx); input.Tags == nil && len(tags) > 0 { + err := createTags(ctx, conn, d.Id(), tags) // If default tags only, continue. Otherwise, error. 
if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(meta.(*conns.AWSClient).Partition, err) { @@ -563,54 +546,35 @@ func resourceListenerRuleRead(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - var resp *elasticloadbalancingv2.DescribeRulesOutput - var req = &elasticloadbalancingv2.DescribeRulesInput{ - RuleArns: []string{d.Id()}, - } + outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, elbv2PropagationTimeout, func() (interface{}, error) { + return findListenerRuleByARN(ctx, conn, d.Id()) + }, d.IsNewResource()) - err := retry.RetryContext(ctx, 1*time.Minute, func() *retry.RetryError { - var err error - resp, err = conn.DescribeRules(ctx, req) - if err != nil { - if d.IsNewResource() && errs.IsA[*awstypes.RuleNotFoundException](err) { - return retry.RetryableError(err) - } else { - return retry.NonRetryableError(err) - } - } - return nil - }) - if tfresource.TimedOut(err) { - resp, err = conn.DescribeRules(ctx, req) - } - if err != nil { - if errs.IsA[*awstypes.RuleNotFoundException](err) { - log.Printf("[WARN] DescribeRules - removing %s from state", d.Id()) - d.SetId("") - return diags - } - return sdkdiag.AppendErrorf(diags, "retrieving Rules for listener %q: %s", d.Id(), err) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] ELBv2 Listener Rule (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags } - if len(resp.Rules) != 1 { - return sdkdiag.AppendErrorf(diags, "retrieving Rule %q", d.Id()) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ELBv2 Listener Rule (%s): %s", d.Id(), err) } - rule := resp.Rules[0] + rule := outputRaw.(*awstypes.Rule) d.Set(names.AttrARN, rule.RuleArn) // The listener arn isn't in the response but can be derived from the rule arn - d.Set("listener_arn", ListenerARNFromRuleARN(aws.ToString(rule.RuleArn))) + 
d.Set("listener_arn", listenerARNFromRuleARN(aws.ToString(rule.RuleArn))) // Rules are evaluated in priority order, from the lowest value to the highest value. The default rule has the lowest priority. - if aws.ToString(rule.Priority) == "default" { + if v := aws.ToString(rule.Priority); v == "default" { d.Set(names.AttrPriority, listenerRulePriorityDefault) } else { - if priority, err := strconv.Atoi(aws.ToString(rule.Priority)); err != nil { - return sdkdiag.AppendErrorf(diags, "Cannot convert rule priority %q to int: %s", aws.ToString(rule.Priority), err) + if v, err := strconv.Atoi(v); err != nil { + return sdkdiag.AppendFromErr(diags, err) } else { - d.Set(names.AttrPriority, priority) + d.Set(names.AttrPriority, v) } } @@ -688,7 +652,7 @@ func resourceListenerRuleUpdate(ctx context.Context, d *schema.ResourceData, met if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { if d.HasChange(names.AttrPriority) { - params := &elasticloadbalancingv2.SetRulePrioritiesInput{ + input := &elasticloadbalancingv2.SetRulePrioritiesInput{ RulePriorities: []awstypes.RulePriorityPair{ { RuleArn: aws.String(d.Id()), @@ -697,7 +661,8 @@ func resourceListenerRuleUpdate(ctx context.Context, d *schema.ResourceData, met }, } - _, err := conn.SetRulePriorities(ctx, params) + _, err := conn.SetRulePriorities(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "updating ELB v2 Listener Rule (%s): setting priority: %s", d.Id(), err) } @@ -718,7 +683,7 @@ func resourceListenerRuleUpdate(ctx context.Context, d *schema.ResourceData, met if d.HasChange(names.AttrCondition) { var err error - input.Conditions, err = lbListenerRuleConditions(d.Get(names.AttrCondition).(*schema.Set).List()) + input.Conditions, err = expandRuleConditions(d.Get(names.AttrCondition).(*schema.Set).List()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -744,64 +709,119 @@ func resourceListenerRuleDelete(ctx context.Context, d *schema.ResourceData, met var diags diag.Diagnostics conn 
:= meta.(*conns.AWSClient).ELBV2Client(ctx) + log.Printf("[INFO] Deleting ELBv2 Listener Rule: %s", d.Id()) _, err := conn.DeleteRule(ctx, &elasticloadbalancingv2.DeleteRuleInput{ RuleArn: aws.String(d.Id()), }) - if err != nil && !errs.IsA[*awstypes.RuleNotFoundException](err) { - return sdkdiag.AppendErrorf(diags, "deleting LB Listener Rule: %s", err) + + if errs.IsA[*awstypes.RuleNotFoundException](err) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting ELBv2 Listener Rule (%s): %s", d.Id(), err) } + return diags } -func retryListenerRuleCreate(ctx context.Context, conn *elasticloadbalancingv2.Client, d *schema.ResourceData, params *elasticloadbalancingv2.CreateRuleInput, listenerARN string) (*elasticloadbalancingv2.CreateRuleOutput, error) { - var resp *elasticloadbalancingv2.CreateRuleOutput +func retryListenerRuleCreate(ctx context.Context, conn *elasticloadbalancingv2.Client, d *schema.ResourceData, input *elasticloadbalancingv2.CreateRuleInput, listenerARN string) (*elasticloadbalancingv2.CreateRuleOutput, error) { if v, ok := d.GetOk(names.AttrPriority); ok { - var err error - params.Priority = aws.Int32(int32(v.(int))) - resp, err = conn.CreateRule(ctx, params) + input.Priority = aws.Int32(int32(v.(int))) + + return conn.CreateRule(ctx, input) + } + const ( + timeout = 5 * time.Minute + ) + outputRaw, err := tfresource.RetryWhenIsA[*awstypes.PriorityInUseException](ctx, timeout, func() (interface{}, error) { + priority, err := highestListenerRulePriority(ctx, conn, listenerARN) if err != nil { return nil, err } - } else { - var priority int32 - err := retry.RetryContext(ctx, 5*time.Minute, func() *retry.RetryError { - var err error - priority, err = highestListenerRulePriority(ctx, conn, listenerARN) - if err != nil { - return retry.NonRetryableError(err) - } - params.Priority = aws.Int32(priority + 1) - resp, err = conn.CreateRule(ctx, params) - if err != nil { - if errs.IsA[*awstypes.PriorityInUseException](err) { - 
return retry.RetryableError(err) - } - return retry.NonRetryableError(err) - } - return nil - }) + input.Priority = aws.Int32(priority + 1) + return conn.CreateRule(ctx, input) + }) - if tfresource.TimedOut(err) { - priority, err = highestListenerRulePriority(ctx, conn, listenerARN) - if err != nil { - return nil, fmt.Errorf("getting highest listener rule (%s) priority: %w", listenerARN, err) + if err != nil { + return nil, err + } + + return outputRaw.(*elasticloadbalancingv2.CreateRuleOutput), nil +} + +func findListenerRule(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeRulesInput, filter tfslices.Predicate[*awstypes.Rule]) (*awstypes.Rule, error) { + output, err := findListenerRules(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findListenerRules(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeRulesInput, filter tfslices.Predicate[*awstypes.Rule]) ([]awstypes.Rule, error) { + var output []awstypes.Rule + + err := describeRulesPages(ctx, conn, input, func(page *elasticloadbalancingv2.DescribeRulesOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.Rules { + if filter(&v) { + output = append(output, v) } - params.Priority = aws.Int32(priority + 1) - resp, err = conn.CreateRule(ctx, params) } - if err != nil { - return nil, err + return !lastPage + }) + + if errs.IsA[*awstypes.RuleNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - if resp == nil || len(resp.Rules) == 0 { - return nil, fmt.Errorf("creating LB Listener Rule (%s): no rules returned in response", listenerARN) + if err != nil { + return nil, err + } + + return output, nil +} + +func findListenerRuleByARN(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) (*awstypes.Rule, error) { + input := 
&elasticloadbalancingv2.DescribeRulesInput{ + RuleArns: []string{arn}, + } + + return findListenerRule(ctx, conn, input, tfslices.PredicateTrue[*awstypes.Rule]()) +} + +func highestListenerRulePriority(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) (int32, error) { + input := &elasticloadbalancingv2.DescribeRulesInput{ + ListenerArn: aws.String(arn), } + rules, err := findListenerRules(ctx, conn, input, func(v *awstypes.Rule) bool { + return aws.ToString(v.Priority) != "default" + }) - return resp, nil + if err != nil { + return 0, err + } + + priorities := tfslices.ApplyToAll(rules, func(v awstypes.Rule) int32 { + return flex.StringToInt32Value(v.Priority) + }) + + if len(priorities) == 0 { + return 0, nil + } + + return slices.Max(priorities), nil } func validListenerRulePriority(v interface{}, k string) (ws []string, errors []error) { @@ -820,105 +840,72 @@ func validListenerRulePriority(v interface{}, k string) (ws []string, errors []e // arn:aws:elasticloadbalancing:us-east-1:012345678912:listener/app/name/0123456789abcdef/abcdef0123456789 var lbListenerARNFromRuleARNRegexp = regexache.MustCompile(`^(arn:.+:listener)-rule(/.+)/[^/]+$`) -func ListenerARNFromRuleARN(ruleArn string) string { - if arnComponents := lbListenerARNFromRuleARNRegexp.FindStringSubmatch(ruleArn); len(arnComponents) > 1 { +func listenerARNFromRuleARN(ruleARN string) string { + if arnComponents := lbListenerARNFromRuleARNRegexp.FindStringSubmatch(ruleARN); len(arnComponents) > 1 { return arnComponents[1] + arnComponents[2] } return "" } -func highestListenerRulePriority(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) (priority int32, err error) { - var priorities []int32 - var nextMarker *string - - for { - out, aerr := conn.DescribeRules(ctx, &elasticloadbalancingv2.DescribeRulesInput{ - ListenerArn: aws.String(arn), - Marker: nextMarker, - }) - if aerr != nil { - return 0, aerr - } - for _, rule := range out.Rules { - if priority := 
aws.ToString(rule.Priority); priority != "default" { - p, _ := strconv.ParseInt(priority, 0, 32) - priorities = append(priorities, int32(p)) - } - } - if out.NextMarker == nil { - break - } - nextMarker = out.NextMarker - } +func expandRuleConditions(tfList []interface{}) ([]awstypes.RuleCondition, error) { + apiObjects := make([]awstypes.RuleCondition, len(tfList)) - if len(priorities) == 0 { - return 0, nil - } - - slices.Sort(priorities) + for i, tfMapRaw := range tfList { + tfMap := tfMapRaw.(map[string]interface{}) + apiObjects[i] = awstypes.RuleCondition{} - return priorities[len(priorities)-1], nil -} - -// lbListenerRuleConditions converts data source generated by Terraform into -// an elasticloadbalancingv2/types.RuleCondition object suitable for submitting to AWS API. -func lbListenerRuleConditions(conditions []interface{}) ([]awstypes.RuleCondition, error) { - elbConditions := make([]awstypes.RuleCondition, len(conditions)) - for i, condition := range conditions { - elbConditions[i] = awstypes.RuleCondition{} - conditionMap := condition.(map[string]interface{}) var field string var attrs int - if hostHeader, ok := conditionMap["host_header"].([]interface{}); ok && len(hostHeader) > 0 { + if hostHeader, ok := tfMap["host_header"].([]interface{}); ok && len(hostHeader) > 0 { field = "host-header" attrs += 1 values := hostHeader[0].(map[string]interface{})[names.AttrValues].(*schema.Set) - elbConditions[i].HostHeaderConfig = &awstypes.HostHeaderConditionConfig{ + apiObjects[i].HostHeaderConfig = &awstypes.HostHeaderConditionConfig{ Values: flex.ExpandStringValueSet(values), } } - if httpHeader, ok := conditionMap["http_header"].([]interface{}); ok && len(httpHeader) > 0 { + if httpHeader, ok := tfMap["http_header"].([]interface{}); ok && len(httpHeader) > 0 { field = "http-header" attrs += 1 httpHeaderMap := httpHeader[0].(map[string]interface{}) values := httpHeaderMap[names.AttrValues].(*schema.Set) - elbConditions[i].HttpHeaderConfig = 
&awstypes.HttpHeaderConditionConfig{ + apiObjects[i].HttpHeaderConfig = &awstypes.HttpHeaderConditionConfig{ HttpHeaderName: aws.String(httpHeaderMap["http_header_name"].(string)), Values: flex.ExpandStringValueSet(values), } } - if httpRequestMethod, ok := conditionMap["http_request_method"].([]interface{}); ok && len(httpRequestMethod) > 0 { + if httpRequestMethod, ok := tfMap["http_request_method"].([]interface{}); ok && len(httpRequestMethod) > 0 { field = "http-request-method" attrs += 1 values := httpRequestMethod[0].(map[string]interface{})[names.AttrValues].(*schema.Set) - elbConditions[i].HttpRequestMethodConfig = &awstypes.HttpRequestMethodConditionConfig{ + apiObjects[i].HttpRequestMethodConfig = &awstypes.HttpRequestMethodConditionConfig{ Values: flex.ExpandStringValueSet(values), } } - if pathPattern, ok := conditionMap["path_pattern"].([]interface{}); ok && len(pathPattern) > 0 { + if pathPattern, ok := tfMap["path_pattern"].([]interface{}); ok && len(pathPattern) > 0 { field = "path-pattern" attrs += 1 values := pathPattern[0].(map[string]interface{})[names.AttrValues].(*schema.Set) - elbConditions[i].PathPatternConfig = &awstypes.PathPatternConditionConfig{ + apiObjects[i].PathPatternConfig = &awstypes.PathPatternConditionConfig{ Values: flex.ExpandStringValueSet(values), } } - if queryString, ok := conditionMap["query_string"].(*schema.Set); ok && queryString.Len() > 0 { + if queryString, ok := tfMap["query_string"].(*schema.Set); ok && queryString.Len() > 0 { field = "query-string" attrs += 1 values := queryString.List() - elbConditions[i].QueryStringConfig = &awstypes.QueryStringConditionConfig{ + apiObjects[i].QueryStringConfig = &awstypes.QueryStringConditionConfig{ Values: make([]awstypes.QueryStringKeyValuePair, len(values)), } for j, p := range values { @@ -929,16 +916,16 @@ func lbListenerRuleConditions(conditions []interface{}) ([]awstypes.RuleConditio if valuePair[names.AttrKey].(string) != "" { elbValuePair.Key = 
aws.String(valuePair[names.AttrKey].(string)) } - elbConditions[i].QueryStringConfig.Values[j] = elbValuePair + apiObjects[i].QueryStringConfig.Values[j] = elbValuePair } } - if sourceIp, ok := conditionMap["source_ip"].([]interface{}); ok && len(sourceIp) > 0 { + if sourceIp, ok := tfMap["source_ip"].([]interface{}); ok && len(sourceIp) > 0 { field = "source-ip" attrs += 1 values := sourceIp[0].(map[string]interface{})[names.AttrValues].(*schema.Set) - elbConditions[i].SourceIpConfig = &awstypes.SourceIpConditionConfig{ + apiObjects[i].SourceIpConfig = &awstypes.SourceIpConditionConfig{ Values: flex.ExpandStringValueSet(values), } } @@ -952,7 +939,8 @@ func lbListenerRuleConditions(conditions []interface{}) ([]awstypes.RuleConditio return nil, errors.New("Only one of host_header, http_header, http_request_method, path_pattern, query_string or source_ip can be set in a condition block") } - elbConditions[i].Field = aws.String(field) + apiObjects[i].Field = aws.String(field) } - return elbConditions, nil + + return apiObjects, nil } diff --git a/internal/service/elbv2/listener_rule_test.go b/internal/service/elbv2/listener_rule_test.go index 3f1f88ca871..f0bcfdcd361 100644 --- a/internal/service/elbv2/listener_rule_test.go +++ b/internal/service/elbv2/listener_rule_test.go @@ -5,7 +5,6 @@ package elbv2_test import ( "context" - "errors" "fmt" "regexp" "slices" @@ -21,8 +20,8 @@ import ( "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/errs" tfelbv2 "github.com/hashicorp/terraform-provider-aws/internal/service/elbv2" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -1470,6 +1469,7 @@ func TestAccELBV2ListenerRule_EmptyAction(t *testing.T) { } } +// 
https://github.com/hashicorp/terraform-provider-aws/issues/35668. func TestAccELBV2ListenerRule_redirectWithTargetGroupARN(t *testing.T) { ctx := acctest.Context(t) var conf awstypes.Rule @@ -1503,10 +1503,15 @@ func TestAccELBV2ListenerRule_redirectWithTargetGroupARN(t *testing.T) { ), }, { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccListenerRuleConfig_redirectWithTargetGroupARN(lbName), - PlanOnly: true, - ExpectNonEmptyPlan: false, + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.36.0", + }, + }, + Config: testAccListenerRuleConfig_redirectWithTargetGroupARN(lbName), + PlanOnly: true, + ExpectNonEmptyPlan: false, }, }, }) @@ -2147,33 +2152,23 @@ func testAccCheckListenerRuleRecreated(t *testing.T, before, after *awstypes.Rul } } -func testAccCheckListenerRuleExists(ctx context.Context, n string, res *awstypes.Rule) resource.TestCheckFunc { +func testAccCheckListenerRuleExists(ctx context.Context, n string, v *awstypes.Rule) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No Listener Rule ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) - describe, err := conn.DescribeRules(ctx, &elasticloadbalancingv2.DescribeRulesInput{ - RuleArns: []string{rs.Primary.ID}, - }) + output, err := tfelbv2.FindListenerRuleByARN(ctx, conn, rs.Primary.ID) if err != nil { return err } - if len(describe.Rules) != 1 || - *describe.Rules[0].RuleArn != rs.Primary.ID { - return errors.New("Listener Rule not found") - } + *v = *output - *res = describe.Rules[0] return nil } } @@ -2187,23 +2182,17 @@ func testAccCheckListenerRuleDestroy(ctx context.Context) resource.TestCheckFunc continue } - describe, err := conn.DescribeRules(ctx, &elasticloadbalancingv2.DescribeRulesInput{ - RuleArns: 
[]string{rs.Primary.ID}, - }) + _, err := tfelbv2.FindListenerRuleByARN(ctx, conn, rs.Primary.ID) - if err == nil { - if len(describe.Rules) != 0 && - *describe.Rules[0].RuleArn == rs.Primary.ID { - return fmt.Errorf("Listener Rule %q still exists", rs.Primary.ID) - } + if tfresource.NotFound(err) { + continue } - // Verify the error - if errs.IsA[*awstypes.RuleNotFoundException](err) { - return nil - } else { - return fmt.Errorf("Unexpected error checking LB Listener Rule destroyed: %s", err) + if err != nil { + return err } + + return fmt.Errorf("ELBv2 Listener Rule %s still exists", rs.Primary.ID) } return nil diff --git a/internal/service/elbv2/listener_test.go b/internal/service/elbv2/listener_test.go index d78b3b82f3a..76af9f2cf7c 100644 --- a/internal/service/elbv2/listener_test.go +++ b/internal/service/elbv2/listener_test.go @@ -1572,6 +1572,7 @@ func TestAccELBV2Listener_EmptyDefaultAction(t *testing.T) { } } +// https://github.com/hashicorp/terraform-provider-aws/issues/35668. 
func TestAccELBV2Listener_redirectWithTargetGroupARN(t *testing.T) { ctx := acctest.Context(t) var conf awstypes.Listener @@ -1605,10 +1606,15 @@ func TestAccELBV2Listener_redirectWithTargetGroupARN(t *testing.T) { ), }, { - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - Config: testAccListenerConfig_redirectWithTargetGroupARN(rName), - PlanOnly: true, - ExpectNonEmptyPlan: false, + ExternalProviders: map[string]resource.ExternalProvider{ + "aws": { + Source: "hashicorp/aws", + VersionConstraint: "5.36.0", + }, + }, + Config: testAccListenerConfig_redirectWithTargetGroupARN(rName), + PlanOnly: true, + ExpectNonEmptyPlan: false, }, }, }) diff --git a/internal/service/elbv2/load_balancer.go b/internal/service/elbv2/load_balancer.go index 7d356cbf3f2..2381931882c 100644 --- a/internal/service/elbv2/load_balancer.go +++ b/internal/service/elbv2/load_balancer.go @@ -4,7 +4,6 @@ package elbv2 import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports - "context" "errors" "fmt" @@ -13,11 +12,11 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "time" "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -25,6 +24,7 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" 
"github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -39,8 +39,8 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports // @SDKResource("aws_alb", name="Load Balancer") // @SDKResource("aws_lb", name="Load Balancer") // @Tags(identifierAttribute="id") -// @Testing(existsType="github.com/aws/aws-sdk-go/service/elbv2;elbv2.LoadBalancer") -func ResourceLoadBalancer() *schema.Resource { +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;types.LoadBalancer") +func resourceLoadBalancer() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLoadBalancerCreate, ReadWithoutTimeout: resourceLoadBalancerRead, @@ -106,7 +106,7 @@ func ResourceLoadBalancer() *schema.Resource { Type: schema.TypeInt, Optional: true, Default: 3600, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, "connection_logs": { Type: schema.TypeList, @@ -147,7 +147,7 @@ func ResourceLoadBalancer() *schema.Resource { Optional: true, Default: httpDesyncMitigationModeDefensive, ValidateFunc: validation.StringInSlice(httpDesyncMitigationMode_Values(), false), - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, names.AttrDNSName: { Type: schema.TypeString, @@ -157,20 +157,20 @@ func ResourceLoadBalancer() *schema.Resource { Type: schema.TypeString, Optional: true, Default: dnsRecordClientRoutingPolicyAnyAvailabilityZone, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumNetwork), + DiffSuppressFunc: 
suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumNetwork), ValidateFunc: validation.StringInSlice(dnsRecordClientRoutingPolicy_Values(), false), }, "drop_invalid_header_fields": { Type: schema.TypeBool, Optional: true, Default: false, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, "enable_cross_zone_load_balancing": { Type: schema.TypeBool, Optional: true, Default: false, - DiffSuppressFunc: suppressIfLBType(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBType(awstypes.LoadBalancerTypeEnumApplication), }, "enable_deletion_protection": { Type: schema.TypeBool, @@ -181,38 +181,38 @@ func ResourceLoadBalancer() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, "enable_tls_version_and_cipher_suite_headers": { Type: schema.TypeBool, Optional: true, Default: false, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, "enable_waf_fail_open": { Type: schema.TypeBool, Optional: true, Default: false, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, "enable_xff_client_port": { Type: schema.TypeBool, Optional: true, Default: false, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, "enforce_security_group_inbound_rules_on_private_link_traffic": { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: 
validation.StringInSlice(elbv2.EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum_Values(), false), - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumNetwork), + ValidateDiagFunc: enum.Validate[awstypes.EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum](), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumNetwork), }, "idle_timeout": { Type: schema.TypeInt, Optional: true, Default: 60, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, "internal": { Type: schema.TypeBool, @@ -221,17 +221,17 @@ func ResourceLoadBalancer() *schema.Resource { Computed: true, }, names.AttrIPAddressType: { - Type: schema.TypeString, - Computed: true, - Optional: true, - ValidateFunc: validation.StringInSlice(elbv2.IpAddressType_Values(), false), + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.IpAddressType](), }, "load_balancer_type": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - Default: elbv2.LoadBalancerTypeEnumApplication, - ValidateFunc: validation.StringInSlice(elbv2.LoadBalancerTypeEnum_Values(), false), + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: awstypes.LoadBalancerTypeEnumApplication, + ValidateDiagFunc: enum.Validate[awstypes.LoadBalancerTypeEnum](), }, names.AttrName: { Type: schema.TypeString, @@ -253,7 +253,7 @@ func ResourceLoadBalancer() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: false, - DiffSuppressFunc: suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), }, names.AttrSecurityGroups: { Type: schema.TypeSet, @@ -310,7 +310,7 @@ func ResourceLoadBalancer() *schema.Resource { Type: schema.TypeString, Optional: true, Default: httpXFFHeaderProcessingModeAppend, - DiffSuppressFunc: 
suppressIfLBTypeNot(elbv2.LoadBalancerTypeEnumApplication), + DiffSuppressFunc: suppressIfLBTypeNot(awstypes.LoadBalancerTypeEnumApplication), ValidateFunc: validation.StringInSlice(httpXFFHeaderProcessingMode_Values(), false), }, "zone_id": { @@ -321,29 +321,30 @@ func ResourceLoadBalancer() *schema.Resource { } } -func suppressIfLBType(types ...string) schema.SchemaDiffSuppressFunc { +func suppressIfLBType(types ...awstypes.LoadBalancerTypeEnum) schema.SchemaDiffSuppressFunc { return func(k string, old string, new string, d *schema.ResourceData) bool { - return slices.Contains(types, d.Get("load_balancer_type").(string)) + return slices.Contains(types, awstypes.LoadBalancerTypeEnum(d.Get("load_balancer_type").(string))) } } -func suppressIfLBTypeNot(types ...string) schema.SchemaDiffSuppressFunc { +func suppressIfLBTypeNot(types ...awstypes.LoadBalancerTypeEnum) schema.SchemaDiffSuppressFunc { return func(k string, old string, new string, d *schema.ResourceData) bool { - return !slices.Contains(types, d.Get("load_balancer_type").(string)) + return !slices.Contains(types, awstypes.LoadBalancerTypeEnum(d.Get("load_balancer_type").(string))) } } func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) + partition := meta.(*conns.AWSClient).Partition name := create.NewNameGenerator( create.WithConfiguredName(d.Get(names.AttrName).(string)), create.WithConfiguredPrefix(d.Get(names.AttrNamePrefix).(string)), create.WithDefaultPrefix("tf-lb-"), ).Generate() - exist, err := findLoadBalancer(ctx, conn, &elbv2.DescribeLoadBalancersInput{ - Names: aws.StringSlice([]string{name}), + exist, err := findLoadBalancer(ctx, conn, &elasticloadbalancingv2.DescribeLoadBalancersInput{ + Names: []string{name}, }) if err != nil && !tfresource.NotFound(err) { @@ -356,11 +357,11 @@ func 
resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met d.Set(names.AttrName, name) - lbType := d.Get("load_balancer_type").(string) - input := &elbv2.CreateLoadBalancerInput{ + lbType := awstypes.LoadBalancerTypeEnum(d.Get("load_balancer_type").(string)) + input := &elasticloadbalancingv2.CreateLoadBalancerInput{ Name: aws.String(name), Tags: getTagsIn(ctx), - Type: aws.String(lbType), + Type: lbType, } if v, ok := d.GetOk("customer_owned_ipv4_pool"); ok { @@ -368,15 +369,15 @@ func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met } if _, ok := d.GetOk("internal"); ok { - input.Scheme = aws.String(elbv2.LoadBalancerSchemeEnumInternal) + input.Scheme = awstypes.LoadBalancerSchemeEnumInternal } if v, ok := d.GetOk(names.AttrIPAddressType); ok { - input.IpAddressType = aws.String(v.(string)) + input.IpAddressType = awstypes.IpAddressType(v.(string)) } if v, ok := d.GetOk(names.AttrSecurityGroups); ok { - input.SecurityGroups = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroups = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("subnet_mapping"); ok && v.(*schema.Set).Len() > 0 { @@ -384,23 +385,23 @@ func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met } if v, ok := d.GetOk(names.AttrSubnets); ok { - input.Subnets = flex.ExpandStringSet(v.(*schema.Set)) + input.Subnets = flex.ExpandStringValueSet(v.(*schema.Set)) } - output, err := conn.CreateLoadBalancerWithContext(ctx, input) + output, err := conn.CreateLoadBalancer(ctx, input) // Some partitions (e.g. ISO) may not support tag-on-create. 
- if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateLoadBalancerWithContext(ctx, input) + output, err = conn.CreateLoadBalancer(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating ELBv2 %s Load Balancer (%s): %s", lbType, name, err) } - d.SetId(aws.StringValue(output.LoadBalancers[0].LoadBalancerArn)) + d.SetId(aws.ToString(output.LoadBalancers[0].LoadBalancerArn)) if _, err := waitLoadBalancerActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ELBv2 Load Balancer (%s) create: %s", d.Id(), err) @@ -411,7 +412,7 @@ func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met err := createTags(ctx, conn, d.Id(), tags) // If default tags only, continue. Otherwise, error. - if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(partition, err) { return append(diags, resourceLoadBalancerUpdate(ctx, d, meta)...) } @@ -420,31 +421,31 @@ func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met } } - var attributes []*elbv2.LoadBalancerAttribute + var attributes []awstypes.LoadBalancerAttribute - if lbType == elbv2.LoadBalancerTypeEnumApplication || lbType == elbv2.LoadBalancerTypeEnumNetwork { + if lbType == awstypes.LoadBalancerTypeEnumApplication || lbType == awstypes.LoadBalancerTypeEnumNetwork { if v, ok := d.GetOk("access_logs"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandLoadBalancerAccessLogsAttributes(v.([]interface{})[0].(map[string]interface{}), false)...) 
} else { - attributes = append(attributes, &elbv2.LoadBalancerAttribute{ + attributes = append(attributes, awstypes.LoadBalancerAttribute{ Key: aws.String(loadBalancerAttributeAccessLogsS3Enabled), Value: flex.BoolValueToString(false), }) } } - if lbType == elbv2.LoadBalancerTypeEnumApplication { + if lbType == awstypes.LoadBalancerTypeEnumApplication { if v, ok := d.GetOk("connection_logs"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandLoadBalancerConnectionLogsAttributes(v.([]interface{})[0].(map[string]interface{}), false)...) } else { - attributes = append(attributes, &elbv2.LoadBalancerAttribute{ + attributes = append(attributes, awstypes.LoadBalancerAttribute{ Key: aws.String(loadBalancerAttributeConnectionLogsS3Enabled), Value: flex.BoolValueToString(false), }) } } - attributes = append(attributes, loadBalancerAttributes.expand(d, false)...) + attributes = append(attributes, loadBalancerAttributes.expand(d, lbType, false)...) 
wait := false if len(attributes) > 0 { @@ -455,17 +456,17 @@ func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met wait = true } - if v, ok := d.GetOk("enforce_security_group_inbound_rules_on_private_link_traffic"); ok && lbType == elbv2.LoadBalancerTypeEnumNetwork { - input := &elbv2.SetSecurityGroupsInput{ - EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic: aws.String(v.(string)), + if v, ok := d.GetOk("enforce_security_group_inbound_rules_on_private_link_traffic"); ok && lbType == awstypes.LoadBalancerTypeEnumNetwork { + input := &elasticloadbalancingv2.SetSecurityGroupsInput{ + EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic: awstypes.EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum(v.(string)), LoadBalancerArn: aws.String(d.Id()), } if v, ok := d.GetOk(names.AttrSecurityGroups); ok { - input.SecurityGroups = flex.ExpandStringSet(v.(*schema.Set)) + input.SecurityGroups = flex.ExpandStringValueSet(v.(*schema.Set)) } - _, err := conn.SetSecurityGroupsWithContext(ctx, input) + _, err := conn.SetSecurityGroups(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELBv2 Load Balancer (%s) security groups: %s", d.Id(), err) @@ -485,9 +486,9 @@ func resourceLoadBalancerCreate(ctx context.Context, d *schema.ResourceData, met func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - lb, err := FindLoadBalancerByARN(ctx, conn, d.Id()) + lb, err := findLoadBalancerByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Load Balancer %s not found, removing from state", d.Id()) @@ -500,17 +501,16 @@ func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta } d.Set(names.AttrARN, lb.LoadBalancerArn) - d.Set("arn_suffix", SuffixFromARN(lb.LoadBalancerArn)) + 
d.Set("arn_suffix", suffixFromARN(lb.LoadBalancerArn)) d.Set("customer_owned_ipv4_pool", lb.CustomerOwnedIpv4Pool) d.Set(names.AttrDNSName, lb.DNSName) d.Set("enforce_security_group_inbound_rules_on_private_link_traffic", lb.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic) - d.Set("internal", aws.StringValue(lb.Scheme) == elbv2.LoadBalancerSchemeEnumInternal) + d.Set("internal", lb.Scheme == awstypes.LoadBalancerSchemeEnumInternal) d.Set(names.AttrIPAddressType, lb.IpAddressType) - lbType := aws.StringValue(lb.Type) - d.Set("load_balancer_type", lbType) + d.Set("load_balancer_type", lb.Type) d.Set(names.AttrName, lb.LoadBalancerName) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.StringValue(lb.LoadBalancerName))) - d.Set(names.AttrSecurityGroups, aws.StringValueSlice(lb.SecurityGroups)) + d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(lb.LoadBalancerName))) + d.Set(names.AttrSecurityGroups, lb.SecurityGroups) if err := d.Set("subnet_mapping", flattenSubnetMappingsFromAvailabilityZones(lb.AvailabilityZones)); err != nil { return sdkdiag.AppendErrorf(diags, "setting subnet_mapping: %s", err) } @@ -520,7 +520,7 @@ func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta d.Set(names.AttrVPCID, lb.VpcId) d.Set("zone_id", lb.CanonicalHostedZoneId) - attributes, err := FindLoadBalancerAttributesByARN(ctx, conn, d.Id()) + attributes, err := findLoadBalancerAttributesByARN(ctx, conn, d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading ELBv2 Load Balancer (%s) attributes: %s", d.Id(), err) @@ -530,7 +530,7 @@ func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "setting access_logs: %s", err) } - if lbType == elbv2.LoadBalancerTypeEnumApplication { + if lb.Type == awstypes.LoadBalancerTypeEnumApplication { if err := d.Set("connection_logs", []interface{}{flattenLoadBalancerConnectionLogsAttributes(attributes)}); err != nil { return 
sdkdiag.AppendErrorf(diags, "setting connection_logs: %s", err) } @@ -543,15 +543,16 @@ func resourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - var attributes []*elbv2.LoadBalancerAttribute + lbType := awstypes.LoadBalancerTypeEnum(d.Get("load_balancer_type").(string)) + var attributes []awstypes.LoadBalancerAttribute if d.HasChange("access_logs") { if v, ok := d.GetOk("access_logs"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandLoadBalancerAccessLogsAttributes(v.([]interface{})[0].(map[string]interface{}), true)...) } else { - attributes = append(attributes, &elbv2.LoadBalancerAttribute{ + attributes = append(attributes, awstypes.LoadBalancerAttribute{ Key: aws.String(loadBalancerAttributeAccessLogsS3Enabled), Value: flex.BoolValueToString(false), }) @@ -562,14 +563,14 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met if v, ok := d.GetOk("connection_logs"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandLoadBalancerConnectionLogsAttributes(v.([]interface{})[0].(map[string]interface{}), true)...) } else { - attributes = append(attributes, &elbv2.LoadBalancerAttribute{ + attributes = append(attributes, awstypes.LoadBalancerAttribute{ Key: aws.String(loadBalancerAttributeConnectionLogsS3Enabled), Value: flex.BoolValueToString(false), }) } } - attributes = append(attributes, loadBalancerAttributes.expand(d, true)...) + attributes = append(attributes, loadBalancerAttributes.expand(d, lbType, true)...) 
if len(attributes) > 0 { if err := modifyLoadBalancerAttributes(ctx, conn, d.Id(), attributes); err != nil { @@ -578,18 +579,18 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met } if d.HasChanges("enforce_security_group_inbound_rules_on_private_link_traffic", names.AttrSecurityGroups) { - input := &elbv2.SetSecurityGroupsInput{ + input := &elasticloadbalancingv2.SetSecurityGroupsInput{ LoadBalancerArn: aws.String(d.Id()), - SecurityGroups: flex.ExpandStringSet(d.Get(names.AttrSecurityGroups).(*schema.Set)), + SecurityGroups: flex.ExpandStringValueSet(d.Get(names.AttrSecurityGroups).(*schema.Set)), } - if v := d.Get("load_balancer_type"); v == elbv2.LoadBalancerTypeEnumNetwork { + if lbType == awstypes.LoadBalancerTypeEnumNetwork { if v, ok := d.GetOk("enforce_security_group_inbound_rules_on_private_link_traffic"); ok { - input.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = aws.String(v.(string)) + input.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic = awstypes.EnforceSecurityGroupInboundRulesOnPrivateLinkTrafficEnum(v.(string)) } } - _, err := conn.SetSecurityGroupsWithContext(ctx, input) + _, err := conn.SetSecurityGroups(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELBv2 Load Balancer (%s) security groups: %s", d.Id(), err) @@ -597,7 +598,7 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met } if d.HasChanges("subnet_mapping", names.AttrSubnets) { - input := &elbv2.SetSubnetsInput{ + input := &elasticloadbalancingv2.SetSubnetsInput{ LoadBalancerArn: aws.String(d.Id()), } @@ -609,11 +610,11 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met if d.HasChange(names.AttrSubnets) { if v, ok := d.GetOk(names.AttrSubnets); ok { - input.Subnets = flex.ExpandStringSet(v.(*schema.Set)) + input.Subnets = flex.ExpandStringValueSet(v.(*schema.Set)) } } - _, err := conn.SetSubnetsWithContext(ctx, input) + _, err := conn.SetSubnets(ctx, 
input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELBv2 Load Balancer (%s) subnets: %s", d.Id(), err) @@ -621,12 +622,12 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met } if d.HasChange(names.AttrIPAddressType) { - input := &elbv2.SetIpAddressTypeInput{ - IpAddressType: aws.String(d.Get(names.AttrIPAddressType).(string)), + input := &elasticloadbalancingv2.SetIpAddressTypeInput{ + IpAddressType: awstypes.IpAddressType(d.Get(names.AttrIPAddressType).(string)), LoadBalancerArn: aws.String(d.Id()), } - _, err := conn.SetIpAddressTypeWithContext(ctx, input) + _, err := conn.SetIpAddressType(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "setting ELBv2 Load Balancer (%s) address type: %s", d.Id(), err) @@ -642,10 +643,10 @@ func resourceLoadBalancerUpdate(ctx context.Context, d *schema.ResourceData, met func resourceLoadBalancerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) log.Printf("[INFO] Deleting ELBv2 Load Balancer: %s", d.Id()) - _, err := conn.DeleteLoadBalancerWithContext(ctx, &elbv2.DeleteLoadBalancerInput{ + _, err := conn.DeleteLoadBalancer(ctx, &elasticloadbalancingv2.DeleteLoadBalancerInput{ LoadBalancerArn: aws.String(d.Id()), }) @@ -666,8 +667,8 @@ func resourceLoadBalancerDelete(ctx context.Context, d *schema.ResourceData, met return diags } -func modifyLoadBalancerAttributes(ctx context.Context, conn *elbv2.ELBV2, arn string, attributes []*elbv2.LoadBalancerAttribute) error { - input := &elbv2.ModifyLoadBalancerAttributesInput{ +func modifyLoadBalancerAttributes(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string, attributes []awstypes.LoadBalancerAttribute) error { + input := &elasticloadbalancingv2.ModifyLoadBalancerAttributesInput{ Attributes: attributes, LoadBalancerArn: aws.String(arn), } @@ 
-678,7 +679,7 @@ func modifyLoadBalancerAttributes(ctx context.Context, conn *elbv2.ELBV2, arn st return nil } - _, err := conn.ModifyLoadBalancerAttributesWithContext(ctx, input) + _, err := conn.ModifyLoadBalancerAttributes(ctx, input) if err != nil { // "Validation error: Load balancer attribute key 'routing.http.desync_mitigation_mode' is not recognized" @@ -686,8 +687,8 @@ func modifyLoadBalancerAttributes(ctx context.Context, conn *elbv2.ELBV2, arn st re := regexache.MustCompile(`attribute key ('|")?([^'" ]+)('|")? is not (recognized|supported)`) if sm := re.FindStringSubmatch(err.Error()); len(sm) > 1 { key := sm[2] - input.Attributes = slices.DeleteFunc(input.Attributes, func(v *elbv2.LoadBalancerAttribute) bool { - return aws.StringValue(v.Key) == key + input.Attributes = slices.DeleteFunc(input.Attributes, func(v awstypes.LoadBalancerAttribute) bool { + return aws.ToString(v.Key) == key }) continue @@ -703,7 +704,7 @@ func modifyLoadBalancerAttributes(ctx context.Context, conn *elbv2.ELBV2, arn st type loadBalancerAttributeInfo struct { apiAttributeKey string tfType schema.ValueType - loadBalancerTypesSupported []string + loadBalancerTypesSupported []awstypes.LoadBalancerTypeEnum } type loadBalancerAttributeMap map[string]loadBalancerAttributeInfo @@ -712,100 +713,99 @@ var loadBalancerAttributes = loadBalancerAttributeMap(map[string]loadBalancerAtt "client_keep_alive": { apiAttributeKey: loadBalancerAttributeClientKeepAliveSeconds, tfType: schema.TypeInt, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, "desync_mitigation_mode": { apiAttributeKey: loadBalancerAttributeRoutingHTTPDesyncMitigationMode, tfType: schema.TypeString, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, 
"dns_record_client_routing_policy": { apiAttributeKey: loadBalancerAttributeDNSRecordClientRoutingPolicy, tfType: schema.TypeString, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumNetwork}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumNetwork}, }, "drop_invalid_header_fields": { apiAttributeKey: loadBalancerAttributeRoutingHTTPDropInvalidHeaderFieldsEnabled, tfType: schema.TypeBool, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, "enable_cross_zone_load_balancing": { apiAttributeKey: loadBalancerAttributeLoadBalancingCrossZoneEnabled, tfType: schema.TypeBool, // Although this attribute is supported for ALBs, it must always be true. - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumNetwork, elbv2.LoadBalancerTypeEnumGateway}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumNetwork, awstypes.LoadBalancerTypeEnumGateway}, }, "enable_deletion_protection": { apiAttributeKey: loadBalancerAttributeDeletionProtectionEnabled, tfType: schema.TypeBool, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication, elbv2.LoadBalancerTypeEnumNetwork, elbv2.LoadBalancerTypeEnumGateway}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication, awstypes.LoadBalancerTypeEnumNetwork, awstypes.LoadBalancerTypeEnumGateway}, }, "enable_http2": { apiAttributeKey: loadBalancerAttributeRoutingHTTP2Enabled, tfType: schema.TypeBool, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, "enable_tls_version_and_cipher_suite_headers": { apiAttributeKey: loadBalancerAttributeRoutingHTTPXAmznTLSVersionAndCipherSuiteEnabled, tfType: schema.TypeBool, 
- loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, "enable_waf_fail_open": { apiAttributeKey: loadBalancerAttributeWAFFailOpenEnabled, tfType: schema.TypeBool, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, "enable_xff_client_port": { apiAttributeKey: loadBalancerAttributeRoutingHTTPXFFClientPortEnabled, tfType: schema.TypeBool, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, "idle_timeout": { apiAttributeKey: loadBalancerAttributeIdleTimeoutTimeoutSeconds, tfType: schema.TypeInt, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, "preserve_host_header": { apiAttributeKey: loadBalancerAttributeRoutingHTTPPreserveHostHeaderEnabled, tfType: schema.TypeBool, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, "xff_header_processing_mode": { apiAttributeKey: loadBalancerAttributeRoutingHTTPXFFHeaderProcessingMode, tfType: schema.TypeString, - loadBalancerTypesSupported: []string{elbv2.LoadBalancerTypeEnumApplication}, + loadBalancerTypesSupported: []awstypes.LoadBalancerTypeEnum{awstypes.LoadBalancerTypeEnumApplication}, }, }) -func (m loadBalancerAttributeMap) expand(d *schema.ResourceData, update bool) []*elbv2.LoadBalancerAttribute { - var apiObjects []*elbv2.LoadBalancerAttribute +func (m loadBalancerAttributeMap) expand(d *schema.ResourceData, lbType awstypes.LoadBalancerTypeEnum, update 
bool) []awstypes.LoadBalancerAttribute { + var apiObjects []awstypes.LoadBalancerAttribute - loadBalancerType := d.Get("load_balancer_type").(string) for tfAttributeName, attributeInfo := range m { if update && !d.HasChange(tfAttributeName) { continue } - if !slices.Contains(attributeInfo.loadBalancerTypesSupported, loadBalancerType) { + if !slices.Contains(attributeInfo.loadBalancerTypesSupported, lbType) { continue } switch v, t, k := d.Get(tfAttributeName), attributeInfo.tfType, aws.String(attributeInfo.apiAttributeKey); t { case schema.TypeBool: v := v.(bool) - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: k, Value: flex.BoolValueToString(v), }) case schema.TypeInt: v := v.(int) - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: k, Value: flex.IntValueToString(v), }) case schema.TypeString: if v := v.(string); v != "" { - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: k, Value: aws.String(v), }) @@ -816,11 +816,11 @@ func (m loadBalancerAttributeMap) expand(d *schema.ResourceData, update bool) [] return apiObjects } -func (m loadBalancerAttributeMap) flatten(d *schema.ResourceData, apiObjects []*elbv2.LoadBalancerAttribute) { +func (m loadBalancerAttributeMap) flatten(d *schema.ResourceData, apiObjects []awstypes.LoadBalancerAttribute) { for tfAttributeName, attributeInfo := range m { k := attributeInfo.apiAttributeKey - i := slices.IndexFunc(apiObjects, func(v *elbv2.LoadBalancerAttribute) bool { - return aws.StringValue(v.Key) == k + i := slices.IndexFunc(apiObjects, func(v awstypes.LoadBalancerAttribute) bool { + return aws.ToString(v.Key) == k }) if i == -1 { @@ -838,76 +838,69 @@ func (m loadBalancerAttributeMap) flatten(d *schema.ResourceData, apiObjects []* } } -func FindLoadBalancerByARN(ctx 
context.Context, conn *elbv2.ELBV2, arn string) (*elbv2.LoadBalancer, error) { - input := &elbv2.DescribeLoadBalancersInput{ - LoadBalancerArns: aws.StringSlice([]string{arn}), - } - - output, err := findLoadBalancer(ctx, conn, input) +func findLoadBalancer(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeLoadBalancersInput) (*awstypes.LoadBalancer, error) { + output, err := findLoadBalancers(ctx, conn, input) if err != nil { return nil, err } - // Eventual consistency check. - if aws.StringValue(output.LoadBalancerArn) != arn { - return nil, &retry.NotFoundError{ - LastRequest: input, + return tfresource.AssertSingleValueResult(output) +} + +func findLoadBalancers(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeLoadBalancersInput) ([]awstypes.LoadBalancer, error) { + var output []awstypes.LoadBalancer + + pages := elasticloadbalancingv2.NewDescribeLoadBalancersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.LoadBalancerNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err } + + output = append(output, page.LoadBalancers...) 
} return output, nil } -func findLoadBalancer(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeLoadBalancersInput) (*elbv2.LoadBalancer, error) { - output, err := findLoadBalancers(ctx, conn, input) +func findLoadBalancerByARN(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) (*awstypes.LoadBalancer, error) { + input := &elasticloadbalancingv2.DescribeLoadBalancersInput{ + LoadBalancerArns: []string{arn}, + } + + output, err := findLoadBalancer(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) -} - -func findLoadBalancers(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeLoadBalancersInput) ([]*elbv2.LoadBalancer, error) { - var output []*elbv2.LoadBalancer - - err := conn.DescribeLoadBalancersPagesWithContext(ctx, input, func(page *elbv2.DescribeLoadBalancersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.LoadBalancers { - if v != nil && v.State != nil { - output = append(output, v) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeLoadBalancerNotFoundException) { + // Eventual consistency check. 
+ if aws.ToString(output.LoadBalancerArn) != arn { return nil, &retry.NotFoundError{ - LastError: err, LastRequest: input, } } - if err != nil { - return nil, err - } - return output, nil } -func FindLoadBalancerAttributesByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) ([]*elbv2.LoadBalancerAttribute, error) { - input := &elbv2.DescribeLoadBalancerAttributesInput{ +func findLoadBalancerAttributesByARN(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) ([]awstypes.LoadBalancerAttribute, error) { + input := &elasticloadbalancingv2.DescribeLoadBalancerAttributesInput{ LoadBalancerArn: aws.String(arn), } - output, err := conn.DescribeLoadBalancerAttributesWithContext(ctx, input) + output, err := conn.DescribeLoadBalancerAttributes(ctx, input) - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeLoadBalancerNotFoundException) { + if errs.IsA[*awstypes.LoadBalancerNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -925,9 +918,9 @@ func FindLoadBalancerAttributesByARN(ctx context.Context, conn *elbv2.ELBV2, arn return output.Attributes, nil } -func statusLoadBalancer(ctx context.Context, conn *elbv2.ELBV2, arn string) retry.StateRefreshFunc { +func statusLoadBalancer(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindLoadBalancerByARN(ctx, conn, arn) + output, err := findLoadBalancerByARN(ctx, conn, arn) if tfresource.NotFound(err) { return nil, "", nil @@ -937,14 +930,14 @@ func statusLoadBalancer(ctx context.Context, conn *elbv2.ELBV2, arn string) retr return nil, "", err } - return output, aws.StringValue(output.State.Code), nil + return output, string(output.State.Code), nil } } -func waitLoadBalancerActive(ctx context.Context, conn *elbv2.ELBV2, arn string, timeout time.Duration) (*elbv2.LoadBalancer, error) { //nolint:unparam +func waitLoadBalancerActive(ctx context.Context, conn 
*elasticloadbalancingv2.Client, arn string, timeout time.Duration) (*awstypes.LoadBalancer, error) { //nolint:unparam stateConf := &retry.StateChangeConf{ - Pending: []string{elbv2.LoadBalancerStateEnumProvisioning, elbv2.LoadBalancerStateEnumFailed}, - Target: []string{elbv2.LoadBalancerStateEnumActive}, + Pending: enum.Slice(awstypes.LoadBalancerStateEnumProvisioning, awstypes.LoadBalancerStateEnumFailed), + Target: enum.Slice(awstypes.LoadBalancerStateEnumActive), Refresh: statusLoadBalancer(ctx, conn, arn), Timeout: timeout, MinTimeout: 10 * time.Second, @@ -953,8 +946,8 @@ func waitLoadBalancerActive(ctx context.Context, conn *elbv2.ELBV2, arn string, outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*elbv2.LoadBalancer); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.State.Reason))) + if output, ok := outputRaw.(*awstypes.LoadBalancer); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.State.Reason))) return output, err } @@ -984,8 +977,8 @@ func cleanupALBNetworkInterfaces(ctx context.Context, conn *ec2.Client, arn stri continue } - attachmentID := aws.StringValue(v.Attachment.AttachmentId) - networkInterfaceID := aws.StringValue(v.NetworkInterfaceId) + attachmentID := aws.ToString(v.Attachment.AttachmentId) + networkInterfaceID := aws.ToString(v.NetworkInterfaceId) if err := tfec2.DetachNetworkInterface(ctx, conn, networkInterfaceID, attachmentID, tfec2.NetworkInterfaceDetachedTimeout); err != nil { errs = append(errs, err) @@ -1037,30 +1030,30 @@ func loadBalancerNameFromARN(s string) (string, error) { return matches[1], nil } -func flattenSubnetsFromAvailabilityZones(apiObjects []*elbv2.AvailabilityZone) []string { - return tfslices.ApplyToAll(apiObjects, func(apiObject *elbv2.AvailabilityZone) string { - return aws.StringValue(apiObject.SubnetId) +func flattenSubnetsFromAvailabilityZones(apiObjects []awstypes.AvailabilityZone) []string { + return tfslices.ApplyToAll(apiObjects, 
func(apiObject awstypes.AvailabilityZone) string { + return aws.ToString(apiObject.SubnetId) }) } -func flattenSubnetMappingsFromAvailabilityZones(apiObjects []*elbv2.AvailabilityZone) []map[string]interface{} { - return tfslices.ApplyToAll(apiObjects, func(apiObject *elbv2.AvailabilityZone) map[string]interface{} { +func flattenSubnetMappingsFromAvailabilityZones(apiObjects []awstypes.AvailabilityZone) []map[string]interface{} { + return tfslices.ApplyToAll(apiObjects, func(apiObject awstypes.AvailabilityZone) map[string]interface{} { tfMap := map[string]interface{}{ - "outpost_id": aws.StringValue(apiObject.OutpostId), - names.AttrSubnetID: aws.StringValue(apiObject.SubnetId), + "outpost_id": aws.ToString(apiObject.OutpostId), + names.AttrSubnetID: aws.ToString(apiObject.SubnetId), } if apiObjects := apiObject.LoadBalancerAddresses; len(apiObjects) > 0 { apiObject := apiObjects[0] - tfMap["allocation_id"] = aws.StringValue(apiObject.AllocationId) - tfMap["ipv6_address"] = aws.StringValue(apiObject.IPv6Address) - tfMap["private_ipv4_address"] = aws.StringValue(apiObject.PrivateIPv4Address) + tfMap["allocation_id"] = aws.ToString(apiObject.AllocationId) + tfMap["ipv6_address"] = aws.ToString(apiObject.IPv6Address) + tfMap["private_ipv4_address"] = aws.ToString(apiObject.PrivateIPv4Address) } return tfMap }) } -func SuffixFromARN(arn *string) string { +func suffixFromARN(arn *string) string { if arn == nil { return "" } @@ -1091,7 +1084,7 @@ func customizeDiffLoadBalancerNLB(_ context.Context, diff *schema.ResourceDiff, // Application Load Balancers, so the logic below is simple individual checks. 
// If other differences arise we'll want to refactor to check other // conditions in combinations, but for now all we handle is subnets - if lbType := diff.Get("load_balancer_type").(string); lbType != elbv2.LoadBalancerTypeEnumNetwork { + if lbType := awstypes.LoadBalancerTypeEnum(diff.Get("load_balancer_type").(string)); lbType != awstypes.LoadBalancerTypeEnumNetwork { return nil } @@ -1169,7 +1162,7 @@ func customizeDiffLoadBalancerNLB(_ context.Context, diff *schema.ResourceDiff, } func customizeDiffLoadBalancerALB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - if lbType := diff.Get("load_balancer_type").(string); lbType != elbv2.LoadBalancerTypeEnumApplication { + if lbType := awstypes.LoadBalancerTypeEnum(diff.Get("load_balancer_type").(string)); lbType != awstypes.LoadBalancerTypeEnumApplication { return nil } @@ -1225,7 +1218,7 @@ func customizeDiffLoadBalancerALB(_ context.Context, diff *schema.ResourceDiff, } func customizeDiffLoadBalancerGWLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { - if lbType := diff.Get("load_balancer_type").(string); lbType != elbv2.LoadBalancerTypeEnumGateway { + if lbType := awstypes.LoadBalancerTypeEnum(diff.Get("load_balancer_type").(string)); lbType != awstypes.LoadBalancerTypeEnumGateway { return nil } @@ -1236,29 +1229,29 @@ func customizeDiffLoadBalancerGWLB(_ context.Context, diff *schema.ResourceDiff, return nil } -func expandLoadBalancerAccessLogsAttributes(tfMap map[string]interface{}, update bool) []*elbv2.LoadBalancerAttribute { +func expandLoadBalancerAccessLogsAttributes(tfMap map[string]interface{}, update bool) []awstypes.LoadBalancerAttribute { if tfMap == nil { return nil } - var apiObjects []*elbv2.LoadBalancerAttribute + var apiObjects []awstypes.LoadBalancerAttribute if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: 
aws.String(loadBalancerAttributeAccessLogsS3Enabled), Value: flex.BoolValueToString(v), }) if v { if v, ok := tfMap[names.AttrBucket].(string); ok && (update || v != "") { - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: aws.String(loadBalancerAttributeAccessLogsS3Bucket), Value: aws.String(v), }) } if v, ok := tfMap[names.AttrPrefix].(string); ok && (update || v != "") { - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: aws.String(loadBalancerAttributeAccessLogsS3Prefix), Value: aws.String(v), }) @@ -1269,29 +1262,29 @@ func expandLoadBalancerAccessLogsAttributes(tfMap map[string]interface{}, update return apiObjects } -func expandLoadBalancerConnectionLogsAttributes(tfMap map[string]interface{}, update bool) []*elbv2.LoadBalancerAttribute { +func expandLoadBalancerConnectionLogsAttributes(tfMap map[string]interface{}, update bool) []awstypes.LoadBalancerAttribute { if tfMap == nil { return nil } - var apiObjects []*elbv2.LoadBalancerAttribute + var apiObjects []awstypes.LoadBalancerAttribute if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: aws.String(loadBalancerAttributeConnectionLogsS3Enabled), Value: flex.BoolValueToString(v), }) if v { if v, ok := tfMap[names.AttrBucket].(string); ok && (update || v != "") { - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: aws.String(loadBalancerAttributeConnectionLogsS3Bucket), Value: aws.String(v), }) } if v, ok := tfMap[names.AttrPrefix].(string); ok && (update || v != "") { - apiObjects = append(apiObjects, &elbv2.LoadBalancerAttribute{ + apiObjects = append(apiObjects, awstypes.LoadBalancerAttribute{ Key: 
aws.String(loadBalancerAttributeConnectionLogsS3Prefix), Value: aws.String(v), }) @@ -1302,7 +1295,7 @@ func expandLoadBalancerConnectionLogsAttributes(tfMap map[string]interface{}, up return apiObjects } -func flattenLoadBalancerAccessLogsAttributes(apiObjects []*elbv2.LoadBalancerAttribute) map[string]interface{} { +func flattenLoadBalancerAccessLogsAttributes(apiObjects []awstypes.LoadBalancerAttribute) map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -1310,20 +1303,20 @@ func flattenLoadBalancerAccessLogsAttributes(apiObjects []*elbv2.LoadBalancerAtt tfMap := map[string]interface{}{} for _, apiObject := range apiObjects { - switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + switch k, v := aws.ToString(apiObject.Key), apiObject.Value; k { case loadBalancerAttributeAccessLogsS3Enabled: tfMap[names.AttrEnabled] = flex.StringToBoolValue(v) case loadBalancerAttributeAccessLogsS3Bucket: - tfMap[names.AttrBucket] = aws.StringValue(v) + tfMap[names.AttrBucket] = aws.ToString(v) case loadBalancerAttributeAccessLogsS3Prefix: - tfMap[names.AttrPrefix] = aws.StringValue(v) + tfMap[names.AttrPrefix] = aws.ToString(v) } } return tfMap } -func flattenLoadBalancerConnectionLogsAttributes(apiObjects []*elbv2.LoadBalancerAttribute) map[string]interface{} { +func flattenLoadBalancerConnectionLogsAttributes(apiObjects []awstypes.LoadBalancerAttribute) map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -1331,25 +1324,21 @@ func flattenLoadBalancerConnectionLogsAttributes(apiObjects []*elbv2.LoadBalance tfMap := map[string]interface{}{} for _, apiObject := range apiObjects { - switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + switch k, v := aws.ToString(apiObject.Key), apiObject.Value; k { case loadBalancerAttributeConnectionLogsS3Enabled: tfMap[names.AttrEnabled] = flex.StringToBoolValue(v) case loadBalancerAttributeConnectionLogsS3Bucket: - tfMap[names.AttrBucket] = aws.StringValue(v) + 
tfMap[names.AttrBucket] = aws.ToString(v) case loadBalancerAttributeConnectionLogsS3Prefix: - tfMap[names.AttrPrefix] = aws.StringValue(v) + tfMap[names.AttrPrefix] = aws.ToString(v) } } return tfMap } -func expandSubnetMapping(tfMap map[string]interface{}) *elbv2.SubnetMapping { - if tfMap == nil { - return nil - } - - apiObject := &elbv2.SubnetMapping{} +func expandSubnetMapping(tfMap map[string]interface{}) awstypes.SubnetMapping { + apiObject := awstypes.SubnetMapping{} if v, ok := tfMap["allocation_id"].(string); ok && v != "" { apiObject.AllocationId = aws.String(v) @@ -1370,12 +1359,12 @@ func expandSubnetMapping(tfMap map[string]interface{}) *elbv2.SubnetMapping { return apiObject } -func expandSubnetMappings(tfList []interface{}) []*elbv2.SubnetMapping { +func expandSubnetMappings(tfList []interface{}) []awstypes.SubnetMapping { if len(tfList) == 0 { return nil } - var apiObjects []*elbv2.SubnetMapping + var apiObjects []awstypes.SubnetMapping for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -1386,10 +1375,6 @@ func expandSubnetMappings(tfList []interface{}) []*elbv2.SubnetMapping { apiObject := expandSubnetMapping(tfMap) - if apiObject == nil { - continue - } - apiObjects = append(apiObjects, apiObject) } diff --git a/internal/service/elbv2/load_balancer_data_source.go b/internal/service/elbv2/load_balancer_data_source.go index dbd7724f947..b5ca617f68f 100644 --- a/internal/service/elbv2/load_balancer_data_source.go +++ b/internal/service/elbv2/load_balancer_data_source.go @@ -8,9 +8,9 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -21,10 +21,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_alb") -// @SDKDataSource("aws_lb") +// @SDKDataSource("aws_alb", name="Load Balancer") +// @SDKDataSource("aws_lb", name="Load Balancer") // @Testing(tagsTest=true) -func DataSourceLoadBalancer() *schema.Resource { +func dataSourceLoadBalancer() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLoadBalancerRead, @@ -217,17 +217,18 @@ func DataSourceLoadBalancer() *schema.Resource { func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) + partition := meta.(*conns.AWSClient).Partition ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig tagsToMatch := tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{})).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - input := &elbv2.DescribeLoadBalancersInput{} + input := &elasticloadbalancingv2.DescribeLoadBalancersInput{} if v, ok := d.GetOk(names.AttrARN); ok { - input.LoadBalancerArns = aws.StringSlice([]string{v.(string)}) + input.LoadBalancerArns = []string{v.(string)} } else if v, ok := d.GetOk(names.AttrName); ok { - input.Names = aws.StringSlice([]string{v.(string)}) + input.Names = []string{v.(string)} } results, err := findLoadBalancers(ctx, conn, input) @@ -237,13 +238,13 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met } if len(tagsToMatch) > 0 { - var loadBalancers []*elbv2.LoadBalancer + var loadBalancers []awstypes.LoadBalancer for _, loadBalancer := range results { - arn := aws.StringValue(loadBalancer.LoadBalancerArn) + arn := aws.ToString(loadBalancer.LoadBalancerArn) tags, err := listTags(ctx, conn, arn) - if 
tfawserr.ErrCodeEquals(err, elbv2.ErrCodeLoadBalancerNotFoundException) { + if errs.IsA[*awstypes.LoadBalancerNotFoundException](err) { continue } @@ -266,17 +267,17 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met } lb := results[0] - d.SetId(aws.StringValue(lb.LoadBalancerArn)) + d.SetId(aws.ToString(lb.LoadBalancerArn)) d.Set(names.AttrARN, lb.LoadBalancerArn) - d.Set("arn_suffix", SuffixFromARN(lb.LoadBalancerArn)) + d.Set("arn_suffix", suffixFromARN(lb.LoadBalancerArn)) d.Set("customer_owned_ipv4_pool", lb.CustomerOwnedIpv4Pool) d.Set(names.AttrDNSName, lb.DNSName) d.Set("enforce_security_group_inbound_rules_on_private_link_traffic", lb.EnforceSecurityGroupInboundRulesOnPrivateLinkTraffic) d.Set(names.AttrIPAddressType, lb.IpAddressType) d.Set(names.AttrName, lb.LoadBalancerName) - d.Set("internal", aws.StringValue(lb.Scheme) == "internal") + d.Set("internal", string(lb.Scheme) == "internal") d.Set("load_balancer_type", lb.Type) - d.Set(names.AttrSecurityGroups, aws.StringValueSlice(lb.SecurityGroups)) + d.Set(names.AttrSecurityGroups, lb.SecurityGroups) if err := d.Set("subnet_mapping", flattenSubnetMappingsFromAvailabilityZones(lb.AvailabilityZones)); err != nil { return sdkdiag.AppendErrorf(diags, "setting subnet_mapping: %s", err) } @@ -286,7 +287,7 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met d.Set(names.AttrVPCID, lb.VpcId) d.Set("zone_id", lb.CanonicalHostedZoneId) - attributes, err := FindLoadBalancerAttributesByARN(ctx, conn, d.Id()) + attributes, err := findLoadBalancerAttributesByARN(ctx, conn, d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading ELBv2 Load Balancer (%s) attributes: %s", d.Id(), err) @@ -304,7 +305,7 @@ func dataSourceLoadBalancerRead(ctx context.Context, d *schema.ResourceData, met tags, err := listTags(ctx, conn, d.Id()) - if errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if 
errs.IsUnsupportedOperationInPartitionError(partition, err) { log.Printf("[WARN] Unable to list tags for ELBv2 Load Balancer (%s): %s", d.Id(), err) return diags } diff --git a/internal/service/elbv2/load_balancer_tags_gen_test.go b/internal/service/elbv2/load_balancer_tags_gen_test.go index 6632789210c..3af95d4b60a 100644 --- a/internal/service/elbv2/load_balancer_tags_gen_test.go +++ b/internal/service/elbv2/load_balancer_tags_gen_test.go @@ -5,7 +5,7 @@ package elbv2_test import ( "testing" - "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-testing/config" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -19,7 +19,7 @@ import ( func TestAccELBV2LoadBalancer_tags(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -190,7 +190,7 @@ func TestAccELBV2LoadBalancer_tags(t *testing.T) { func TestAccELBV2LoadBalancer_tags_null(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -250,7 +250,7 @@ func TestAccELBV2LoadBalancer_tags_null(t *testing.T) { func TestAccELBV2LoadBalancer_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -327,7 +327,7 @@ func TestAccELBV2LoadBalancer_tags_AddOnUpdate(t *testing.T) { func TestAccELBV2LoadBalancer_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -412,7 
+412,7 @@ func TestAccELBV2LoadBalancer_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccELBV2LoadBalancer_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -539,7 +539,7 @@ func TestAccELBV2LoadBalancer_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2LoadBalancer_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -622,7 +622,7 @@ func TestAccELBV2LoadBalancer_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -803,7 +803,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -963,7 +963,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1139,7 +1139,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_overlapping(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1229,7 +1229,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_updateToProviderOnly(t *testing.T func TestAccELBV2LoadBalancer_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1318,7 +1318,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_updateToResourceOnly(t *testing.T func TestAccELBV2LoadBalancer_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1383,7 +1383,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccELBV2LoadBalancer_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1440,7 +1440,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T func TestAccELBV2LoadBalancer_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1502,7 +1502,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_nullOverlappingResourceTag(t *tes func TestAccELBV2LoadBalancer_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1564,7 +1564,7 @@ func TestAccELBV2LoadBalancer_tags_DefaultTags_nullNonOverlappingResourceTag(t * func 
TestAccELBV2LoadBalancer_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1618,7 +1618,7 @@ func TestAccELBV2LoadBalancer_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccELBV2LoadBalancer_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1708,7 +1708,7 @@ func TestAccELBV2LoadBalancer_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2LoadBalancer_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.LoadBalancer + var v types.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) diff --git a/internal/service/elbv2/load_balancer_test.go b/internal/service/elbv2/load_balancer_test.go index f17446f0bb9..6046d24ea8d 100644 --- a/internal/service/elbv2/load_balancer_test.go +++ b/internal/service/elbv2/load_balancer_test.go @@ -11,8 +11,9 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -70,7 +71,7 @@ func TestLBCloudWatchSuffixFromARN(t *testing.T) { func TestAccELBV2LoadBalancer_ALB_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_lb.test" @@ -114,7 +115,7 @@ func TestAccELBV2LoadBalancer_ALB_basic(t *testing.T) { func TestAccELBV2LoadBalancer_NLB_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -153,7 +154,7 @@ func TestAccELBV2LoadBalancer_NLB_basic(t *testing.T) { func TestAccELBV2LoadBalancer_LoadBalancerType_gateway(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -168,7 +169,7 @@ func TestAccELBV2LoadBalancer_LoadBalancerType_gateway(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLoadBalancerExists(ctx, resourceName, &conf), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "elasticloadbalancing", regexache.MustCompile(fmt.Sprintf("loadbalancer/gwy/%s/.+", rName))), - resource.TestCheckResourceAttr(resourceName, "load_balancer_type", elbv2.LoadBalancerTypeEnumGateway), + resource.TestCheckResourceAttr(resourceName, "load_balancer_type", string(awstypes.LoadBalancerTypeEnumGateway)), ), }, { @@ -188,7 +189,7 @@ func TestAccELBV2LoadBalancer_LoadBalancerType_gateway(t *testing.T) { func TestAccELBV2LoadBalancer_disappears(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -212,7 +213,7 @@ func TestAccELBV2LoadBalancer_disappears(t *testing.T) { func TestAccELBV2LoadBalancer_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -247,7 +248,7 @@ func TestAccELBV2LoadBalancer_nameGenerated(t *testing.T) { func 
TestAccELBV2LoadBalancer_nameGeneratedForZeroValue(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -271,7 +272,7 @@ func TestAccELBV2LoadBalancer_nameGeneratedForZeroValue(t *testing.T) { func TestAccELBV2LoadBalancer_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -306,7 +307,7 @@ func TestAccELBV2LoadBalancer_namePrefix(t *testing.T) { func TestAccELBV2LoadBalancer_duplicateName(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -333,7 +334,7 @@ func TestAccELBV2LoadBalancer_duplicateName(t *testing.T) { func TestAccELBV2LoadBalancer_ipv6SubnetMapping(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -374,7 +375,7 @@ func TestAccELBV2LoadBalancer_ipv6SubnetMapping(t *testing.T) { func TestAccELBV2LoadBalancer_LoadBalancerTypeGateway_enableCrossZoneLoadBalancing(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -389,7 +390,7 @@ func TestAccELBV2LoadBalancer_LoadBalancerTypeGateway_enableCrossZoneLoadBalanci Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLoadBalancerExists(ctx, resourceName, &conf), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "elasticloadbalancing", regexache.MustCompile(fmt.Sprintf("loadbalancer/gwy/%s/.+", rName))), - resource.TestCheckResourceAttr(resourceName, 
"load_balancer_type", elbv2.LoadBalancerTypeEnumGateway), + resource.TestCheckResourceAttr(resourceName, "load_balancer_type", string(awstypes.LoadBalancerTypeEnumGateway)), resource.TestCheckResourceAttr(resourceName, "enable_cross_zone_load_balancing", acctest.CtTrue), ), }, @@ -409,7 +410,7 @@ func TestAccELBV2LoadBalancer_LoadBalancerTypeGateway_enableCrossZoneLoadBalanci Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLoadBalancerExists(ctx, resourceName, &conf), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "elasticloadbalancing", regexache.MustCompile(fmt.Sprintf("loadbalancer/gwy/%s/.+", rName))), - resource.TestCheckResourceAttr(resourceName, "load_balancer_type", elbv2.LoadBalancerTypeEnumGateway), + resource.TestCheckResourceAttr(resourceName, "load_balancer_type", string(awstypes.LoadBalancerTypeEnumGateway)), resource.TestCheckResourceAttr(resourceName, "enable_cross_zone_load_balancing", acctest.CtFalse), ), }, @@ -419,7 +420,7 @@ func TestAccELBV2LoadBalancer_LoadBalancerTypeGateway_enableCrossZoneLoadBalanci func TestAccELBV2LoadBalancer_ALB_outpost(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -459,7 +460,7 @@ func TestAccELBV2LoadBalancer_ALB_outpost(t *testing.T) { func TestAccELBV2LoadBalancer_networkLoadBalancerEIP(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer resourceName := "aws_lb.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -490,7 +491,7 @@ func TestAccELBV2LoadBalancer_networkLoadBalancerEIP(t *testing.T) { func TestAccELBV2LoadBalancer_NLB_privateIPv4Address(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -527,7 +528,7 @@ func 
TestAccELBV2LoadBalancer_NLB_privateIPv4Address(t *testing.T) { func TestAccELBV2LoadBalancer_backwardsCompatibility(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb.test" @@ -563,7 +564,7 @@ func TestAccELBV2LoadBalancer_backwardsCompatibility(t *testing.T) { func TestAccELBV2LoadBalancer_NetworkLoadBalancer_updateCrossZone(t *testing.T) { ctx := acctest.Context(t) - var pre, mid, post elbv2.LoadBalancer + var pre, mid, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -605,7 +606,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_updateCrossZone(t *testing.T) func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updateHTTP2(t *testing.T) { ctx := acctest.Context(t) - var pre, mid, post elbv2.LoadBalancer + var pre, mid, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -647,7 +648,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updateHTTP2(t *testing.T) func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_clientKeepAlive(t *testing.T) { ctx := acctest.Context(t) - var pre, mid, post elbv2.LoadBalancer + var pre, mid, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -689,7 +690,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_clientKeepAlive(t *testing func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updateDropInvalidHeaderFields(t *testing.T) { ctx := acctest.Context(t) - var pre, mid, post elbv2.LoadBalancer + var pre, mid, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -731,7 +732,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updateDropInvalidHeaderFie func 
TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updatePreserveHostHeader(t *testing.T) { ctx := acctest.Context(t) - var pre, mid, post elbv2.LoadBalancer + var pre, mid, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -773,7 +774,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updatePreserveHostHeader(t func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updateDeletionProtection(t *testing.T) { ctx := acctest.Context(t) - var pre, mid, post elbv2.LoadBalancer + var pre, mid, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -815,7 +816,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updateDeletionProtection(t func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updateWAFFailOpen(t *testing.T) { ctx := acctest.Context(t) - var pre, mid, post elbv2.LoadBalancer + var pre, mid, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -859,7 +860,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updateWAFFailOpen(t *testi func TestAccELBV2LoadBalancer_updateIPAddressType(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -896,7 +897,7 @@ func TestAccELBV2LoadBalancer_updateIPAddressType(t *testing.T) { func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updatedSecurityGroups(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -933,7 +934,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_updatedSecurityGroups(t *t func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_addSubnet(t *testing.T) { ctx := 
acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -970,7 +971,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_addSubnet(t *testing.T) { func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_deleteSubnet(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1007,7 +1008,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_deleteSubnet(t *testing.T) func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_addSubnetMapping(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1044,7 +1045,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_addSubnetMapping(t *testin func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_deleteSubnetMapping(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1084,7 +1085,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_deleteSubnetMapping(t *tes // is assigned. 
func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_noSecurityGroup(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1115,7 +1116,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_noSecurityGroup(t *testing func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_accessLogs(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1188,7 +1189,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_accessLogs(t *testing.T) { func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_accessLogsPrefix(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1248,7 +1249,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_accessLogsPrefix(t *testin func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_connectionLogs(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1321,7 +1322,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_connectionLogs(t *testing. 
func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_connectionLogsPrefix(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1381,7 +1382,7 @@ func TestAccELBV2LoadBalancer_ApplicationLoadBalancer_connectionLogsPrefix(t *te func TestAccELBV2LoadBalancer_NetworkLoadBalancer_accessLogs(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1461,7 +1462,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_accessLogs(t *testing.T) { func TestAccELBV2LoadBalancer_NetworkLoadBalancer_accessLogsPrefix(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1528,7 +1529,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_accessLogsPrefix(t *testing.T) func TestAccELBV2LoadBalancer_NetworkLoadBalancer_updateDNSRecordClientRoutingPolicy(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1574,7 +1575,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_updateDNSRecordClientRoutingPo func TestAccELBV2LoadBalancer_NetworkLoadBalancer_updateSecurityGroups(t *testing.T) { ctx := acctest.Context(t) - var lb1, lb2, lb3, lb4 elbv2.LoadBalancer + var lb1, lb2, lb3, lb4 awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1625,7 +1626,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_updateSecurityGroups(t *testin func TestAccELBV2LoadBalancer_NetworkLoadBalancer_enforcePrivateLink(t *testing.T) { ctx := acctest.Context(t) - var lb 
elbv2.LoadBalancer + var lb awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1692,7 +1693,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_enforcePrivateLink(t *testing. func TestAccELBV2LoadBalancer_NetworkLoadBalancer_addSubnet(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1727,7 +1728,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_addSubnet(t *testing.T) { func TestAccELBV2LoadBalancer_NetworkLoadBalancer_deleteSubnet(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1762,7 +1763,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_deleteSubnet(t *testing.T) { func TestAccELBV2LoadBalancer_NetworkLoadBalancer_addSubnetMapping(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1799,7 +1800,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_addSubnetMapping(t *testing.T) func TestAccELBV2LoadBalancer_NetworkLoadBalancer_deleteSubnetMapping(t *testing.T) { ctx := acctest.Context(t) - var pre, post elbv2.LoadBalancer + var pre, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1836,7 +1837,7 @@ func TestAccELBV2LoadBalancer_NetworkLoadBalancer_deleteSubnetMapping(t *testing func TestAccELBV2LoadBalancer_updateDesyncMitigationMode(t *testing.T) { ctx := acctest.Context(t) - var pre, mid, post elbv2.LoadBalancer + var pre, mid, post awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_lb.test" @@ -1881,7 +1882,7 @@ func TestAccELBV2LoadBalancer_updateDesyncMitigationMode(t *testing.T) { func TestAccELBV2LoadBalancer_ALB_updateTLSVersionAndCipherSuite(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1926,7 +1927,7 @@ func TestAccELBV2LoadBalancer_ALB_updateTLSVersionAndCipherSuite(t *testing.T) { func TestAccELBV2LoadBalancer_ALB_updateXffHeaderProcessingMode(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -1971,7 +1972,7 @@ func TestAccELBV2LoadBalancer_ALB_updateXffHeaderProcessingMode(t *testing.T) { func TestAccELBV2LoadBalancer_ALB_updateXffClientPort(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.LoadBalancer + var conf awstypes.LoadBalancer rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb.test" @@ -2014,9 +2015,9 @@ func TestAccELBV2LoadBalancer_ALB_updateXffClientPort(t *testing.T) { }) } -func testAccCheckLoadBalancerNotRecreated(i, j *elbv2.LoadBalancer) resource.TestCheckFunc { +func testAccCheckLoadBalancerNotRecreated(i, j *awstypes.LoadBalancer) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.LoadBalancerArn) != aws.StringValue(j.LoadBalancerArn) { + if aws.ToString(i.LoadBalancerArn) != aws.ToString(j.LoadBalancerArn) { return errors.New("ELBv2 Load Balancer was recreated") } @@ -2024,9 +2025,9 @@ func testAccCheckLoadBalancerNotRecreated(i, j *elbv2.LoadBalancer) resource.Tes } } -func testAccCheckLoadBalancerRecreated(i, j *elbv2.LoadBalancer) resource.TestCheckFunc { +func testAccCheckLoadBalancerRecreated(i, j *awstypes.LoadBalancer) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.LoadBalancerArn) == 
aws.StringValue(j.LoadBalancerArn) { + if aws.ToString(i.LoadBalancerArn) == aws.ToString(j.LoadBalancerArn) { return errors.New("ELBv2 Load Balancer was not recreated") } @@ -2034,14 +2035,14 @@ func testAccCheckLoadBalancerRecreated(i, j *elbv2.LoadBalancer) resource.TestCh } } -func testAccCheckLoadBalancerExists(ctx context.Context, n string, v *elbv2.LoadBalancer) resource.TestCheckFunc { +func testAccCheckLoadBalancerExists(ctx context.Context, n string, v *awstypes.LoadBalancer) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) output, err := tfelbv2.FindLoadBalancerByARN(ctx, conn, rs.Primary.ID) @@ -2062,7 +2063,7 @@ func testAccCheckLoadBalancerAttribute(ctx context.Context, n, key, value string return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) attributes, err := tfelbv2.FindLoadBalancerAttributesByARN(ctx, conn, rs.Primary.ID) @@ -2071,8 +2072,8 @@ func testAccCheckLoadBalancerAttribute(ctx context.Context, n, key, value string } for _, v := range attributes { - if aws.StringValue(v.Key) == key { - got := aws.StringValue(v.Value) + if aws.ToString(v.Key) == key { + got := aws.ToString(v.Value) if got == value { return nil } @@ -2087,7 +2088,7 @@ func testAccCheckLoadBalancerAttribute(ctx context.Context, n, key, value string func testAccCheckLoadBalancerDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lb" && rs.Type != "aws_alb" { @@ -2112,11 
+2113,11 @@ func testAccCheckLoadBalancerDestroy(ctx context.Context) resource.TestCheckFunc } func testAccPreCheckGatewayLoadBalancer(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) - input := &elbv2.DescribeAccountLimitsInput{} + input := &elasticloadbalancingv2.DescribeAccountLimitsInput{} - output, err := conn.DescribeAccountLimitsWithContext(ctx, input) + output, err := conn.DescribeAccountLimits(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) @@ -2131,11 +2132,7 @@ func testAccPreCheckGatewayLoadBalancer(ctx context.Context, t *testing.T) { } for _, limit := range output.Limits { - if limit == nil { - continue - } - - if aws.StringValue(limit.Name) == "gateway-load-balancers" { + if aws.ToString(limit.Name) == "gateway-load-balancers" { return } } diff --git a/internal/service/elbv2/load_balancers_data_source.go b/internal/service/elbv2/load_balancers_data_source.go index 624ff1fd3f4..56eb0121eec 100644 --- a/internal/service/elbv2/load_balancers_data_source.go +++ b/internal/service/elbv2/load_balancers_data_source.go @@ -6,21 +6,23 @@ package elbv2 import ( "context" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" ) -// 
@SDKDataSource("aws_lbs") -func DataSourceLoadBalancers() *schema.Resource { +// @SDKDataSource("aws_lbs", name="Load Balancers") +func dataSourceLoadBalancers() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceLoadBalancersRead, + Schema: map[string]*schema.Schema{ names.AttrARNs: { Type: schema.TypeSet, @@ -39,10 +41,10 @@ const ( func dataSourceLoadBalancersRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - results, err := findLoadBalancers(ctx, conn, &elbv2.DescribeLoadBalancersInput{}) + results, err := findLoadBalancers(ctx, conn, &elasticloadbalancingv2.DescribeLoadBalancersInput{}) if err != nil { return create.AppendDiagError(diags, names.ELBV2, create.ErrActionReading, DSNameLoadBalancers, "", err) @@ -50,13 +52,13 @@ func dataSourceLoadBalancersRead(ctx context.Context, d *schema.ResourceData, me tagsToMatch := tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{})).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) if len(tagsToMatch) > 0 { - var loadBalancers []*elbv2.LoadBalancer + var loadBalancers []awstypes.LoadBalancer for _, loadBalancer := range results { arn := aws.StringValue(loadBalancer.LoadBalancerArn) tags, err := listTags(ctx, conn, arn) - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeLoadBalancerNotFoundException) { + if errs.IsA[*awstypes.LoadBalancerNotFoundException](err) { continue } diff --git a/internal/service/elbv2/service_endpoint_resolver_gen.go b/internal/service/elbv2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..d1a1bde0233 --- /dev/null +++ b/internal/service/elbv2/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package elbv2 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + elasticloadbalancingv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ elasticloadbalancingv2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver elasticloadbalancingv2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: elasticloadbalancingv2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params elasticloadbalancingv2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up elasticloadbalancingv2 endpoint 
%q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*elasticloadbalancingv2_sdkv2.Options) { + return func(o *elasticloadbalancingv2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/elbv2/service_endpoints_gen_test.go b/internal/service/elbv2/service_endpoints_gen_test.go index c51b45db7ce..3405dc4194a 100644 --- a/internal/service/elbv2/service_endpoints_gen_test.go +++ b/internal/service/elbv2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -16,8 +18,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" elasticloadbalancingv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - elbv2_sdkv1 "github.com/aws/aws-sdk-go/service/elbv2" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" @@ -93,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -276,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -288,45 +288,33 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }, } - t.Run("v1", func(t 
*testing.T) { - for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv - testcase := testcase + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase - t.Run(name, func(t *testing.T) { - testEndpointCase(t, providerRegion, testcase, callServiceV1) - }) - } - }) - - t.Run("v2", func(t *testing.T) { - for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv - testcase := testcase - - t.Run(name, func(t *testing.T) { - testEndpointCase(t, providerRegion, testcase, callServiceV2) - }) - } - }) + t.Run(name, func(t *testing.T) { + testEndpointCase(t, providerRegion, testcase, callService) + }) + } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := elasticloadbalancingv2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), elasticloadbalancingv2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := elasticloadbalancingv2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), elasticloadbalancingv2_sdkv2.EndpointParameters{ @@ -334,17 +322,17 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() client := meta.ELBV2Client(ctx) @@ -369,21 +357,6 @@ func callServiceV2(ctx context.Context, t *testing.T, meta 
*conns.AWSClient) api return result } -func callServiceV1(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { - t.Helper() - - client := meta.ELBV2Conn(ctx) - - req, _ := client.DescribeLoadBalancersRequest(&elbv2_sdkv1.DescribeLoadBalancersInput{}) - - req.HTTPRequest.URL.Path = "/" - - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), - } -} - func withNoConfig(_ *caseSetup) { // no-op } @@ -437,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving elasticloadbalancingv2 default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving elasticloadbalancingv2 FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up elasticloadbalancingv2 endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/elbv2/service_package_gen.go b/internal/service/elbv2/service_package_gen.go index 087bcb97606..46dbfbc0b7f 100644 --- a/internal/service/elbv2/service_package_gen.go +++ b/internal/service/elbv2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated 
by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package elbv2 @@ -7,11 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" elasticloadbalancingv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - elbv2_sdkv1 "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -30,41 +25,49 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceLoadBalancer, + Factory: dataSourceLoadBalancer, TypeName: "aws_alb", + Name: "Load Balancer", }, { - Factory: DataSourceListener, + Factory: dataSourceListener, TypeName: "aws_alb_listener", + Name: "Listener", }, { - Factory: DataSourceTargetGroup, + Factory: dataSourceTargetGroup, TypeName: "aws_alb_target_group", + Name: "Target Group", }, { - Factory: DataSourceLoadBalancer, + Factory: dataSourceLoadBalancer, TypeName: "aws_lb", + Name: "Load Balancer", }, { - Factory: DataSourceHostedZoneID, + Factory: dataSourceHostedZoneID, TypeName: "aws_lb_hosted_zone_id", + Name: "Hosted Zone ID", }, { - Factory: DataSourceListener, + Factory: dataSourceListener, TypeName: "aws_lb_listener", + Name: "Listener", }, { - Factory: DataSourceTargetGroup, + Factory: dataSourceTargetGroup, TypeName: "aws_lb_target_group", + Name: "Target Group", }, { - Factory: DataSourceTrustStore, + Factory: dataSourceTrustStore, TypeName: "aws_lb_trust_store", Name: "Trust Store", }, { - 
Factory: DataSourceLoadBalancers, + Factory: dataSourceLoadBalancers, TypeName: "aws_lbs", + Name: "Load Balancers", }, } } @@ -72,7 +75,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceLoadBalancer, + Factory: resourceLoadBalancer, TypeName: "aws_alb", Name: "Load Balancer", Tags: &types.ServicePackageResourceTags{ @@ -80,7 +83,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceListener, + Factory: resourceListener, TypeName: "aws_alb_listener", Name: "Listener", Tags: &types.ServicePackageResourceTags{ @@ -88,11 +91,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceListenerCertificate, + Factory: resourceListenerCertificate, TypeName: "aws_alb_listener_certificate", + Name: "Listener Certificate", }, { - Factory: ResourceListenerRule, + Factory: resourceListenerRule, TypeName: "aws_alb_listener_rule", Name: "Listener Rule", Tags: &types.ServicePackageResourceTags{ @@ -100,7 +104,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceTargetGroup, + Factory: resourceTargetGroup, TypeName: "aws_alb_target_group", Name: "Target Group", Tags: &types.ServicePackageResourceTags{ @@ -108,11 +112,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceTargetGroupAttachment, + Factory: resourceTargetGroupAttachment, TypeName: "aws_alb_target_group_attachment", + Name: "Target Group Attachment", }, { - Factory: ResourceLoadBalancer, + Factory: resourceLoadBalancer, TypeName: "aws_lb", Name: "Load Balancer", Tags: &types.ServicePackageResourceTags{ @@ -120,7 +125,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) 
[]*types.ServicePacka }, }, { - Factory: ResourceListener, + Factory: resourceListener, TypeName: "aws_lb_listener", Name: "Listener", Tags: &types.ServicePackageResourceTags{ @@ -128,11 +133,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceListenerCertificate, + Factory: resourceListenerCertificate, TypeName: "aws_lb_listener_certificate", + Name: "Listener Certificate", }, { - Factory: ResourceListenerRule, + Factory: resourceListenerRule, TypeName: "aws_lb_listener_rule", Name: "Listener Rule", Tags: &types.ServicePackageResourceTags{ @@ -140,7 +146,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceTargetGroup, + Factory: resourceTargetGroup, TypeName: "aws_lb_target_group", Name: "Target Group", Tags: &types.ServicePackageResourceTags{ @@ -148,11 +154,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceTargetGroupAttachment, + Factory: resourceTargetGroupAttachment, TypeName: "aws_lb_target_group_attachment", + Name: "Target Group Attachment", }, { - Factory: ResourceTrustStore, + Factory: resourceTrustStore, TypeName: "aws_lb_trust_store", Name: "Trust Store", Tags: &types.ServicePackageResourceTags{ @@ -160,7 +167,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceTrustStoreRevocation, + Factory: resourceTrustStoreRevocation, TypeName: "aws_lb_trust_store_revocation", Name: "Trust Store Revocation", }, @@ -171,44 +178,14 @@ func (p *servicePackage) ServicePackageName() string { return names.ELBV2 } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*elbv2_sdkv1.ELBV2, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return elbv2_sdkv1.New(sess.Copy(&cfg)), nil -} - // NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*elasticloadbalancingv2_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return elasticloadbalancingv2_sdkv2.NewFromConfig(cfg, func(o *elasticloadbalancingv2_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return elasticloadbalancingv2_sdkv2.NewFromConfig(cfg, + elasticloadbalancingv2_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/elbv2/sweep.go b/internal/service/elbv2/sweep.go index ab41a0494ab..3901e2c2725 100644 --- a/internal/service/elbv2/sweep.go +++ b/internal/service/elbv2/sweep.go @@ -7,12 +7,11 @@ 
import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/go-multierror" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -46,38 +45,39 @@ func sweepLoadBalancers(region string) error { if err != nil { return fmt.Errorf("getting client: %s", err) } - conn := client.ELBV2Conn(ctx) + input := &elasticloadbalancingv2.DescribeLoadBalancersInput{} + conn := client.ELBV2Client(ctx) + sweepResources := make([]sweep.Sweepable, 0) + + pages := elasticloadbalancingv2.NewDescribeLoadBalancersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - var sweeperErrs *multierror.Error - err = conn.DescribeLoadBalancersPagesWithContext(ctx, &elbv2.DescribeLoadBalancersInput{}, func(page *elbv2.DescribeLoadBalancersOutput, lastPage bool) bool { - if page == nil || len(page.LoadBalancers) == 0 { - log.Print("[DEBUG] No LBs to sweep") - return false + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ELBv2 Load Balancer sweep for %s: %s", region, err) + return nil } - for _, loadBalancer := range page.LoadBalancers { - name := aws.StringValue(loadBalancer.LoadBalancerName) + if err != nil { + return fmt.Errorf("error listing ELBv2 Load Balancers (%s): %w", region, err) + } - log.Printf("[INFO] Deleting LB: %s", name) - _, err := conn.DeleteLoadBalancerWithContext(ctx, &elbv2.DeleteLoadBalancerInput{ - LoadBalancerArn: loadBalancer.LoadBalancerArn, - }) - if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("failed to delete LB (%s): %w", name, err)) - continue - } + for _, v := range page.LoadBalancers { + r := 
resourceLoadBalancer() + d := r.Data(nil) + d.SetId(aws.ToString(v.LoadBalancerArn)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - return !lastPage - }) - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping LB sweep for %s: %s", region, err) - return sweeperErrs.ErrorOrNil() // In case we have completed some pages, but had errors } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + if err != nil { - sweeperErrs = multierror.Append(sweeperErrs, fmt.Errorf("retrieving LBs: %w", err)) + return fmt.Errorf("error sweeping ELBv2 Load Balancers (%s): %w", region, err) } - return sweeperErrs.ErrorOrNil() + return nil } func sweepTargetGroups(region string) error { @@ -86,34 +86,38 @@ func sweepTargetGroups(region string) error { if err != nil { return fmt.Errorf("getting client: %w", err) } - conn := client.ELBV2Conn(ctx) + input := &elasticloadbalancingv2.DescribeTargetGroupsInput{} + conn := client.ELBV2Client(ctx) + sweepResources := make([]sweep.Sweepable, 0) + + pages := elasticloadbalancingv2.NewDescribeTargetGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - err = conn.DescribeTargetGroupsPagesWithContext(ctx, &elbv2.DescribeTargetGroupsInput{}, func(page *elbv2.DescribeTargetGroupsOutput, lastPage bool) bool { - if page == nil || len(page.TargetGroups) == 0 { - log.Print("[DEBUG] No LB Target Groups to sweep") - return false + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ELBv2 Target Group sweep for %s: %s", region, err) + return nil } - for _, targetGroup := range page.TargetGroups { - name := aws.StringValue(targetGroup.TargetGroupName) + if err != nil { + return fmt.Errorf("error listing ELBv2 Target Groups (%s): %w", region, err) + } - log.Printf("[INFO] Deleting LB Target Group: %s", name) - _, err := conn.DeleteTargetGroupWithContext(ctx, &elbv2.DeleteTargetGroupInput{ - TargetGroupArn: targetGroup.TargetGroupArn, - }) - if err != nil { - 
log.Printf("[ERROR] Failed to delete LB Target Group (%s): %s", name, err) - } + for _, v := range page.TargetGroups { + r := resourceTargetGroup() + d := r.Data(nil) + d.SetId(aws.ToString(v.TargetGroupArn)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - return !lastPage - }) + } + + err = sweep.SweepOrchestrator(ctx, sweepResources) + if err != nil { - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping LB Target Group sweep for %s: %s", region, err) - return nil - } - return fmt.Errorf("retrieving LB Target Groups: %w", err) + return fmt.Errorf("error sweeping ELBv2 Target Groups (%s): %w", region, err) } + return nil } @@ -123,60 +127,52 @@ func sweepListeners(region string) error { if err != nil { return fmt.Errorf("getting client: %s", err) } - - conn := client.ELBV2Conn(ctx) + input := &elasticloadbalancingv2.DescribeLoadBalancersInput{} + conn := client.ELBV2Client(ctx) sweepResources := make([]sweep.Sweepable, 0) - var errs *multierror.Error - err = conn.DescribeLoadBalancersPagesWithContext(ctx, &elbv2.DescribeLoadBalancersInput{}, func(page *elbv2.DescribeLoadBalancersOutput, lastPage bool) bool { - if page == nil || len(page.LoadBalancers) == 0 { - log.Print("[DEBUG] No LBs to sweep") - return false + pages := elasticloadbalancingv2.NewDescribeLoadBalancersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping ELBv2 Listener sweep for %s: %s", region, err) + return nil } - for _, loadBalancer := range page.LoadBalancers { - err = conn.DescribeListenersPagesWithContext(ctx, &elbv2.DescribeListenersInput{ - LoadBalancerArn: loadBalancer.LoadBalancerArn, - }, func(page *elbv2.DescribeListenersOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + if err != nil { + return fmt.Errorf("error listing ELBv2 Load Balancers (%s): %w", region, err) + } - for _, listener := range page.Listeners { - if 
listener == nil { - continue - } + for _, v := range page.LoadBalancers { + input := &elasticloadbalancingv2.DescribeListenersInput{ + LoadBalancerArn: v.LoadBalancerArn, + } - r := ResourceListener() - d := r.Data(nil) - d.SetId(aws.StringValue(listener.ListenerArn)) + pages := elasticloadbalancingv2.NewDescribeListenersPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + if err != nil { + continue } - return !lastPage - }) + for _, v := range page.Listeners { + r := resourceListener() + d := r.Data(nil) + d.SetId(aws.ToString(v.ListenerArn)) - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("failed to describe LB Listeners (%s): %w", region, err)) - continue + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } } } - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error describing ELBv2 Listeners for %s: %w", region, err)) } - if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - errs = multierror.Append(errs, fmt.Errorf("error sweeping ELBv2 Listeners for %s: %w", region, err)) - } + err = sweep.SweepOrchestrator(ctx, sweepResources) - if awsv1.SkipSweepError(errs.ErrorOrNil()) { - log.Printf("[WARN] Skipping ELBv2 Listener sweep for %s: %s", region, errs) - return nil + if err != nil { + return fmt.Errorf("error sweeping ELBv2 Listeners (%s): %w", region, err) } - return errs.ErrorOrNil() + return nil } diff --git a/internal/service/elbv2/tags_gen.go b/internal/service/elbv2/tags_gen.go index b60e08f83ab..541c4048026 100644 --- a/internal/service/elbv2/tags_gen.go +++ b/internal/service/elbv2/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/aws/aws-sdk-go/service/elbv2/elbv2iface" + "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,24 +19,24 @@ import ( // listTags lists elbv2 service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn elbv2iface.ELBV2API, identifier string) (tftags.KeyValueTags, error) { - input := &elbv2.DescribeTagsInput{ - ResourceArns: aws.StringSlice([]string{identifier}), +func listTags(ctx context.Context, conn *elasticloadbalancingv2.Client, identifier string, optFns ...func(*elasticloadbalancingv2.Options)) (tftags.KeyValueTags, error) { + input := &elasticloadbalancingv2.DescribeTagsInput{ + ResourceArns: []string{identifier}, } - output, err := conn.DescribeTagsWithContext(ctx, input) + output, err := conn.DescribeTags(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err } - return keyValueTags(ctx, output.TagDescriptions[0].Tags), nil + return KeyValueTags(ctx, output.TagDescriptions[0].Tags), nil } // ListTags lists elbv2 service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).ELBV2Conn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).ELBV2Client(ctx), identifier) if err != nil { return err @@ -51,12 +51,12 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling -// tags returns elbv2 service tags. -func tags(tags tftags.KeyValueTags) []*elbv2.Tag { - result := make([]*elbv2.Tag, 0, len(tags)) +// Tags returns elbv2 service tags. 
+func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &elbv2.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -67,12 +67,12 @@ func tags(tags tftags.KeyValueTags) []*elbv2.Tag { return result } -// keyValueTags creates tftags.KeyValueTags from elbv2 service tags. -func keyValueTags(ctx context.Context, tags []*elbv2.Tag) tftags.KeyValueTags { +// KeyValueTags creates tftags.KeyValueTags from elasticloadbalancingv2 service tags. +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,9 +80,9 @@ func keyValueTags(ctx context.Context, tags []*elbv2.Tag) tftags.KeyValueTags { // getTagsIn returns elbv2 service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*elbv2.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { - if tags := tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags } } @@ -91,25 +91,25 @@ func getTagsIn(ctx context.Context) []*elbv2.Tag { } // setTagsOut sets elbv2 service tags in Context. -func setTagsOut(ctx context.Context, tags []*elbv2.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = option.Some(keyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } // createTags creates elbv2 service tags for new resources. 
-func createTags(ctx context.Context, conn elbv2iface.ELBV2API, identifier string, tags []*elbv2.Tag) error { +func createTags(ctx context.Context, conn *elasticloadbalancingv2.Client, identifier string, tags []awstypes.Tag, optFns ...func(*elasticloadbalancingv2.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, keyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates elbv2 service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn elbv2iface.ELBV2API, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *elasticloadbalancingv2.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*elasticloadbalancingv2.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -118,12 +118,12 @@ func updateTags(ctx context.Context, conn elbv2iface.ELBV2API, identifier string removedTags := oldTags.Removed(newTags) removedTags = removedTags.IgnoreSystem(names.ELBV2) if len(removedTags) > 0 { - input := &elbv2.RemoveTagsInput{ - ResourceArns: aws.StringSlice([]string{identifier}), - TagKeys: aws.StringSlice(removedTags.Keys()), + input := &elasticloadbalancingv2.RemoveTagsInput{ + ResourceArns: []string{identifier}, + TagKeys: removedTags.Keys(), } - _, err := conn.RemoveTagsWithContext(ctx, input) + _, err := conn.RemoveTags(ctx, input, optFns...) 
if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -133,12 +133,12 @@ func updateTags(ctx context.Context, conn elbv2iface.ELBV2API, identifier string updatedTags := oldTags.Updated(newTags) updatedTags = updatedTags.IgnoreSystem(names.ELBV2) if len(updatedTags) > 0 { - input := &elbv2.AddTagsInput{ - ResourceArns: aws.StringSlice([]string{identifier}), - Tags: tags(updatedTags), + input := &elasticloadbalancingv2.AddTagsInput{ + ResourceArns: []string{identifier}, + Tags: Tags(updatedTags), } - _, err := conn.AddTagsWithContext(ctx, input) + _, err := conn.AddTags(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -151,5 +151,5 @@ func updateTags(ctx context.Context, conn elbv2iface.ELBV2API, identifier string // UpdateTags updates elbv2 service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).ELBV2Conn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).ELBV2Client(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/elbv2/tagsv2_gen.go b/internal/service/elbv2/tagsv2_gen.go deleted file mode 100644 index cd926c35a9d..00000000000 --- a/internal/service/elbv2/tagsv2_gen.go +++ /dev/null @@ -1,132 +0,0 @@ -// Code generated by internal/generate/tags/main.go; DO NOT EDIT. 
-package elbv2 - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" - awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" - "github.com/hashicorp/terraform-plugin-log/tflog" - "github.com/hashicorp/terraform-provider-aws/internal/logging" - tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types/option" - "github.com/hashicorp/terraform-provider-aws/names" -) - -// listTagsV2 lists elbv2 service tags. -// The identifier is typically the Amazon Resource Name (ARN), although -// it may also be a different identifier depending on the service. -func listTagsV2(ctx context.Context, conn *elasticloadbalancingv2.Client, identifier string, optFns ...func(*elasticloadbalancingv2.Options)) (tftags.KeyValueTags, error) { - input := &elasticloadbalancingv2.DescribeTagsInput{ - ResourceArns: []string{identifier}, - } - - output, err := conn.DescribeTags(ctx, input, optFns...) - - if err != nil { - return tftags.New(ctx, nil), err - } - - return keyValueTagsV2(ctx, output.TagDescriptions[0].Tags), nil -} - -// []*SERVICE.Tag handling - -// tagsV2 returns elbv2 service tags. -func tagsV2(tags tftags.KeyValueTags) []awstypes.Tag { - result := make([]awstypes.Tag, 0, len(tags)) - - for k, v := range tags.Map() { - tag := awstypes.Tag{ - Key: aws.String(k), - Value: aws.String(v), - } - - result = append(result, tag) - } - - return result -} - -// keyValueTagsV2 creates tftags.KeyValueTags from elasticloadbalancingv2 service tags. -func keyValueTagsV2(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { - m := make(map[string]*string, len(tags)) - - for _, tag := range tags { - m[aws.ToString(tag.Key)] = tag.Value - } - - return tftags.New(ctx, m) -} - -// getTagsInV2 returns elbv2 service tags from Context. -// nil is returned if there are no input tags. 
-func getTagsInV2(ctx context.Context) []awstypes.Tag { - if inContext, ok := tftags.FromContext(ctx); ok { - if tags := tagsV2(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { - return tags - } - } - - return nil -} - -// setTagsOutV2 sets elbv2 service tags in Context. -func setTagsOutV2(ctx context.Context, tags []awstypes.Tag) { - if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = option.Some(keyValueTagsV2(ctx, tags)) - } -} - -// createTagsV2 creates elbv2 service tags for new resources. -func createTagsV2(ctx context.Context, conn *elasticloadbalancingv2.Client, identifier string, tags []awstypes.Tag) error { - if len(tags) == 0 { - return nil - } - - return updateTagsV2(ctx, conn, identifier, nil, keyValueTagsV2(ctx, tags)) -} - -// updateTagsV2 updates elbv2 service tags. -// The identifier is typically the Amazon Resource Name (ARN), although -// it may also be a different identifier depending on the service. -func updateTagsV2(ctx context.Context, conn *elasticloadbalancingv2.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*elasticloadbalancingv2.Options)) error { - oldTags := tftags.New(ctx, oldTagsMap) - newTags := tftags.New(ctx, newTagsMap) - - ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) - - removedTags := oldTags.Removed(newTags) - removedTags = removedTags.IgnoreSystem(names.ELBV2) - if len(removedTags) > 0 { - input := &elasticloadbalancingv2.RemoveTagsInput{ - ResourceArns: []string{identifier}, - TagKeys: removedTags.Keys(), - } - - _, err := conn.RemoveTags(ctx, input, optFns...) - - if err != nil { - return fmt.Errorf("untagging resource (%s): %w", identifier, err) - } - } - - updatedTags := oldTags.Updated(newTags) - updatedTags = updatedTags.IgnoreSystem(names.ELBV2) - if len(updatedTags) > 0 { - input := &elasticloadbalancingv2.AddTagsInput{ - ResourceArns: []string{identifier}, - Tags: tagsV2(updatedTags), - } - - _, err := conn.AddTags(ctx, input, optFns...) 
- - if err != nil { - return fmt.Errorf("tagging resource (%s): %w", identifier, err) - } - } - - return nil -} diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 87c7a782d58..63b8479d3e1 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -13,9 +13,10 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" @@ -24,6 +25,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -37,9 +39,9 @@ import ( // @SDKResource("aws_alb_target_group", name="Target Group") // @SDKResource("aws_lb_target_group", name="Target Group") // @Tags(identifierAttribute="id") -// @Testing(existsType="github.com/aws/aws-sdk-go/service/elbv2;elbv2.TargetGroup") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;types.TargetGroup") // @Testing(importIgnore="lambda_multi_value_headers_enabled;proxy_protocol_v2") -func ResourceTargetGroup() *schema.Resource { +func resourceTargetGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: 
resourceTargetGroupCreate, ReadWithoutTimeout: resourceTargetGroupRead, @@ -120,17 +122,17 @@ func ResourceTargetGroup() *schema.Resource { Optional: true, Default: healthCheckPortTrafficPort, ValidateFunc: validTargetGroupHealthCheckPort, - DiffSuppressFunc: suppressIfTargetType(elbv2.TargetTypeEnumLambda), + DiffSuppressFunc: suppressIfTargetType(awstypes.TargetTypeEnumLambda), }, names.AttrProtocol: { Type: schema.TypeString, Optional: true, - Default: elbv2.ProtocolEnumHttp, + Default: awstypes.ProtocolEnumHttp, StateFunc: func(v interface{}) string { return strings.ToUpper(v.(string)) }, ValidateFunc: validation.StringInSlice(healthCheckProtocolEnumValues(), true), - DiffSuppressFunc: suppressIfTargetType(elbv2.TargetTypeEnumLambda), + DiffSuppressFunc: suppressIfTargetType(awstypes.TargetTypeEnumLambda), }, names.AttrTimeout: { Type: schema.TypeInt, @@ -148,11 +150,11 @@ func ResourceTargetGroup() *schema.Resource { }, }, names.AttrIPAddressType: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(elbv2.TargetGroupIpAddressTypeEnum_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.TargetGroupIpAddressTypeEnum](), }, "lambda_multi_value_headers_enabled": { Type: schema.TypeBool, @@ -203,7 +205,7 @@ func ResourceTargetGroup() *schema.Resource { Optional: true, ForceNew: true, ValidateFunc: validation.IntBetween(1, 65535), - DiffSuppressFunc: suppressIfTargetType(elbv2.TargetTypeEnumLambda), + DiffSuppressFunc: suppressIfTargetType(awstypes.TargetTypeEnumLambda), }, "preserve_client_ip": { Type: nullable.TypeNullableBool, @@ -216,8 +218,8 @@ func ResourceTargetGroup() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validation.StringInSlice(elbv2.ProtocolEnum_Values(), true), - DiffSuppressFunc: suppressIfTargetType(elbv2.TargetTypeEnumLambda), + 
ValidateDiagFunc: enum.Validate[awstypes.ProtocolEnum](), + DiffSuppressFunc: suppressIfTargetType(awstypes.TargetTypeEnumLambda), }, "protocol_version": { Type: schema.TypeString, @@ -233,11 +235,11 @@ func ResourceTargetGroup() *schema.Resource { if d.Id() == "" { return false } - if d.Get("target_type").(string) == elbv2.TargetTypeEnumLambda { + if awstypes.TargetTypeEnum(d.Get("target_type").(string)) == awstypes.TargetTypeEnumLambda { return true } - switch d.Get(names.AttrProtocol).(string) { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + switch awstypes.ProtocolEnum(d.Get(names.AttrProtocol).(string)) { + case awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps: return false } return true @@ -270,8 +272,8 @@ func ResourceTargetGroup() *schema.Resource { Default: 86400, ValidateFunc: validation.IntBetween(0, 604800), DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - switch d.Get(names.AttrProtocol).(string) { - case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumUdp, elbv2.ProtocolEnumTcpUdp, elbv2.ProtocolEnumTls, elbv2.ProtocolEnumGeneve: + switch awstypes.ProtocolEnum(d.Get(names.AttrProtocol).(string)) { + case awstypes.ProtocolEnumTcp, awstypes.ProtocolEnumUdp, awstypes.ProtocolEnumTcpUdp, awstypes.ProtocolEnumTls, awstypes.ProtocolEnumGeneve: return true } return false @@ -315,6 +317,57 @@ func ResourceTargetGroup() *schema.Resource { }, }, }, + "target_group_health": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dns_failover": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minimum_healthy_targets_count": { + Type: schema.TypeString, + Optional: true, + Default: "off", + ValidateFunc: validTargetGroupHealthInput, + }, + "minimum_healthy_targets_percentage": { + Type: schema.TypeString, + Optional: true, + Default: "off", + ValidateFunc: 
validTargetGroupHealthPercentageInput, + }, + }, + }, + }, + "unhealthy_state_routing": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minimum_healthy_targets_count": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + }, + "minimum_healthy_targets_percentage": { + Type: schema.TypeString, + Optional: true, + Default: "off", + ValidateFunc: validTargetGroupHealthPercentageInput, + }, + }, + }, + }, + }, + }, + }, "target_health_state": { Type: schema.TypeList, Optional: true, @@ -329,35 +382,35 @@ func ResourceTargetGroup() *schema.Resource { }, }, "target_type": { - Type: schema.TypeString, - Optional: true, - Default: elbv2.TargetTypeEnumInstance, - ForceNew: true, - ValidateFunc: validation.StringInSlice(elbv2.TargetTypeEnum_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.TargetTypeEnumInstance, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.TargetTypeEnum](), }, names.AttrVPCID: { Type: schema.TypeString, Optional: true, ForceNew: true, - DiffSuppressFunc: suppressIfTargetType(elbv2.TargetTypeEnumLambda), + DiffSuppressFunc: suppressIfTargetType(awstypes.TargetTypeEnumLambda), }, }, } } -func suppressIfTargetType(t string) schema.SchemaDiffSuppressFunc { +func suppressIfTargetType(t awstypes.TargetTypeEnum) schema.SchemaDiffSuppressFunc { return func(k string, old string, new string, d *schema.ResourceData) bool { // Don't suppress on creation, so that warnings are actually called if d.Id() == "" { return false } - return d.Get("target_type").(string) == t + return awstypes.TargetTypeEnum(d.Get("target_type").(string)) == t } } func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) name := create.NewNameGenerator( 
create.WithConfiguredName(d.Get(names.AttrName).(string)), @@ -376,26 +429,27 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta targetGroupRuntimeValidation(d, &diags) - protocol := d.Get(names.AttrProtocol).(string) - targetType := d.Get("target_type").(string) - input := &elbv2.CreateTargetGroupInput{ + protocol := awstypes.ProtocolEnum(d.Get(names.AttrProtocol).(string)) + targetType := awstypes.TargetTypeEnum(d.Get("target_type").(string)) + input := &elasticloadbalancingv2.CreateTargetGroupInput{ Name: aws.String(name), Tags: getTagsIn(ctx), - TargetType: aws.String(targetType), + TargetType: targetType, } - if targetType != elbv2.TargetTypeEnumLambda { - input.Port = aws.Int64(int64(d.Get(names.AttrPort).(int))) - input.Protocol = aws.String(protocol) + if targetType != awstypes.TargetTypeEnumLambda { + input.Port = aws.Int32(int32(d.Get(names.AttrPort).(int))) + input.Protocol = protocol switch protocol { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + case awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps: input.ProtocolVersion = aws.String(d.Get("protocol_version").(string)) } input.VpcId = aws.String(d.Get(names.AttrVPCID).(string)) - if targetType == elbv2.TargetTypeEnumIp { + switch targetType { + case awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp: if v, ok := d.GetOk(names.AttrIPAddressType); ok { - input.IpAddressType = aws.String(v.(string)) + input.IpAddressType = awstypes.TargetGroupIpAddressTypeEnum(v.(string)) } } } @@ -404,46 +458,47 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta tfMap := v.([]interface{})[0].(map[string]interface{}) input.HealthCheckEnabled = aws.Bool(tfMap[names.AttrEnabled].(bool)) - input.HealthCheckIntervalSeconds = aws.Int64(int64(tfMap[names.AttrInterval].(int))) - input.HealthyThresholdCount = aws.Int64(int64(tfMap["healthy_threshold"].(int))) - input.UnhealthyThresholdCount = 
aws.Int64(int64(tfMap["unhealthy_threshold"].(int))) + input.HealthCheckIntervalSeconds = aws.Int32(int32(tfMap[names.AttrInterval].(int))) + input.HealthyThresholdCount = aws.Int32(int32(tfMap["healthy_threshold"].(int))) + input.UnhealthyThresholdCount = aws.Int32(int32(tfMap["unhealthy_threshold"].(int))) if v, ok := tfMap[names.AttrTimeout].(int); ok && v != 0 { - input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) + input.HealthCheckTimeoutSeconds = aws.Int32(int32(v)) } - healthCheckProtocol := tfMap[names.AttrProtocol].(string) - if healthCheckProtocol != elbv2.ProtocolEnumTcp { + healthCheckProtocol := awstypes.ProtocolEnum(tfMap[names.AttrProtocol].(string)) + if healthCheckProtocol != awstypes.ProtocolEnumTcp { if v, ok := tfMap[names.AttrPath].(string); ok && v != "" { input.HealthCheckPath = aws.String(v) } if v, ok := tfMap["matcher"].(string); ok && v != "" { if protocolVersion := d.Get("protocol_version").(string); protocolVersion == protocolVersionGRPC { - input.Matcher = &elbv2.Matcher{ + input.Matcher = &awstypes.Matcher{ GrpcCode: aws.String(v), } } else { - input.Matcher = &elbv2.Matcher{ + input.Matcher = &awstypes.Matcher{ HttpCode: aws.String(v), } } } } - if targetType != elbv2.TargetTypeEnumLambda { + if targetType != awstypes.TargetTypeEnumLambda { input.HealthCheckPort = aws.String(tfMap[names.AttrPort].(string)) - input.HealthCheckProtocol = aws.String(healthCheckProtocol) + input.HealthCheckProtocol = healthCheckProtocol } } - output, err := conn.CreateTargetGroupWithContext(ctx, input) + output, err := conn.CreateTargetGroup(ctx, input) // Some partitions (e.g. ISO) may not support tag-on-create. 
- if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + partition := meta.(*conns.AWSClient).Partition + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateTargetGroupWithContext(ctx, input) + output, err = conn.CreateTargetGroup(ctx, input) } // Tags are not supported on creation with some protocol types(i.e. GENEVE) @@ -451,27 +506,27 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta if input.Tags != nil && tfawserr.ErrMessageContains(err, errCodeValidationError, tagsOnCreationErrMessage) { input.Tags = nil - output, err = conn.CreateTargetGroupWithContext(ctx, input) + output, err = conn.CreateTargetGroup(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating ELBv2 Target Group (%s): %s", name, err) } - d.SetId(aws.StringValue(output.TargetGroups[0].TargetGroupArn)) + d.SetId(aws.ToString(output.TargetGroups[0].TargetGroupArn)) - _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (interface{}, error) { - return FindTargetGroupByARN(ctx, conn, d.Id()) + _, err = tfresource.RetryWhenNotFound(ctx, elbv2PropagationTimeout, func() (interface{}, error) { + return findTargetGroupByARN(ctx, conn, d.Id()) }) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ELBv2 Target Group (%s) create: %s", d.Id(), err) } - var attributes []*elbv2.TargetGroupAttribute + var attributes []awstypes.TargetGroupAttribute switch targetType { - case elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp: + case awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp: if v, ok := d.GetOk("stickiness"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
} @@ -480,6 +535,10 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, expandTargetGroupTargetFailoverAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } + if v, ok := d.GetOk("target_group_health"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupHealthAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } @@ -488,12 +547,12 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, targetGroupAttributes.expand(d, targetType, false)...) if len(attributes) > 0 { - input := &elbv2.ModifyTargetGroupAttributesInput{ + input := &elasticloadbalancingv2.ModifyTargetGroupAttributesInput{ Attributes: attributes, TargetGroupArn: aws.String(d.Id()), } - _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, input) + _, err := conn.ModifyTargetGroupAttributes(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s) attributes: %s", d.Id(), err) @@ -505,7 +564,7 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta err := createTags(ctx, conn, d.Id(), tags) // If default tags only, continue. Otherwise, error. - if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(partition, err) { return append(diags, resourceTargetGroupRead(ctx, d, meta)...) 
} @@ -519,9 +578,9 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - targetGroup, err := FindTargetGroupByARN(ctx, conn, d.Id()) + targetGroup, err := findTargetGroupByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Target Group %s not found, removing from state", d.Id()) @@ -543,18 +602,18 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) } d.Set(names.AttrIPAddressType, targetGroup.IpAddressType) - d.Set("load_balancer_arns", flex.FlattenStringSet(targetGroup.LoadBalancerArns)) + d.Set("load_balancer_arns", flex.FlattenStringValueSet(targetGroup.LoadBalancerArns)) d.Set(names.AttrName, targetGroup.TargetGroupName) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) - targetType := aws.StringValue(targetGroup.TargetType) + d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(targetGroup.TargetGroupName))) + targetType := targetGroup.TargetType d.Set("target_type", targetType) if _, ok := d.GetOk(names.AttrPort); targetGroup.Port != nil || ok { d.Set(names.AttrPort, targetGroup.Port) } - var protocol string - if _, ok := d.GetOk(names.AttrProtocol); targetGroup.Protocol != nil || ok { - protocol = aws.StringValue(targetGroup.Protocol) + var protocol awstypes.ProtocolEnum + if _, ok := d.GetOk(names.AttrProtocol); targetGroup.Protocol != "" || ok { + protocol = targetGroup.Protocol d.Set(names.AttrProtocol, protocol) } if _, ok := d.GetOk("protocol_version"); targetGroup.ProtocolVersion != nil || ok { @@ -578,6 +637,10 @@ func resourceTargetGroupRead(ctx context.Context, d 
*schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "setting target_failover: %s", err) } + if err := d.Set("target_group_health", []interface{}{flattenTargetGroupHealthAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target_group_health: %s", err) + } + if err := d.Set("target_health_state", []interface{}{flattenTargetGroupTargetHealthStateAttributes(attributes, protocol)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) } @@ -589,36 +652,36 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - protocol := d.Get(names.AttrProtocol).(string) - targetType := d.Get("target_type").(string) + protocol := awstypes.ProtocolEnum(d.Get(names.AttrProtocol).(string)) + targetType := awstypes.TargetTypeEnum(d.Get("target_type").(string)) if d.HasChange(names.AttrHealthCheck) { if v, ok := d.GetOk(names.AttrHealthCheck); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { tfMap := v.([]interface{})[0].(map[string]interface{}) - input := &elbv2.ModifyTargetGroupInput{ + input := &elasticloadbalancingv2.ModifyTargetGroupInput{ HealthCheckEnabled: aws.Bool(tfMap[names.AttrEnabled].(bool)), - HealthCheckIntervalSeconds: aws.Int64(int64(tfMap[names.AttrInterval].(int))), - HealthyThresholdCount: aws.Int64(int64(tfMap["healthy_threshold"].(int))), + HealthCheckIntervalSeconds: aws.Int32(int32(tfMap[names.AttrInterval].(int))), + HealthyThresholdCount: aws.Int32(int32(tfMap["healthy_threshold"].(int))), TargetGroupArn: aws.String(d.Id()), - UnhealthyThresholdCount: aws.Int64(int64(tfMap["unhealthy_threshold"].(int))), + UnhealthyThresholdCount: aws.Int32(int32(tfMap["unhealthy_threshold"].(int))), } if v, 
ok := tfMap[names.AttrTimeout].(int); ok && v != 0 { - input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) + input.HealthCheckTimeoutSeconds = aws.Int32(int32(v)) } - healthCheckProtocol := tfMap[names.AttrProtocol].(string) - if healthCheckProtocol != elbv2.ProtocolEnumTcp { + healthCheckProtocol := awstypes.ProtocolEnum(tfMap[names.AttrProtocol].(string)) + if healthCheckProtocol != awstypes.ProtocolEnumTcp { if v, ok := tfMap["matcher"].(string); ok { if protocolVersion := d.Get("protocol_version").(string); protocolVersion == protocolVersionGRPC { - input.Matcher = &elbv2.Matcher{ + input.Matcher = &awstypes.Matcher{ GrpcCode: aws.String(v), } } else { - input.Matcher = &elbv2.Matcher{ + input.Matcher = &awstypes.Matcher{ HttpCode: aws.String(v), } } @@ -626,12 +689,12 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta input.HealthCheckPath = aws.String(tfMap[names.AttrPath].(string)) } - if targetType != elbv2.TargetTypeEnumLambda { + if targetType != awstypes.TargetTypeEnumLambda { input.HealthCheckPort = aws.String(tfMap[names.AttrPort].(string)) - input.HealthCheckProtocol = aws.String(healthCheckProtocol) + input.HealthCheckProtocol = healthCheckProtocol } - _, err := conn.ModifyTargetGroupWithContext(ctx, input) + _, err := conn.ModifyTargetGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s): %s", d.Id(), err) @@ -639,15 +702,15 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } } - var attributes []*elbv2.TargetGroupAttribute + var attributes []awstypes.TargetGroupAttribute switch targetType { - case elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp: + case awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp: if d.HasChange("stickiness") { if v, ok := d.GetOk("stickiness"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, 
expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } else { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, awstypes.TargetGroupAttribute{ Key: aws.String(targetGroupAttributeStickinessEnabled), Value: flex.BoolValueToString(false), }) @@ -660,6 +723,12 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } } + if d.HasChange("target_group_health") { + if v, ok := d.GetOk("target_group_health"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupHealthAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + } + if d.HasChange("target_health_state") { if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) @@ -670,12 +739,12 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, targetGroupAttributes.expand(d, targetType, true)...) 
if len(attributes) > 0 { - input := &elbv2.ModifyTargetGroupAttributesInput{ + input := &elasticloadbalancingv2.ModifyTargetGroupAttributesInput{ Attributes: attributes, TargetGroupArn: aws.String(d.Id()), } - _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, input) + _, err := conn.ModifyTargetGroupAttributes(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s) attributes: %s", d.Id(), err) @@ -687,14 +756,17 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceTargetGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) log.Printf("[DEBUG] Deleting ELBv2 Target Group: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 2*time.Minute, func() (interface{}, error) { - return conn.DeleteTargetGroupWithContext(ctx, &elbv2.DeleteTargetGroupInput{ + const ( + timeout = 2 * time.Minute + ) + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.ResourceInUseException](ctx, timeout, func() (interface{}, error) { + return conn.DeleteTargetGroup(ctx, &elasticloadbalancingv2.DeleteTargetGroupInput{ TargetGroupArn: aws.String(d.Id()), }) - }, elbv2.ErrCodeResourceInUseException, "is currently in use by a listener or a rule") + }, "is currently in use by a listener or a rule") if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ELBv2 Target Group (%s): %s", d.Id(), err) @@ -707,7 +779,7 @@ type targetGroupAttributeInfo struct { apiAttributeKey string tfType schema.ValueType tfNullableType schema.ValueType - targetTypesSupported []string + targetTypesSupported []awstypes.TargetTypeEnum } type targetGroupAttributeMap map[string]targetGroupAttributeInfo @@ -716,54 +788,54 @@ var targetGroupAttributes = targetGroupAttributeMap(map[string]targetGroupAttrib "connection_termination": { 
apiAttributeKey: targetGroupAttributeDeregistrationDelayConnectionTerminationEnabled, tfType: schema.TypeBool, - targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp}, }, "deregistration_delay": { apiAttributeKey: targetGroupAttributeDeregistrationDelayTimeoutSeconds, tfType: schema.TypeString, tfNullableType: schema.TypeInt, - targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp}, }, "lambda_multi_value_headers_enabled": { apiAttributeKey: targetGroupAttributeLambdaMultiValueHeadersEnabled, tfType: schema.TypeBool, - targetTypesSupported: []string{elbv2.TargetTypeEnumLambda}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumLambda}, }, "load_balancing_algorithm_type": { apiAttributeKey: targetGroupAttributeLoadBalancingAlgorithmType, tfType: schema.TypeString, - targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp}, }, "load_balancing_anomaly_mitigation": { apiAttributeKey: targetGroupAttributeLoadBalancingAlgorithmAnomalyMitigation, tfType: schema.TypeString, - targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp}, }, "load_balancing_cross_zone_enabled": { apiAttributeKey: targetGroupAttributeLoadBalancingCrossZoneEnabled, tfType: schema.TypeString, - targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp}, }, "preserve_client_ip": { 
apiAttributeKey: targetGroupAttributePreserveClientIPEnabled, tfType: schema.TypeString, tfNullableType: schema.TypeBool, - targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp}, }, "proxy_protocol_v2": { apiAttributeKey: targetGroupAttributeProxyProtocolV2Enabled, tfType: schema.TypeBool, - targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp}, }, "slow_start": { apiAttributeKey: targetGroupAttributeSlowStartDurationSeconds, tfType: schema.TypeInt, - targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + targetTypesSupported: []awstypes.TargetTypeEnum{awstypes.TargetTypeEnumInstance, awstypes.TargetTypeEnumIp}, }, }) -func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType string, update bool) []*elbv2.TargetGroupAttribute { - var apiObjects []*elbv2.TargetGroupAttribute +func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType awstypes.TargetTypeEnum, update bool) []awstypes.TargetGroupAttribute { + var apiObjects []awstypes.TargetGroupAttribute for tfAttributeName, attributeInfo := range m { if update && !d.HasChange(tfAttributeName) { @@ -778,7 +850,7 @@ func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType strin case schema.TypeBool: v := v.(string) if v, null, _ := nullable.Bool(v).ValueBool(); !null { - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + apiObjects = append(apiObjects, awstypes.TargetGroupAttribute{ Key: k, Value: flex.BoolValueToString(v), }) @@ -786,7 +858,7 @@ func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType strin case schema.TypeInt: v := v.(string) if v, null, _ := nullable.Int(v).ValueInt64(); !null { - apiObjects = 
append(apiObjects, &elbv2.TargetGroupAttribute{ + apiObjects = append(apiObjects, awstypes.TargetGroupAttribute{ Key: k, Value: flex.Int64ValueToString(v), }) @@ -795,21 +867,21 @@ func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType strin switch attributeInfo.tfType { case schema.TypeBool: if v := v.(bool); v || update { - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + apiObjects = append(apiObjects, awstypes.TargetGroupAttribute{ Key: k, Value: flex.BoolValueToString(v), }) } case schema.TypeInt: if v := v.(int); v > 0 || update { - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + apiObjects = append(apiObjects, awstypes.TargetGroupAttribute{ Key: k, Value: flex.IntValueToString(v), }) } case schema.TypeString: if v := v.(string); v != "" || update { - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + apiObjects = append(apiObjects, awstypes.TargetGroupAttribute{ Key: k, Value: aws.String(v), }) @@ -821,15 +893,15 @@ func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType strin return apiObjects } -func (m targetGroupAttributeMap) flatten(d *schema.ResourceData, targetType string, apiObjects []*elbv2.TargetGroupAttribute) { +func (m targetGroupAttributeMap) flatten(d *schema.ResourceData, targetType awstypes.TargetTypeEnum, apiObjects []awstypes.TargetGroupAttribute) { for tfAttributeName, attributeInfo := range m { if !slices.Contains(attributeInfo.targetTypesSupported, targetType) { continue } k := attributeInfo.apiAttributeKey - i := slices.IndexFunc(apiObjects, func(v *elbv2.TargetGroupAttribute) bool { - return aws.StringValue(v.Key) == k + i := slices.IndexFunc(apiObjects, func(v awstypes.TargetGroupAttribute) bool { + return aws.ToString(v.Key) == k }) if i == -1 { @@ -847,9 +919,9 @@ func (m targetGroupAttributeMap) flatten(d *schema.ResourceData, targetType stri } } -func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (*elbv2.TargetGroup, error) 
{ - input := &elbv2.DescribeTargetGroupsInput{ - TargetGroupArns: aws.StringSlice([]string{arn}), +func findTargetGroupByARN(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) (*awstypes.TargetGroup, error) { + input := &elasticloadbalancingv2.DescribeTargetGroupsInput{ + TargetGroupArns: []string{arn}, } output, err := findTargetGroup(ctx, conn, input) @@ -859,7 +931,7 @@ func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (* } // Eventual consistency check. - if aws.StringValue(output.TargetGroupArn) != arn { + if aws.ToString(output.TargetGroupArn) != arn { return nil, &retry.NotFoundError{ LastRequest: input, } @@ -868,9 +940,9 @@ func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (* return output, nil } -func findTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) (*elbv2.TargetGroup, error) { - input := &elbv2.DescribeTargetGroupsInput{ - Names: aws.StringSlice([]string{name}), +func findTargetGroupByName(ctx context.Context, conn *elasticloadbalancingv2.Client, name string) (*awstypes.TargetGroup, error) { + input := &elasticloadbalancingv2.DescribeTargetGroupsInput{ + Names: []string{name}, } output, err := findTargetGroup(ctx, conn, input) @@ -880,7 +952,7 @@ func findTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) } // Eventual consistency check. 
- if aws.StringValue(output.TargetGroupName) != name { + if aws.ToString(output.TargetGroupName) != name { return nil, &retry.NotFoundError{ LastRequest: input, } @@ -889,55 +961,48 @@ func findTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) return output, nil } -func findTargetGroup(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) (*elbv2.TargetGroup, error) { +func findTargetGroup(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTargetGroupsInput) (*awstypes.TargetGroup, error) { output, err := findTargetGroups(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findTargetGroups(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) ([]*elbv2.TargetGroup, error) { - var output []*elbv2.TargetGroup +func findTargetGroups(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTargetGroupsInput) ([]awstypes.TargetGroup, error) { + var output []awstypes.TargetGroup - err := conn.DescribeTargetGroupsPagesWithContext(ctx, input, func(page *elbv2.DescribeTargetGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticloadbalancingv2.NewDescribeTargetGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.TargetGroups { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.TargetGroupNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, 
page.TargetGroups...) } return output, nil } -func findTargetGroupAttributesByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) ([]*elbv2.TargetGroupAttribute, error) { - input := &elbv2.DescribeTargetGroupAttributesInput{ +func findTargetGroupAttributesByARN(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) ([]awstypes.TargetGroupAttribute, error) { + input := &elasticloadbalancingv2.DescribeTargetGroupAttributesInput{ TargetGroupArn: aws.String(arn), } - output, err := conn.DescribeTargetGroupAttributesWithContext(ctx, input) + output, err := conn.DescribeTargetGroupAttributes(ctx, input) - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + if errs.IsA[*awstypes.TargetGroupNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -994,34 +1059,34 @@ func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDi healthCheck = healthChecks[0].(map[string]interface{}) } - healtCheckPath := cty.GetAttrPath(names.AttrHealthCheck).IndexInt(0) + healthCheckPath := cty.GetAttrPath(names.AttrHealthCheck).IndexInt(0) - if p, ok := healthCheck[names.AttrProtocol].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { + if p, ok := healthCheck[names.AttrProtocol].(string); ok && strings.ToUpper(p) == string(awstypes.ProtocolEnumTcp) { if m := healthCheck["matcher"].(string); m != "" { return sdkdiag.DiagnosticError(errs.NewAttributeConflictsWhenError( - healtCheckPath.GetAttr("matcher"), - healtCheckPath.GetAttr(names.AttrProtocol), - elbv2.ProtocolEnumTcp, + healthCheckPath.GetAttr("matcher"), + healthCheckPath.GetAttr(names.AttrProtocol), + p, )) } if m := healthCheck[names.AttrPath].(string); m != "" { return sdkdiag.DiagnosticError(errs.NewAttributeConflictsWhenError( - healtCheckPath.GetAttr(names.AttrPath), - healtCheckPath.GetAttr(names.AttrProtocol), - elbv2.ProtocolEnumTcp, + healthCheckPath.GetAttr(names.AttrPath), + 
healthCheckPath.GetAttr(names.AttrProtocol), + p, )) } } - protocol := diff.Get(names.AttrProtocol).(string) + protocol := awstypes.ProtocolEnum(diff.Get(names.AttrProtocol).(string)) switch protocol { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: - if p, ok := healthCheck[names.AttrProtocol].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { + case awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps: + if p, ok := healthCheck[names.AttrProtocol].(string); ok && strings.ToUpper(p) == string(awstypes.ProtocolEnumTcp) { return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", - errs.PathString(healtCheckPath.GetAttr(names.AttrProtocol)), - elbv2.ProtocolEnumTcp, + errs.PathString(healthCheckPath.GetAttr(names.AttrProtocol)), + awstypes.ProtocolEnumTcp, errs.PathString(cty.GetAttrPath(names.AttrProtocol)), protocol, ) @@ -1036,21 +1101,22 @@ func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDi } func customizeDiffTargetGroupTargetTypeLambda(_ context.Context, diff *schema.ResourceDiff, meta any) error { - if diff.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { + targetType := awstypes.TargetTypeEnum(diff.Get("target_type").(string)) + if targetType != awstypes.TargetTypeEnumLambda { return nil } if healthChecks := diff.Get(names.AttrHealthCheck).([]interface{}); len(healthChecks) == 1 { healthCheck := healthChecks[0].(map[string]interface{}) healtCheckPath := cty.GetAttrPath(names.AttrHealthCheck).IndexInt(0) - healthCheckProtocol := healthCheck[names.AttrProtocol].(string) + healthCheckProtocol := awstypes.ProtocolEnum(healthCheck[names.AttrProtocol].(string)) - if healthCheckProtocol == elbv2.ProtocolEnumTcp { + if healthCheckProtocol == awstypes.ProtocolEnumTcp { return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", errs.PathString(healtCheckPath.GetAttr(names.AttrProtocol)), - elbv2.ProtocolEnumTcp, + awstypes.ProtocolEnumTcp, 
errs.PathString(cty.GetAttrPath("target_type")), - elbv2.TargetTypeEnumLambda, + targetType, ) } } @@ -1060,7 +1126,7 @@ func customizeDiffTargetGroupTargetTypeLambda(_ context.Context, diff *schema.Re func customizeDiffTargetGroupTargetTypeNotLambda(_ context.Context, diff *schema.ResourceDiff, meta any) error { targetType := diff.Get("target_type").(string) - if targetType == elbv2.TargetTypeEnumLambda { + if awstypes.TargetTypeEnum(targetType) == awstypes.TargetTypeEnumLambda { return nil } @@ -1093,43 +1159,39 @@ func customizeDiffTargetGroupTargetTypeNotLambda(_ context.Context, diff *schema return nil } -func flattenTargetGroupHealthCheck(apiObject *elbv2.TargetGroup) []interface{} { - if apiObject == nil { - return []interface{}{} - } - +func flattenTargetGroupHealthCheck(apiObject *awstypes.TargetGroup) []interface{} { tfMap := map[string]interface{}{ - names.AttrEnabled: aws.BoolValue(apiObject.HealthCheckEnabled), - "healthy_threshold": int(aws.Int64Value(apiObject.HealthyThresholdCount)), - names.AttrInterval: int(aws.Int64Value(apiObject.HealthCheckIntervalSeconds)), - names.AttrPort: aws.StringValue(apiObject.HealthCheckPort), - names.AttrProtocol: aws.StringValue(apiObject.HealthCheckProtocol), - names.AttrTimeout: int(aws.Int64Value(apiObject.HealthCheckTimeoutSeconds)), - "unhealthy_threshold": int(aws.Int64Value(apiObject.UnhealthyThresholdCount)), + names.AttrEnabled: aws.ToBool(apiObject.HealthCheckEnabled), + "healthy_threshold": aws.ToInt32(apiObject.HealthyThresholdCount), + names.AttrInterval: aws.ToInt32(apiObject.HealthCheckIntervalSeconds), + names.AttrPort: aws.ToString(apiObject.HealthCheckPort), + names.AttrProtocol: apiObject.HealthCheckProtocol, + names.AttrTimeout: aws.ToInt32(apiObject.HealthCheckTimeoutSeconds), + "unhealthy_threshold": aws.ToInt32(apiObject.UnhealthyThresholdCount), } if v := apiObject.HealthCheckPath; v != nil { - tfMap[names.AttrPath] = aws.StringValue(v) + tfMap[names.AttrPath] = aws.ToString(v) } if apiObject 
:= apiObject.Matcher; apiObject != nil { if v := apiObject.HttpCode; v != nil { - tfMap["matcher"] = aws.StringValue(v) + tfMap["matcher"] = aws.ToString(v) } if v := apiObject.GrpcCode; v != nil { - tfMap["matcher"] = aws.StringValue(v) + tfMap["matcher"] = aws.ToString(v) } } return []interface{}{tfMap} } -func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { +func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protocol awstypes.ProtocolEnum) []awstypes.TargetGroupAttribute { if tfMap == nil { return nil } - apiObjects := []*elbv2.TargetGroupAttribute{ + apiObjects := []awstypes.TargetGroupAttribute{ { Key: aws.String(targetGroupAttributeStickinessEnabled), Value: flex.BoolValueToString(tfMap[names.AttrEnabled].(bool)), @@ -1141,21 +1203,21 @@ func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protoco } switch protocol { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + case awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps: switch stickinessType := tfMap[names.AttrType].(string); stickinessType { case stickinessTypeLBCookie: apiObjects = append(apiObjects, - &elbv2.TargetGroupAttribute{ + awstypes.TargetGroupAttribute{ Key: aws.String(targetGroupAttributeStickinessLBCookieDurationSeconds), Value: flex.IntValueToString(tfMap["cookie_duration"].(int)), }) case stickinessTypeAppCookie: apiObjects = append(apiObjects, - &elbv2.TargetGroupAttribute{ + awstypes.TargetGroupAttribute{ Key: aws.String(targetGroupAttributeStickinessAppCookieCookieName), Value: aws.String(tfMap["cookie_name"].(string)), }, - &elbv2.TargetGroupAttribute{ + awstypes.TargetGroupAttribute{ Key: aws.String(targetGroupAttributeStickinessAppCookieDurationSeconds), Value: flex.IntValueToString(tfMap["cookie_duration"].(int)), }) @@ -1165,7 +1227,7 @@ func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protoco return apiObjects } -func 
flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { +func flattenTargetGroupStickinessAttributes(apiObjects []awstypes.TargetGroupAttribute, protocol awstypes.ProtocolEnum) map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -1174,24 +1236,24 @@ func flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttri var stickinessType string for _, apiObject := range apiObjects { - switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + switch k, v := aws.ToString(apiObject.Key), apiObject.Value; k { case targetGroupAttributeStickinessEnabled: tfMap[names.AttrEnabled] = flex.StringToBoolValue(v) case targetGroupAttributeStickinessType: - stickinessType = aws.StringValue(v) + stickinessType = aws.ToString(v) tfMap[names.AttrType] = stickinessType } } switch protocol { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + case awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps: for _, apiObject := range apiObjects { - k, v := aws.StringValue(apiObject.Key), apiObject.Value + k, v := aws.ToString(apiObject.Key), apiObject.Value switch { case k == targetGroupAttributeStickinessLBCookieDurationSeconds && stickinessType == stickinessTypeLBCookie: tfMap["cookie_duration"] = flex.StringToIntValue(v) case k == targetGroupAttributeStickinessAppCookieCookieName && stickinessType == stickinessTypeAppCookie: - tfMap["cookie_name"] = aws.StringValue(v) + tfMap["cookie_name"] = aws.ToString(v) case k == targetGroupAttributeStickinessAppCookieDurationSeconds && stickinessType == stickinessTypeAppCookie: tfMap["cookie_duration"] = flex.StringToIntValue(v) } @@ -1201,21 +1263,21 @@ func flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttri return tfMap } -func expandTargetGroupTargetFailoverAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { +func expandTargetGroupTargetFailoverAttributes(tfMap 
map[string]interface{}, protocol awstypes.ProtocolEnum) []awstypes.TargetGroupAttribute { if tfMap == nil { return nil } - var apiObjects []*elbv2.TargetGroupAttribute + var apiObjects []awstypes.TargetGroupAttribute switch protocol { - case elbv2.ProtocolEnumGeneve: + case awstypes.ProtocolEnumGeneve: apiObjects = append(apiObjects, - &elbv2.TargetGroupAttribute{ + awstypes.TargetGroupAttribute{ Key: aws.String(targetGroupAttributeTargetFailoverOnDeregistration), Value: aws.String(tfMap["on_deregistration"].(string)), }, - &elbv2.TargetGroupAttribute{ + awstypes.TargetGroupAttribute{ Key: aws.String(targetGroupAttributeTargetFailoverOnUnhealthy), Value: aws.String(tfMap["on_unhealthy"].(string)), }) @@ -1224,7 +1286,7 @@ func expandTargetGroupTargetFailoverAttributes(tfMap map[string]interface{}, pro return apiObjects } -func flattenTargetGroupTargetFailoverAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { +func flattenTargetGroupTargetFailoverAttributes(apiObjects []awstypes.TargetGroupAttribute, protocol awstypes.ProtocolEnum) map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -1232,13 +1294,13 @@ func flattenTargetGroupTargetFailoverAttributes(apiObjects []*elbv2.TargetGroupA tfMap := map[string]interface{}{} switch protocol { - case elbv2.ProtocolEnumGeneve: + case awstypes.ProtocolEnumGeneve: for _, apiObject := range apiObjects { - switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + switch k, v := aws.ToString(apiObject.Key), apiObject.Value; k { case targetGroupAttributeTargetFailoverOnDeregistration: - tfMap["on_deregistration"] = aws.StringValue(v) + tfMap["on_deregistration"] = aws.ToString(v) case targetGroupAttributeTargetFailoverOnUnhealthy: - tfMap["on_unhealthy"] = aws.StringValue(v) + tfMap["on_unhealthy"] = aws.ToString(v) } } } @@ -1246,17 +1308,17 @@ func flattenTargetGroupTargetFailoverAttributes(apiObjects []*elbv2.TargetGroupA return tfMap } -func 
expandTargetGroupTargetHealthStateAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { +func expandTargetGroupTargetHealthStateAttributes(tfMap map[string]interface{}, protocol awstypes.ProtocolEnum) []awstypes.TargetGroupAttribute { if tfMap == nil { return nil } - var apiObjects []*elbv2.TargetGroupAttribute + var apiObjects []awstypes.TargetGroupAttribute switch protocol { - case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + case awstypes.ProtocolEnumTcp, awstypes.ProtocolEnumTls: apiObjects = append(apiObjects, - &elbv2.TargetGroupAttribute{ + awstypes.TargetGroupAttribute{ Key: aws.String(targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled), Value: flex.BoolValueToString(tfMap["enable_unhealthy_connection_termination"].(bool)), }) @@ -1265,7 +1327,7 @@ func expandTargetGroupTargetHealthStateAttributes(tfMap map[string]interface{}, return apiObjects } -func flattenTargetGroupTargetHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { +func flattenTargetGroupTargetHealthStateAttributes(apiObjects []awstypes.TargetGroupAttribute, protocol awstypes.ProtocolEnum) map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -1273,9 +1335,9 @@ func flattenTargetGroupTargetHealthStateAttributes(apiObjects []*elbv2.TargetGro tfMap := map[string]interface{}{} switch protocol { - case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + case awstypes.ProtocolEnumTcp, awstypes.ProtocolEnumTls: for _, apiObject := range apiObjects { - switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + switch k, v := aws.ToString(apiObject.Key), apiObject.Value; k { case targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled: tfMap["enable_unhealthy_connection_termination"] = flex.StringToBoolValue(v) } @@ -1285,14 +1347,90 @@ func flattenTargetGroupTargetHealthStateAttributes(apiObjects []*elbv2.TargetGro return tfMap } +func 
expandTargetGroupHealthAttributes(tfMap map[string]interface{}, protocol awstypes.ProtocolEnum) []awstypes.TargetGroupAttribute { + if tfMap == nil { + return nil + } + + var apiObjects []awstypes.TargetGroupAttribute + + // Supported on Application Load Balancers and Network Load Balancers. + switch protocol { + case awstypes.ProtocolEnumGeneve: + default: + if v, ok := tfMap["dns_failover"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + tfMap := v[0].(map[string]interface{}) + apiObjects = append(apiObjects, + awstypes.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetGroupHealthDNSFailoverMinimumHealthyTargetsCount), + Value: aws.String(tfMap["minimum_healthy_targets_count"].(string)), + }, + awstypes.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetGroupHealthDNSFailoverMinimumHealthyTargetsPercentage), + Value: aws.String(tfMap["minimum_healthy_targets_percentage"].(string)), + }, + ) + } + + if v, ok := tfMap["unhealthy_state_routing"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + tfMap := v[0].(map[string]interface{}) + apiObjects = append(apiObjects, + awstypes.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetGroupHealthUnhealthyStateRoutingMinimumHealthyTargetsCount), + Value: flex.IntValueToString(tfMap["minimum_healthy_targets_count"].(int)), + }, + awstypes.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetGroupHealthUnhealthyStateRoutingMinimumHealthyTargetsPercentage), + Value: aws.String(tfMap["minimum_healthy_targets_percentage"].(string)), + }, + ) + } + } + + return apiObjects +} + +func flattenTargetGroupHealthAttributes(apiObjects []awstypes.TargetGroupAttribute, protocol awstypes.ProtocolEnum) map[string]interface{} { + if len(apiObjects) == 0 { + return nil + } + + tfMap := map[string]interface{}{} + dnsFailoverMap := make(map[string]interface{}) + unhealthyStateRoutingMap := make(map[string]interface{}) + + // Supported on Application Load Balancers and Network 
Load Balancers. + switch protocol { + case awstypes.ProtocolEnumGeneve: + default: + for _, apiObject := range apiObjects { + switch k, v := aws.ToString(apiObject.Key), apiObject.Value; k { + case targetGroupAttributeTargetGroupHealthDNSFailoverMinimumHealthyTargetsCount: + dnsFailoverMap["minimum_healthy_targets_count"] = aws.ToString(v) + case targetGroupAttributeTargetGroupHealthDNSFailoverMinimumHealthyTargetsPercentage: + dnsFailoverMap["minimum_healthy_targets_percentage"] = aws.ToString(v) + case targetGroupAttributeTargetGroupHealthUnhealthyStateRoutingMinimumHealthyTargetsCount: + unhealthyStateRoutingMap["minimum_healthy_targets_count"] = flex.StringToIntValue(v) + case targetGroupAttributeTargetGroupHealthUnhealthyStateRoutingMinimumHealthyTargetsPercentage: + unhealthyStateRoutingMap["minimum_healthy_targets_percentage"] = aws.ToString(v) + } + } + } + + tfMap["dns_failover"] = []interface{}{dnsFailoverMap} + tfMap["unhealthy_state_routing"] = []interface{}{unhealthyStateRoutingMap} + + return tfMap +} + func targetGroupRuntimeValidation(d *schema.ResourceData, diags *diag.Diagnostics) { - targetType := d.Get("target_type").(string) - if targetType == elbv2.TargetTypeEnumLambda { + if targetType := awstypes.TargetTypeEnum(d.Get("target_type").(string)); targetType == awstypes.TargetTypeEnumLambda { + targetType := string(targetType) if _, ok := d.GetOk(names.AttrProtocol); ok { *diags = append(*diags, errs.NewAttributeConflictsWhenWillBeError( cty.GetAttrPath(names.AttrProtocol), cty.GetAttrPath("target_type"), - elbv2.TargetTypeEnumLambda, + targetType, )) } @@ -1300,7 +1438,7 @@ func targetGroupRuntimeValidation(d *schema.ResourceData, diags *diag.Diagnostic *diags = append(*diags, errs.NewAttributeConflictsWhenWillBeError( cty.GetAttrPath("protocol_version"), cty.GetAttrPath("target_type"), - elbv2.TargetTypeEnumLambda, + targetType, )) } @@ -1308,7 +1446,7 @@ func targetGroupRuntimeValidation(d *schema.ResourceData, diags *diag.Diagnostic *diags = 
append(*diags, errs.NewAttributeConflictsWhenWillBeError( cty.GetAttrPath(names.AttrPort), cty.GetAttrPath("target_type"), - elbv2.TargetTypeEnumLambda, + targetType, )) } @@ -1316,7 +1454,7 @@ func targetGroupRuntimeValidation(d *schema.ResourceData, diags *diag.Diagnostic *diags = append(*diags, errs.NewAttributeConflictsWhenWillBeError( cty.GetAttrPath(names.AttrPort), cty.GetAttrPath("target_type"), - elbv2.TargetTypeEnumLambda, + targetType, )) } @@ -1328,17 +1466,18 @@ func targetGroupRuntimeValidation(d *schema.ResourceData, diags *diag.Diagnostic *diags = append(*diags, errs.NewAttributeConflictsWhenWillBeError( path.GetAttr(names.AttrProtocol), cty.GetAttrPath("target_type"), - elbv2.TargetTypeEnumLambda, + targetType, )) } } } else { if _, ok := d.GetOk("protocol_version"); ok { - protocol := d.Get(names.AttrProtocol).(string) + protocol := awstypes.ProtocolEnum(d.Get(names.AttrProtocol).(string)) switch protocol { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + case awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps: // Noop default: + protocol := string(protocol) *diags = append(*diags, errs.NewAttributeConflictsWhenWillBeError( cty.GetAttrPath("protocol_version"), cty.GetAttrPath(names.AttrProtocol), diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index c45fa425c50..4e69e594139 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -8,23 +8,24 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_alb_target_group_attachment") -// @SDKResource("aws_lb_target_group_attachment") -func ResourceTargetGroupAttachment() *schema.Resource { +// @SDKResource("aws_alb_target_group_attachment", name="Target Group Attachment") +// @SDKResource("aws_lb_target_group_attachment", name="Target Group Attachment") +func resourceTargetGroupAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAttachmentCreate, ReadWithoutTimeout: resourceAttachmentRead, @@ -57,12 +58,12 @@ func ResourceTargetGroupAttachment() *schema.Resource { func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) targetGroupARN := d.Get("target_group_arn").(string) - input := &elbv2.RegisterTargetsInput{ + input := &elasticloadbalancingv2.RegisterTargetsInput{ TargetGroupArn: aws.String(targetGroupARN), - Targets: []*elbv2.TargetDescription{{ + Targets: []awstypes.TargetDescription{{ Id: aws.String(d.Get("target_id").(string)), }}, } @@ -72,12 +73,15 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk(names.AttrPort); ok { - input.Targets[0].Port = aws.Int64(int64(v.(int))) + input.Targets[0].Port = aws.Int32(int32(v.(int))) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 10*time.Minute, func() (interface{}, error) { - 
return conn.RegisterTargetsWithContext(ctx, input) - }, elbv2.ErrCodeInvalidTargetException) + const ( + timeout = 10 * time.Minute + ) + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidTargetException](ctx, timeout, func() (interface{}, error) { + return conn.RegisterTargets(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "registering ELBv2 Target Group (%s) target: %s", targetGroupARN, err) @@ -93,12 +97,12 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta // target, so there is no work to do beyond ensuring that the target and group still exist. func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) targetGroupARN := d.Get("target_group_arn").(string) - input := &elbv2.DescribeTargetHealthInput{ + input := &elasticloadbalancingv2.DescribeTargetHealthInput{ TargetGroupArn: aws.String(targetGroupARN), - Targets: []*elbv2.TargetDescription{{ + Targets: []awstypes.TargetDescription{{ Id: aws.String(d.Get("target_id").(string)), }}, } @@ -108,10 +112,10 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk(names.AttrPort); ok { - input.Targets[0].Port = aws.Int64(int64(v.(int))) + input.Targets[0].Port = aws.Int32(int32(v.(int))) } - _, err := FindTargetHealthDescription(ctx, conn, input) + _, err := findTargetHealthDescription(ctx, conn, input) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) @@ -128,12 +132,12 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := 
meta.(*conns.AWSClient).ELBV2Client(ctx) targetGroupARN := d.Get("target_group_arn").(string) - input := &elbv2.DeregisterTargetsInput{ + input := &elasticloadbalancingv2.DeregisterTargetsInput{ TargetGroupArn: aws.String(targetGroupARN), - Targets: []*elbv2.TargetDescription{{ + Targets: []awstypes.TargetDescription{{ Id: aws.String(d.Get("target_id").(string)), }}, } @@ -143,13 +147,13 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk(names.AttrPort); ok { - input.Targets[0].Port = aws.Int64(int64(v.(int))) + input.Targets[0].Port = aws.Int32(int32(v.(int))) } log.Printf("[DEBUG] Deleting ELBv2 Target Group Attachment: %s", d.Id()) - _, err := conn.DeregisterTargetsWithContext(ctx, input) + _, err := conn.DeregisterTargets(ctx, input) - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + if errs.IsA[*awstypes.LoadBalancerNotFoundException](err) { return diags } @@ -160,14 +164,14 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func FindTargetHealthDescription(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) (*elbv2.TargetHealthDescription, error) { - output, err := findTargetHealthDescriptions(ctx, conn, input, func(v *elbv2.TargetHealthDescription) bool { +func findTargetHealthDescription(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTargetHealthInput) (*awstypes.TargetHealthDescription, error) { + output, err := findTargetHealthDescriptions(ctx, conn, input, func(v *awstypes.TargetHealthDescription) bool { // This will catch targets being removed by hand (draining as we plan) or that have been removed for a while // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. 
if v := v.TargetHealth; v != nil { - switch reason := aws.StringValue(v.Reason); reason { - case elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress, elbv2.TargetHealthReasonEnumTargetNotRegistered: + switch v.Reason { + case awstypes.TargetHealthReasonEnumDeregistrationInProgress, awstypes.TargetHealthReasonEnumNotRegistered: return false default: return true @@ -181,15 +185,15 @@ func FindTargetHealthDescription(ctx context.Context, conn *elbv2.ELBV2, input * return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput, filter tfslices.Predicate[*elbv2.TargetHealthDescription]) ([]*elbv2.TargetHealthDescription, error) { - var targetHealthDescriptions []*elbv2.TargetHealthDescription +func findTargetHealthDescriptions(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTargetHealthInput, filter tfslices.Predicate[*awstypes.TargetHealthDescription]) ([]awstypes.TargetHealthDescription, error) { + var targetHealthDescriptions []awstypes.TargetHealthDescription - output, err := conn.DescribeTargetHealthWithContext(ctx, input) + output, err := conn.DescribeTargetHealth(ctx, input) - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException, elbv2.ErrCodeTargetGroupNotFoundException) { + if errs.IsA[*awstypes.InvalidTargetException](err) || errs.IsA[*awstypes.TargetGroupNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -205,7 +209,7 @@ func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input } for _, v := range output.TargetHealthDescriptions { - if v != nil && filter(v) { + if filter(&v) { targetHealthDescriptions = append(targetHealthDescriptions, v) } } diff --git a/internal/service/elbv2/target_group_attachment_test.go 
b/internal/service/elbv2/target_group_attachment_test.go index fdaa995a8a7..1251d5f9987 100644 --- a/internal/service/elbv2/target_group_attachment_test.go +++ b/internal/service/elbv2/target_group_attachment_test.go @@ -8,8 +8,9 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -156,11 +157,11 @@ func testAccCheckTargetGroupAttachmentExists(ctx context.Context, n string) reso return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) - input := &elbv2.DescribeTargetHealthInput{ + input := &elasticloadbalancingv2.DescribeTargetHealthInput{ TargetGroupArn: aws.String(rs.Primary.Attributes["target_group_arn"]), - Targets: []*elbv2.TargetDescription{{ + Targets: []awstypes.TargetDescription{{ Id: aws.String(rs.Primary.Attributes["target_id"]), }}, } @@ -170,7 +171,7 @@ func testAccCheckTargetGroupAttachmentExists(ctx context.Context, n string) reso } if v := rs.Primary.Attributes[names.AttrPort]; v != "" { - input.Targets[0].Port = flex.StringValueToInt64(v) + input.Targets[0].Port = flex.StringValueToInt32(v) } _, err := tfelbv2.FindTargetHealthDescription(ctx, conn, input) @@ -181,16 +182,16 @@ func testAccCheckTargetGroupAttachmentExists(ctx context.Context, n string) reso func testAccCheckTargetGroupAttachmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lb_target_group_attachment" && rs.Type != "aws_alb_target_group_attachment" { continue } - input := &elbv2.DescribeTargetHealthInput{ + input := &elasticloadbalancingv2.DescribeTargetHealthInput{ TargetGroupArn: aws.String(rs.Primary.Attributes["target_group_arn"]), - Targets: []*elbv2.TargetDescription{{ + Targets: []awstypes.TargetDescription{{ Id: aws.String(rs.Primary.Attributes["target_id"]), }}, } @@ -200,7 +201,7 @@ func testAccCheckTargetGroupAttachmentDestroy(ctx context.Context) resource.Test } if v := rs.Primary.Attributes[names.AttrPort]; v != "" { - input.Targets[0].Port = flex.StringValueToInt64(v) + input.Targets[0].Port = flex.StringValueToInt32(v) } _, err := tfelbv2.FindTargetHealthDescription(ctx, conn, input) diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index ee8db27150c..da90ef3460f 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -8,9 +8,9 @@ import ( "log" "time" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -21,10 +21,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_alb_target_group") -// @SDKDataSource("aws_lb_target_group") +// @SDKDataSource("aws_alb_target_group", name="Target Group") +// @SDKDataSource("aws_lb_target_group", name="Target Group") // @Testing(tagsTest=true) -func DataSourceTargetGroup() *schema.Resource 
{ +func dataSourceTargetGroup() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceTargetGroupRead, @@ -184,16 +184,17 @@ func DataSourceTargetGroup() *schema.Resource { func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) + partition := meta.(*conns.AWSClient).Partition ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig tagsToMatch := tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{})).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - input := &elbv2.DescribeTargetGroupsInput{} + input := &elasticloadbalancingv2.DescribeTargetGroupsInput{} if v, ok := d.GetOk(names.AttrARN); ok { - input.TargetGroupArns = aws.StringSlice([]string{v.(string)}) + input.TargetGroupArns = []string{v.(string)} } else if v, ok := d.GetOk(names.AttrName); ok { - input.Names = aws.StringSlice([]string{v.(string)}) + input.Names = []string{v.(string)} } results, err := findTargetGroups(ctx, conn, input) @@ -203,13 +204,13 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta } if len(tagsToMatch) > 0 { - var targetGroups []*elbv2.TargetGroup + var targetGroups []awstypes.TargetGroup for _, targetGroup := range results { arn := aws.StringValue(targetGroup.TargetGroupArn) tags, err := listTags(ctx, conn, arn) - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + if errs.IsA[*awstypes.TargetGroupNotFoundException](err) { continue } @@ -235,26 +236,26 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta d.SetId(aws.StringValue(targetGroup.TargetGroupArn)) d.Set(names.AttrARN, targetGroup.TargetGroupArn) d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) - d.Set("load_balancer_arns", flex.FlattenStringSet(targetGroup.LoadBalancerArns)) + 
d.Set("load_balancer_arns", flex.FlattenStringValueSet(targetGroup.LoadBalancerArns)) d.Set(names.AttrName, targetGroup.TargetGroupName) d.Set("target_type", targetGroup.TargetType) - if err := d.Set(names.AttrHealthCheck, flattenTargetGroupHealthCheck(targetGroup)); err != nil { + if err := d.Set(names.AttrHealthCheck, flattenTargetGroupHealthCheck(&targetGroup)); err != nil { return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) } d.Set(names.AttrName, targetGroup.TargetGroupName) - targetType := aws.StringValue(targetGroup.TargetType) + targetType := targetGroup.TargetType d.Set("target_type", targetType) - var protocol string - if targetType != elbv2.TargetTypeEnumLambda { + var protocol awstypes.ProtocolEnum + if targetType != awstypes.TargetTypeEnumLambda { d.Set(names.AttrPort, targetGroup.Port) - protocol = aws.StringValue(targetGroup.Protocol) + protocol = targetGroup.Protocol d.Set(names.AttrProtocol, protocol) d.Set(names.AttrVPCID, targetGroup.VpcId) } - switch protocol { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + switch targetGroup.Protocol { + case awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps: d.Set("protocol_version", targetGroup.ProtocolVersion) } @@ -272,7 +273,7 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta tags, err := listTags(ctx, conn, d.Id()) - if errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if errs.IsUnsupportedOperationInPartitionError(partition, err) { log.Printf("[WARN] Unable to list tags for ELBv2 Target Group %s: %s", d.Id(), err) return diags } diff --git a/internal/service/elbv2/target_group_tags_gen_test.go b/internal/service/elbv2/target_group_tags_gen_test.go index 6d8a62c4ce1..b2d01f1d37a 100644 --- a/internal/service/elbv2/target_group_tags_gen_test.go +++ b/internal/service/elbv2/target_group_tags_gen_test.go @@ -5,7 +5,7 @@ package elbv2_test import ( "testing" - "github.com/aws/aws-sdk-go/service/elbv2" + 
"github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-testing/config" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -19,7 +19,7 @@ import ( func TestAccELBV2TargetGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -202,7 +202,7 @@ func TestAccELBV2TargetGroup_tags(t *testing.T) { func TestAccELBV2TargetGroup_tags_null(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -265,7 +265,7 @@ func TestAccELBV2TargetGroup_tags_null(t *testing.T) { func TestAccELBV2TargetGroup_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -345,7 +345,7 @@ func TestAccELBV2TargetGroup_tags_AddOnUpdate(t *testing.T) { func TestAccELBV2TargetGroup_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -436,7 +436,7 @@ func TestAccELBV2TargetGroup_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccELBV2TargetGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -569,7 +569,7 @@ func TestAccELBV2TargetGroup_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2TargetGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := 
acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -655,7 +655,7 @@ func TestAccELBV2TargetGroup_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -848,7 +848,7 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1017,7 +1017,7 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_overlapping(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1202,7 +1202,7 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_overlapping(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1295,7 +1295,7 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccELBV2TargetGroup_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1387,7 +1387,7 @@ func 
TestAccELBV2TargetGroup_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccELBV2TargetGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1455,7 +1455,7 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccELBV2TargetGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1515,7 +1515,7 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccELBV2TargetGroup_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1580,7 +1580,7 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_nullOverlappingResourceTag(t *test func TestAccELBV2TargetGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1645,7 +1645,7 @@ func TestAccELBV2TargetGroup_tags_DefaultTags_nullNonOverlappingResourceTag(t *t func TestAccELBV2TargetGroup_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1702,7 +1702,7 @@ func TestAccELBV2TargetGroup_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccELBV2TargetGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := 
acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1795,7 +1795,7 @@ func TestAccELBV2TargetGroup_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2TargetGroup_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TargetGroup + var v types.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index 295f33fce7b..5ea0e88bbc1 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -12,13 +12,14 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" tfelbv2 "github.com/hashicorp/terraform-provider-aws/internal/service/elbv2" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" @@ -92,7 +93,7 @@ func TestALBTargetGroupCloudWatchSuffixFromARN(t *testing.T) { func TestAccELBV2TargetGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -152,7 +153,7 @@ func TestAccELBV2TargetGroup_basic(t 
*testing.T) { func TestAccELBV2TargetGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -176,7 +177,7 @@ func TestAccELBV2TargetGroup_disappears(t *testing.T) { func TestAccELBV2TargetGroup_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -211,7 +212,7 @@ func TestAccELBV2TargetGroup_nameGenerated(t *testing.T) { func TestAccELBV2TargetGroup_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -246,7 +247,7 @@ func TestAccELBV2TargetGroup_namePrefix(t *testing.T) { func TestAccELBV2TargetGroup_duplicateName(t *testing.T) { ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -272,7 +273,7 @@ func TestAccELBV2TargetGroup_duplicateName(t *testing.T) { func TestAccELBV2TargetGroup_backwardsCompatibility(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -317,7 +318,7 @@ func TestAccELBV2TargetGroup_backwardsCompatibility(t *testing.T) { func TestAccELBV2TargetGroup_ProtocolVersion_basic(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -371,7 +372,7 @@ func 
TestAccELBV2TargetGroup_ProtocolVersion_basic(t *testing.T) { func TestAccELBV2TargetGroup_ProtocolVersion_grpcHealthCheck(t *testing.T) { ctx := acctest.Context(t) - var targetGroup1 elbv2.TargetGroup + var targetGroup1 awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -396,7 +397,7 @@ func TestAccELBV2TargetGroup_ProtocolVersion_grpcHealthCheck(t *testing.T) { func TestAccELBV2TargetGroup_ProtocolVersion_grpcUpdate(t *testing.T) { ctx := acctest.Context(t) - var targetGroup1 elbv2.TargetGroup + var targetGroup1 awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -428,7 +429,7 @@ func TestAccELBV2TargetGroup_ProtocolVersion_grpcUpdate(t *testing.T) { func TestAccELBV2TargetGroup_ipAddressType(t *testing.T) { ctx := acctest.Context(t) - var targetGroup1 elbv2.TargetGroup + var targetGroup1 awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -463,7 +464,7 @@ func TestAccELBV2TargetGroup_ipAddressType(t *testing.T) { func TestAccELBV2TargetGroup_tls(t *testing.T) { ctx := acctest.Context(t) - var targetGroup1 elbv2.TargetGroup + var targetGroup1 awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -486,7 +487,7 @@ func TestAccELBV2TargetGroup_tls(t *testing.T) { func TestAccELBV2TargetGroup_HealthCheck_tcpHTTPS(t *testing.T) { ctx := acctest.Context(t) - var confBefore, confAfter elbv2.TargetGroup + var confBefore, confAfter awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -546,7 +547,7 @@ func TestAccELBV2TargetGroup_HealthCheck_tcpHTTPS(t *testing.T) { func TestAccELBV2TargetGroup_attrsOnCreate(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf 
awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -580,7 +581,7 @@ func TestAccELBV2TargetGroup_attrsOnCreate(t *testing.T) { func TestAccELBV2TargetGroup_udp(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -612,7 +613,7 @@ func TestAccELBV2TargetGroup_udp(t *testing.T) { func TestAccELBV2TargetGroup_ForceNew_name(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rNameBefore := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rNameAfter := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -644,7 +645,7 @@ func TestAccELBV2TargetGroup_ForceNew_name(t *testing.T) { func TestAccELBV2TargetGroup_ForceNew_port(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -675,7 +676,7 @@ func TestAccELBV2TargetGroup_ForceNew_port(t *testing.T) { func TestAccELBV2TargetGroup_ForceNew_protocol(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -706,7 +707,7 @@ func TestAccELBV2TargetGroup_ForceNew_protocol(t *testing.T) { func TestAccELBV2TargetGroup_ForceNew_vpc(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -735,7 +736,7 @@ func TestAccELBV2TargetGroup_ForceNew_vpc(t *testing.T) { func 
TestAccELBV2TargetGroup_Defaults_application(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -774,7 +775,7 @@ func TestAccELBV2TargetGroup_Defaults_application(t *testing.T) { func TestAccELBV2TargetGroup_Defaults_network(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" healthCheckValid := ` @@ -818,7 +819,7 @@ timeout = 4 func TestAccELBV2TargetGroup_HealthCheck_enable(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -854,7 +855,7 @@ func TestAccELBV2TargetGroup_HealthCheck_enable(t *testing.T) { func TestAccELBV2TargetGroup_NetworkLB_tcpHealthCheckUpdated(t *testing.T) { ctx := acctest.Context(t) - var targetGroup1, targetGroup2 elbv2.TargetGroup + var targetGroup1, targetGroup2 awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -917,7 +918,7 @@ func TestAccELBV2TargetGroup_NetworkLB_tcpHealthCheckUpdated(t *testing.T) { func TestAccELBV2TargetGroup_networkLB_TargetGroupWithConnectionTermination(t *testing.T) { ctx := acctest.Context(t) - var confBefore, confAfter elbv2.TargetGroup + var confBefore, confAfter awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -947,7 +948,7 @@ func TestAccELBV2TargetGroup_networkLB_TargetGroupWithConnectionTermination(t *t func TestAccELBV2TargetGroup_NetworkLB_targetGroupWithProxy(t *testing.T) { ctx := acctest.Context(t) - var confBefore, confAfter elbv2.TargetGroup + var confBefore, confAfter 
awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -977,7 +978,7 @@ func TestAccELBV2TargetGroup_NetworkLB_targetGroupWithProxy(t *testing.T) { func TestAccELBV2TargetGroup_preserveClientIPValid(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup resourceName := "aws_lb_target_group.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1007,7 +1008,7 @@ func TestAccELBV2TargetGroup_preserveClientIPValid(t *testing.T) { func TestAccELBV2TargetGroup_Geneve_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1022,7 +1023,7 @@ func TestAccELBV2TargetGroup_Geneve_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPort, "6081"), - resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, elbv2.ProtocolEnumGeneve), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, string(awstypes.ProtocolEnumGeneve)), resource.TestCheckResourceAttr(resourceName, "tags.Name", rName), ), }, @@ -1043,7 +1044,7 @@ func TestAccELBV2TargetGroup_Geneve_basic(t *testing.T) { func TestAccELBV2TargetGroup_Geneve_notSticky(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1058,7 +1059,7 @@ func TestAccELBV2TargetGroup_Geneve_notSticky(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPort, "6081"), - resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, 
elbv2.ProtocolEnumGeneve), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, string(awstypes.ProtocolEnumGeneve)), ), }, { @@ -1066,7 +1067,7 @@ func TestAccELBV2TargetGroup_Geneve_notSticky(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPort, "6081"), - resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, elbv2.ProtocolEnumGeneve), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, string(awstypes.ProtocolEnumGeneve)), resource.TestCheckResourceAttr(resourceName, "health_check.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health"), resource.TestCheckResourceAttr(resourceName, "health_check.0.enabled", acctest.CtTrue), @@ -1084,7 +1085,7 @@ func TestAccELBV2TargetGroup_Geneve_notSticky(t *testing.T) { func TestAccELBV2TargetGroup_Geneve_Sticky(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1099,7 +1100,7 @@ func TestAccELBV2TargetGroup_Geneve_Sticky(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPort, "6081"), - resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, elbv2.ProtocolEnumGeneve), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, string(awstypes.ProtocolEnumGeneve)), resource.TestCheckResourceAttr(resourceName, "stickiness.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "source_ip_dest_ip"), @@ -1110,7 +1111,7 @@ func TestAccELBV2TargetGroup_Geneve_Sticky(t *testing.T) { Check: 
resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPort, "6081"), - resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, elbv2.ProtocolEnumGeneve), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, string(awstypes.ProtocolEnumGeneve)), resource.TestCheckResourceAttr(resourceName, "stickiness.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "source_ip_dest_ip_proto"), @@ -1122,7 +1123,7 @@ func TestAccELBV2TargetGroup_Geneve_Sticky(t *testing.T) { func TestAccELBV2TargetGroup_Geneve_targetFailover(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1137,7 +1138,7 @@ func TestAccELBV2TargetGroup_Geneve_targetFailover(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPort, "6081"), - resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, elbv2.ProtocolEnumGeneve), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, string(awstypes.ProtocolEnumGeneve)), resource.TestCheckResourceAttr(resourceName, "target_failover.0.on_deregistration", "rebalance"), resource.TestCheckResourceAttr(resourceName, "target_failover.0.on_unhealthy", "rebalance"), ), @@ -1158,7 +1159,7 @@ func TestAccELBV2TargetGroup_Geneve_targetFailover(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &conf), resource.TestCheckResourceAttr(resourceName, names.AttrPort, "6081"), - resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, elbv2.ProtocolEnumGeneve), + 
resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, string(awstypes.ProtocolEnumGeneve)), resource.TestCheckResourceAttr(resourceName, "target_failover.0.on_deregistration", "no_rebalance"), resource.TestCheckResourceAttr(resourceName, "target_failover.0.on_unhealthy", "no_rebalance"), ), @@ -1180,7 +1181,7 @@ func TestAccELBV2TargetGroup_Geneve_targetFailover(t *testing.T) { func TestAccELBV2TargetGroup_Stickiness_defaultALB(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1205,7 +1206,7 @@ func TestAccELBV2TargetGroup_Stickiness_defaultALB(t *testing.T) { func TestAccELBV2TargetGroup_Stickiness_defaultNLB(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1317,7 +1318,7 @@ func TestAccELBV2TargetGroup_Stickiness_invalidNLB(t *testing.T) { func TestAccELBV2TargetGroup_Stickiness_validALB(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1353,7 +1354,7 @@ func TestAccELBV2TargetGroup_Stickiness_validALB(t *testing.T) { func TestAccELBV2TargetGroup_Stickiness_validNLB(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1405,7 +1406,7 @@ func TestAccELBV2TargetGroup_Stickiness_validNLB(t *testing.T) { func TestAccELBV2TargetGroup_Stickiness_updateAppEnabled(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) 
resourceName := "aws_lb_target_group.test" @@ -1494,7 +1495,7 @@ func TestAccELBV2TargetGroup_Stickiness_updateAppEnabled(t *testing.T) { func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1588,7 +1589,7 @@ func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { func TestAccELBV2TargetGroup_HealthCheck_update(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1654,7 +1655,7 @@ func TestAccELBV2TargetGroup_HealthCheck_update(t *testing.T) { func TestAccELBV2TargetGroup_Stickiness_updateEnabled(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1741,7 +1742,7 @@ func TestAccELBV2TargetGroup_Stickiness_updateEnabled(t *testing.T) { func TestAccELBV2TargetGroup_HealthCheck_without(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -1767,7 +1768,7 @@ func TestAccELBV2TargetGroup_HealthCheck_without(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -1812,7 +1813,7 @@ func TestAccELBV2TargetGroup_ALBAlias_basic(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_changeNameForceNew(t *testing.T) { ctx := acctest.Context(t) - var before, after 
elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rNameAfter := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -1843,7 +1844,7 @@ func TestAccELBV2TargetGroup_ALBAlias_changeNameForceNew(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_changePortForceNew(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -1873,7 +1874,7 @@ func TestAccELBV2TargetGroup_ALBAlias_changePortForceNew(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_changeProtocolForceNew(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -1903,7 +1904,7 @@ func TestAccELBV2TargetGroup_ALBAlias_changeProtocolForceNew(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_changeVPCForceNew(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -1931,7 +1932,7 @@ func TestAccELBV2TargetGroup_ALBAlias_changeVPCForceNew(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_generatedName(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -1953,7 +1954,7 @@ func TestAccELBV2TargetGroup_ALBAlias_generatedName(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_lambda(t *testing.T) { ctx := acctest.Context(t) - var targetGroup1 elbv2.TargetGroup + var targetGroup1 awstypes.TargetGroup rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -1989,7 +1990,7 @@ func TestAccELBV2TargetGroup_ALBAlias_lambda(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_lambdaMultiValueHeadersEnabled(t *testing.T) { ctx := acctest.Context(t) - var targetGroup1 elbv2.TargetGroup + var targetGroup1 awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2085,7 +2086,7 @@ func TestAccELBV2TargetGroup_ALBAlias_missing(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2108,7 +2109,7 @@ func TestAccELBV2TargetGroup_ALBAlias_namePrefix(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_setAndUpdateSlowStart(t *testing.T) { ctx := acctest.Context(t) - var before, after elbv2.TargetGroup + var before, after awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2156,7 +2157,7 @@ func TestAccELBV2TargetGroup_ALBAlias_InvalidSlowStart(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_tags(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2189,7 +2190,7 @@ func TestAccELBV2TargetGroup_ALBAlias_tags(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_updateHealthCheck(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2253,7 +2254,7 @@ func TestAccELBV2TargetGroup_ALBAlias_updateHealthCheck(t *testing.T) { func 
TestAccELBV2TargetGroup_ALBAlias_updateLoadBalancingAlgorithmType(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2327,7 +2328,7 @@ func TestAccELBV2TargetGroup_ALBAlias_InvalidAnomalyMitigation(t *testing.T) { func TestAccELBV2TargetGroup_ALBAlias_updateLoadBalancingAnomalyMitigation(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2370,7 +2371,7 @@ func TestAccELBV2TargetGroup_ALBAlias_updateLoadBalancingAnomalyMitigation(t *te func TestAccELBV2TargetGroup_ALBAlias_updateLoadBalancingCrossZoneEnabled(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2413,7 +2414,7 @@ func TestAccELBV2TargetGroup_ALBAlias_updateLoadBalancingCrossZoneEnabled(t *tes func TestAccELBV2TargetGroup_ALBAlias_updateStickinessEnabled(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TargetGroup + var conf awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_alb_target_group.test" @@ -2500,7 +2501,7 @@ func TestAccELBV2TargetGroup_ALBAlias_updateStickinessEnabled(t *testing.T) { func TestAccELBV2TargetGroup_targetHealthStateUnhealthyConnectionTermination(t *testing.T) { ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_lb_target_group.test" @@ -2554,6 +2555,92 @@ func TestAccELBV2TargetGroup_targetHealthStateUnhealthyConnectionTermination(t * }) } +func TestAccELBV2TargetGroup_targetGroupHealthState(t *testing.T) { + ctx 
:= acctest.Context(t) + var targetGroup awstypes.TargetGroup + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ELBV2ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTargetGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTargetGroupConfig_targetGroupHealthState(rName, "off", "off", 1, "off"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "target_group_health.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_count", "off"), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_percentage", "off"), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_percentage", "off"), + ), + }, + { + Config: testAccTargetGroupConfig_targetGroupHealthState(rName, acctest.Ct1, "off", 1, "off"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "target_group_health.#", acctest.Ct1), + 
resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_percentage", "off"), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_percentage", "off"), + ), + }, + { + Config: testAccTargetGroupConfig_targetGroupHealthState(rName, acctest.Ct1, "100", 1, "off"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "target_group_health.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_percentage", "100"), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_percentage", "off"), + ), + }, + { + Config: testAccTargetGroupConfig_targetGroupHealthState(rName, acctest.Ct1, "off", 1, 
"100"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "target_group_health.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_percentage", "off"), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_percentage", "100"), + ), + }, + { + Config: testAccTargetGroupConfig_targetGroupHealthState(rName, acctest.Ct1, "100", 1, "100"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "target_group_health.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.dns_failover.0.minimum_healthy_targets_percentage", "100"), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, 
"target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_count", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "target_group_health.0.unhealthy_state_routing.0.minimum_healthy_targets_percentage", "100"), + ), + }, + }, + }) +} + func TestAccELBV2TargetGroup_Instance_HealthCheck_defaults(t *testing.T) { t.Parallel() @@ -2565,99 +2652,99 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_defaults(t *testing.T) { expectedPath string expectedTimeout string }{ - elbv2.ProtocolEnumHttp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { + string(awstypes.ProtocolEnumHttp): { expectedMatcher: "200", expectedPath: "/", expectedTimeout: "5", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { expectedMatcher: "200", expectedPath: "/", expectedTimeout: "5", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidHealthCheckProtocol: true, }, }, - elbv2.ProtocolEnumHttps: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttps): { + string(awstypes.ProtocolEnumHttp): { expectedMatcher: "200", expectedPath: "/", expectedTimeout: "5", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { expectedMatcher: "200", expectedPath: "/", expectedTimeout: "5", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidHealthCheckProtocol: true, }, }, - elbv2.ProtocolEnumTcp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTcp): { + string(awstypes.ProtocolEnumHttp): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: "6", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: acctest.Ct10, }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { expectedMatcher: "", expectedPath: "", expectedTimeout: acctest.Ct10, }, }, - elbv2.ProtocolEnumTls: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTls): { + 
string(awstypes.ProtocolEnumHttp): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: "6", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: acctest.Ct10, }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { expectedMatcher: "", expectedPath: "", expectedTimeout: acctest.Ct10, }, }, - elbv2.ProtocolEnumUdp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumUdp): { + string(awstypes.ProtocolEnumHttp): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: "6", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: acctest.Ct10, }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { expectedMatcher: "", expectedPath: "", expectedTimeout: acctest.Ct10, }, }, - elbv2.ProtocolEnumTcpUdp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTcpUdp): { + string(awstypes.ProtocolEnumHttp): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: "6", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: acctest.Ct10, }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { expectedMatcher: "", expectedPath: "", expectedTimeout: acctest.Ct10, @@ -2665,11 +2752,11 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_defaults(t *testing.T) { }, } - for _, protocol := range elbv2.ProtocolEnum_Values() { - if protocol == elbv2.ProtocolEnumGeneve { + for _, protocol := range enum.EnumValues[awstypes.ProtocolEnum]() { + if protocol == awstypes.ProtocolEnumGeneve { continue } - protocol := protocol + protocol := string(protocol) t.Run(protocol, func(t *testing.T) { t.Parallel() @@ -2689,7 +2776,7 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_defaults(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var 
targetGroup awstypes.TargetGroup step := resource.TestStep{ Config: testAccTargetGroupConfig_Instance_HealthCheck_basic(protocol, healthCheckProtocol), @@ -2699,7 +2786,7 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_defaults(t *testing.T) { } else { step.Check = resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumInstance), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumInstance)), resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, protocol), resource.TestCheckResourceAttr(resourceName, "health_check.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "health_check.0.enabled", acctest.CtTrue), @@ -2738,85 +2825,85 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_matcher(t *testing.T) { invalidConfig bool matcher string }{ - elbv2.ProtocolEnumHttp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { + string(awstypes.ProtocolEnumHttp): { matcher: "200", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "200", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "200", }, }, - elbv2.ProtocolEnumHttps: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttps): { + string(awstypes.ProtocolEnumHttp): { matcher: "200", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "200", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "200", }, }, - elbv2.ProtocolEnumTcp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTcp): { + string(awstypes.ProtocolEnumHttp): { matcher: "200", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "200", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "200", }, }, - 
elbv2.ProtocolEnumTls: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTls): { + string(awstypes.ProtocolEnumHttp): { matcher: "200", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "200", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "200", }, }, - elbv2.ProtocolEnumUdp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumUdp): { + string(awstypes.ProtocolEnumHttp): { matcher: "200", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "200", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "200", }, }, - elbv2.ProtocolEnumTcpUdp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTcpUdp): { + string(awstypes.ProtocolEnumHttp): { matcher: "200", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "200", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "200", }, }, } - for _, protocol := range elbv2.ProtocolEnum_Values() { - if protocol == elbv2.ProtocolEnumGeneve { + for _, protocol := range enum.EnumValues[awstypes.ProtocolEnum]() { + if protocol == awstypes.ProtocolEnumGeneve { continue } - protocol := protocol + protocol := string(protocol) t.Run(protocol, func(t *testing.T) { t.Parallel() @@ -2836,7 +2923,7 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_matcher(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup step := resource.TestStep{ Config: testAccTargetGroupConfig_Instance_HealthCheck_matcher(protocol, healthCheckProtocol, tc.matcher), @@ -2880,85 +2967,85 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_path(t *testing.T) { invalidConfig bool path string }{ - elbv2.ProtocolEnumHttp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { + string(awstypes.ProtocolEnumHttp): { path: "/path", }, - 
elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { path: "/path", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, path: "/path", }, }, - elbv2.ProtocolEnumHttps: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttps): { + string(awstypes.ProtocolEnumHttp): { path: "/path", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { path: "/path", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, path: "/path", }, }, - elbv2.ProtocolEnumTcp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTcp): { + string(awstypes.ProtocolEnumHttp): { path: "/path", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { path: "/path", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, path: "/path", }, }, - elbv2.ProtocolEnumTls: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTls): { + string(awstypes.ProtocolEnumHttp): { path: "/path", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { path: "/path", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, path: "/path", }, }, - elbv2.ProtocolEnumUdp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumUdp): { + string(awstypes.ProtocolEnumHttp): { path: "/path", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { path: "/path", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, path: "/path", }, }, - elbv2.ProtocolEnumTcpUdp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTcpUdp): { + string(awstypes.ProtocolEnumHttp): { path: "/path", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { path: "/path", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, path: "/path", }, }, } - for _, protocol := range elbv2.ProtocolEnum_Values() { - if protocol == 
elbv2.ProtocolEnumGeneve { + for _, protocol := range enum.EnumValues[awstypes.ProtocolEnum]() { + if protocol == awstypes.ProtocolEnumGeneve { continue } - protocol := protocol + protocol := string(protocol) t.Run(protocol, func(t *testing.T) { t.Parallel() @@ -2978,7 +3065,7 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_path(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup step := resource.TestStep{ Config: testAccTargetGroupConfig_Instance_HealthCheck_path(protocol, healthCheckProtocol, tc.path), @@ -3021,97 +3108,97 @@ func TestAccELBV2TargetGroup_Instance_HealthCheck_matcherOutOfRange(t *testing.T matcher string validRange string }{ - elbv2.ProtocolEnumHttp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { + string(awstypes.ProtocolEnumHttp): { matcher: "500", validRange: "200-499", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "500", validRange: "200-499", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "500", }, }, - elbv2.ProtocolEnumHttps: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttps): { + string(awstypes.ProtocolEnumHttp): { matcher: "500", validRange: "200-499", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "500", validRange: "200-499", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "500", }, }, - elbv2.ProtocolEnumTcp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTcp): { + string(awstypes.ProtocolEnumHttp): { matcher: "600", validRange: "200-599", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "600", validRange: "200-599", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "600", }, }, - elbv2.ProtocolEnumTls: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTls): 
{ + string(awstypes.ProtocolEnumHttp): { matcher: "600", validRange: "200-599", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "600", validRange: "200-599", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "600", }, }, - elbv2.ProtocolEnumUdp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumUdp): { + string(awstypes.ProtocolEnumHttp): { matcher: "600", validRange: "200-599", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "600", validRange: "200-599", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "600", }, }, - elbv2.ProtocolEnumTcpUdp: { - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumTcpUdp): { + string(awstypes.ProtocolEnumHttp): { matcher: "600", validRange: "200-599", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "600", validRange: "200-599", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, matcher: "600", }, }, } - for _, protocol := range elbv2.ProtocolEnum_Values() { - if protocol == elbv2.ProtocolEnumGeneve { + for _, protocol := range enum.EnumValues[awstypes.ProtocolEnum]() { + if protocol == awstypes.ProtocolEnumGeneve { continue } - protocol := protocol + protocol := string(protocol) t.Run(protocol, func(t *testing.T) { t.Parallel() @@ -3167,17 +3254,17 @@ func TestAccELBV2TargetGroup_Instance_HealthCheckGeneve_defaults(t *testing.T) { expectedPath string expectedTimeout string }{ - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: "5", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { expectedMatcher: "200-399", expectedPath: "/", expectedTimeout: "5", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { expectedMatcher: "", expectedPath: "", expectedTimeout: "5", @@ -3194,7 
+3281,7 @@ func TestAccELBV2TargetGroup_Instance_HealthCheckGeneve_defaults(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3206,7 +3293,7 @@ func TestAccELBV2TargetGroup_Instance_HealthCheckGeneve_defaults(t *testing.T) { Config: testAccTargetGroupConfig_Instance_HealthCheckGeneve_basic(healthCheckProtocol), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, elbv2.ProtocolEnumGeneve), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, string(awstypes.ProtocolEnumGeneve)), resource.TestCheckResourceAttr(resourceName, "health_check.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "health_check.0.enabled", acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", acctest.Ct3), @@ -3236,22 +3323,22 @@ func TestAccELBV2TargetGroup_Instance_HealthCheckGRPC_defaults(t *testing.T) { expectedPath string expectedTimeout string }{ - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { expectedMatcher: "12", expectedPath: "/AWS.ALB/healthcheck", expectedTimeout: "5", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { expectedMatcher: "12", expectedPath: "/AWS.ALB/healthcheck", expectedTimeout: "5", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidHealthCheckProtocol: true, }, } - for _, protocol := range []string{elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps} { + for _, protocol := range enum.Slice(awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps) { protocol := protocol t.Run(protocol, func(t *testing.T) { @@ -3267,7 +3354,7 @@ func TestAccELBV2TargetGroup_Instance_HealthCheckGRPC_defaults(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup 
elbv2.TargetGroup + var targetGroup awstypes.TargetGroup step := resource.TestStep{ Config: testAccTargetGroupConfig_Instance_HealhCheckGRPC_basic(protocol, healthCheckProtocol), @@ -3316,19 +3403,19 @@ func TestAccELBV2TargetGroup_Instance_HealthCheckGRPC_path(t *testing.T) { invalidConfig bool path string }{ - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { path: "/path", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { path: "/path", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidConfig: true, path: "/path", }, } - for _, protocol := range []string{elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps} { + for _, protocol := range enum.Slice(awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps) { protocol := protocol t.Run(protocol, func(t *testing.T) { @@ -3344,7 +3431,7 @@ func TestAccELBV2TargetGroup_Instance_HealthCheckGRPC_path(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup step := resource.TestStep{ Config: testAccTargetGroupConfig_Instance_HealhCheckGRPC_path(protocol, healthCheckProtocol, tc.path), @@ -3385,18 +3472,18 @@ func TestAccELBV2TargetGroup_Instance_HealthCheckGRPC_matcherOutOfRange(t *testi invalidHealthCheckProtocol bool matcher string }{ - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { matcher: "101", }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { matcher: "101", }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidHealthCheckProtocol: true, }, } - for _, protocol := range []string{elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps} { + for _, protocol := range enum.Slice(awstypes.ProtocolEnumHttp, awstypes.ProtocolEnumHttps) { protocol := protocol t.Run(protocol, func(t *testing.T) { @@ -3444,31 +3531,31 @@ func TestAccELBV2TargetGroup_Instance_protocolVersion(t *testing.T) { testcases := map[string]struct { validConfig bool }{ - 
elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { validConfig: true, }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { validConfig: true, }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { validConfig: false, }, - elbv2.ProtocolEnumTls: { + string(awstypes.ProtocolEnumTls): { validConfig: false, }, - elbv2.ProtocolEnumUdp: { + string(awstypes.ProtocolEnumUdp): { validConfig: false, }, - elbv2.ProtocolEnumTcpUdp: { + string(awstypes.ProtocolEnumTcpUdp): { validConfig: false, }, } - for _, protocol := range elbv2.ProtocolEnum_Values() { //nolint:paralleltest // false positive - if protocol == elbv2.ProtocolEnumGeneve { + for _, protocol := range enum.EnumValues[awstypes.ProtocolEnum]() { //nolint:paralleltest // false positive + if protocol == awstypes.ProtocolEnumGeneve { continue } - protocol := protocol + protocol := string(protocol) t.Run(protocol, func(t *testing.T) { protocolCase, ok := testcases[protocol] @@ -3477,7 +3564,7 @@ func TestAccELBV2TargetGroup_Instance_protocolVersion(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup step := resource.TestStep{ Config: testAccTargetGroupConfig_Instance_protocolVersion(protocol, "HTTP1"), @@ -3485,13 +3572,13 @@ func TestAccELBV2TargetGroup_Instance_protocolVersion(t *testing.T) { if protocolCase.validConfig { step.Check = resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumInstance), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumInstance)), resource.TestCheckResourceAttr(resourceName, "protocol_version", "HTTP1"), ) } else { step.Check = resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", 
elbv2.TargetTypeEnumInstance), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumInstance)), resource.TestCheckResourceAttr(resourceName, "protocol_version", ""), // Should be Null ) } @@ -3517,31 +3604,31 @@ func TestAccELBV2TargetGroup_Instance_protocolVersion_MigrateV0(t *testing.T) { testcases := map[string]struct { validConfig bool }{ - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { validConfig: true, }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { validConfig: true, }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { validConfig: false, }, - elbv2.ProtocolEnumTls: { + string(awstypes.ProtocolEnumTls): { validConfig: false, }, - elbv2.ProtocolEnumUdp: { + string(awstypes.ProtocolEnumUdp): { validConfig: false, }, - elbv2.ProtocolEnumTcpUdp: { + string(awstypes.ProtocolEnumTcpUdp): { validConfig: false, }, } - for _, protocol := range elbv2.ProtocolEnum_Values() { //nolint:paralleltest // false positive - if protocol == elbv2.ProtocolEnumGeneve { + for _, protocol := range enum.EnumValues[awstypes.ProtocolEnum]() { //nolint:paralleltest // false positive + if protocol == awstypes.ProtocolEnumGeneve { continue } - protocol := protocol + protocol := string(protocol) t.Run(protocol, func(t *testing.T) { protocolCase, ok := testcases[protocol] @@ -3550,7 +3637,7 @@ func TestAccELBV2TargetGroup_Instance_protocolVersion_MigrateV0(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup var ( preCheck resource.TestCheckFunc @@ -3559,23 +3646,23 @@ func TestAccELBV2TargetGroup_Instance_protocolVersion_MigrateV0(t *testing.T) { if protocolCase.validConfig { preCheck = resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumInstance), + resource.TestCheckResourceAttr(resourceName, 
"target_type", string(awstypes.TargetTypeEnumInstance)), resource.TestCheckResourceAttr(resourceName, "protocol_version", "HTTP1"), ) postCheck = resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumInstance), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumInstance)), resource.TestCheckResourceAttr(resourceName, "protocol_version", "HTTP1"), ) } else { preCheck = resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumInstance), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumInstance)), resource.TestCheckNoResourceAttr(resourceName, "protocol_version"), ) postCheck = resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumInstance), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumInstance)), resource.TestCheckNoResourceAttr(resourceName, "protocol_version"), ) } @@ -3600,7 +3687,7 @@ func TestAccELBV2TargetGroup_Lambda_defaults(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3612,7 +3699,7 @@ func TestAccELBV2TargetGroup_Lambda_defaults(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_basic(), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + 
resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, "ipv4"), resource.TestCheckNoResourceAttr(resourceName, names.AttrPort), resource.TestCheckNoResourceAttr(resourceName, names.AttrProtocol), @@ -3630,7 +3717,7 @@ func TestAccELBV2TargetGroup_Lambda_defaults_MigrateV0(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3642,7 +3729,7 @@ func TestAccELBV2TargetGroup_Lambda_defaults_MigrateV0(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_basic(), PreCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, "ipv4"), resource.TestCheckNoResourceAttr(resourceName, names.AttrPort), resource.TestCheckNoResourceAttr(resourceName, names.AttrProtocol), @@ -3653,7 +3740,7 @@ func TestAccELBV2TargetGroup_Lambda_defaults_MigrateV0(t *testing.T) { ), PostCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrIPAddressType, "ipv4"), resource.TestCheckNoResourceAttr(resourceName, names.AttrPort), resource.TestCheckNoResourceAttr(resourceName, names.AttrProtocol), @@ -3670,7 +3757,7 @@ func TestAccELBV2TargetGroup_Lambda_vpc(t *testing.T) { 
const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3682,7 +3769,7 @@ func TestAccELBV2TargetGroup_Lambda_vpc(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_vpc(), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrVPCID, ""), // Should be Null ), }, @@ -3694,7 +3781,7 @@ func TestAccELBV2TargetGroup_Lambda_vpc_MigrateV0(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3706,12 +3793,12 @@ func TestAccELBV2TargetGroup_Lambda_vpc_MigrateV0(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_vpc(), PreCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttrPair(resourceName, names.AttrVPCID, "aws_vpc.test", names.AttrID), ), PostCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, 
names.AttrVPCID, ""), // Should be Null ), }.Steps(), @@ -3723,12 +3810,12 @@ func TestAccELBV2TargetGroup_Lambda_protocol(t *testing.T) { t.Parallel() - for _, protocol := range elbv2.ProtocolEnum_Values() { //nolint:paralleltest // false positive - protocol := protocol + for _, protocol := range enum.EnumValues[awstypes.ProtocolEnum]() { //nolint:paralleltest // false positive + protocol := string(protocol) t.Run(protocol, func(t *testing.T) { ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3740,7 +3827,7 @@ func TestAccELBV2TargetGroup_Lambda_protocol(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_protocol(protocol), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, ""), // Should be Null ), }, @@ -3755,12 +3842,12 @@ func TestAccELBV2TargetGroup_Lambda_protocol_MigrateV0(t *testing.T) { t.Parallel() - for _, protocol := range elbv2.ProtocolEnum_Values() { //nolint:paralleltest // false positive - protocol := protocol + for _, protocol := range enum.EnumValues[awstypes.ProtocolEnum]() { //nolint:paralleltest // false positive + protocol := string(protocol) t.Run(protocol, func(t *testing.T) { ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3772,12 +3859,12 @@ func TestAccELBV2TargetGroup_Lambda_protocol_MigrateV0(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_protocol(protocol), PreCheck: resource.ComposeAggregateTestCheckFunc( 
testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, protocol), ), PostCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, ""), // Should be Null ), }.Steps(), @@ -3790,7 +3877,7 @@ func TestAccELBV2TargetGroup_Lambda_protocolVersion(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3802,7 +3889,7 @@ func TestAccELBV2TargetGroup_Lambda_protocolVersion(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_protocolVersion("HTTP1"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, "protocol_version", ""), ), }, @@ -3814,7 +3901,7 @@ func TestAccELBV2TargetGroup_Lambda_protocolVersion_MigrateV0(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3826,12 +3913,12 @@ func 
TestAccELBV2TargetGroup_Lambda_protocolVersion_MigrateV0(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_protocolVersion("GRPC"), PreCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckNoResourceAttr(resourceName, "protocol_version"), ), PostCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckNoResourceAttr(resourceName, "protocol_version"), ), }.Steps(), @@ -3842,7 +3929,7 @@ func TestAccELBV2TargetGroup_Lambda_port(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3854,7 +3941,7 @@ func TestAccELBV2TargetGroup_Lambda_port(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_port("443"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrPort, acctest.Ct0), // Should be Null ), }, @@ -3866,7 +3953,7 @@ func TestAccELBV2TargetGroup_Lambda_port_MigrateV0(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup 
resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3878,12 +3965,12 @@ func TestAccELBV2TargetGroup_Lambda_port_MigrateV0(t *testing.T) { Config: testAccTargetGroupConfig_Lambda_port("443"), PreCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrPort, "443"), ), PostCheck: resource.ComposeAggregateTestCheckFunc( testAccCheckTargetGroupExists(ctx, resourceName, &targetGroup), - resource.TestCheckResourceAttr(resourceName, "target_type", elbv2.TargetTypeEnumLambda), + resource.TestCheckResourceAttr(resourceName, "target_type", string(awstypes.TargetTypeEnumLambda)), resource.TestCheckResourceAttr(resourceName, names.AttrPort, acctest.Ct0), // Should be Null ), }.Steps(), @@ -3894,7 +3981,7 @@ func TestAccELBV2TargetGroup_Lambda_HealthCheck_basic(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3926,7 +4013,7 @@ func TestAccELBV2TargetGroup_Lambda_HealthCheck_basic_MigrateV0(t *testing.T) { const resourceName = "aws_lb_target_group.test" ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -3975,13 +4062,13 @@ func TestAccELBV2TargetGroup_Lambda_HealthCheck_protocol(t *testing.T) { invalidHealthCheckProtocol bool warning bool }{ - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { warning: true, }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): 
{ warning: true, }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidHealthCheckProtocol: true, }, } @@ -3996,7 +4083,7 @@ func TestAccELBV2TargetGroup_Lambda_HealthCheck_protocol(t *testing.T) { } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup step := resource.TestStep{ Config: testAccTargetGroupConfig_Lambda_HealthCheck_protocol(healthCheckProtocol), @@ -4035,13 +4122,13 @@ func TestAccELBV2TargetGroup_Lambda_HealthCheck_protocol_MigrateV0(t *testing.T) invalidHealthCheckProtocol bool warning bool }{ - elbv2.ProtocolEnumHttp: { + string(awstypes.ProtocolEnumHttp): { warning: true, }, - elbv2.ProtocolEnumHttps: { + string(awstypes.ProtocolEnumHttps): { warning: true, }, - elbv2.ProtocolEnumTcp: { + string(awstypes.ProtocolEnumTcp): { invalidHealthCheckProtocol: true, }, } @@ -4056,7 +4143,7 @@ func TestAccELBV2TargetGroup_Lambda_HealthCheck_protocol_MigrateV0(t *testing.T) } ctx := acctest.Context(t) - var targetGroup elbv2.TargetGroup + var targetGroup awstypes.TargetGroup config := testAccTargetGroupConfig_Lambda_HealthCheck_protocol(healthCheckProtocol) @@ -4129,7 +4216,7 @@ func TestAccELBV2TargetGroup_Lambda_HealthCheck_matcherOutOfRange(t *testing.T) func testAccCheckTargetGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lb_target_group" && rs.Type != "aws_alb_target_group" { @@ -4153,14 +4240,14 @@ func testAccCheckTargetGroupDestroy(ctx context.Context) resource.TestCheckFunc } } -func testAccCheckTargetGroupExists(ctx context.Context, n string, v *elbv2.TargetGroup) resource.TestCheckFunc { +func testAccCheckTargetGroupExists(ctx context.Context, n string, v *awstypes.TargetGroup) resource.TestCheckFunc { return func(s 
*terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) output, err := tfelbv2.FindTargetGroupByARN(ctx, conn, rs.Primary.ID) @@ -4174,9 +4261,9 @@ func testAccCheckTargetGroupExists(ctx context.Context, n string, v *elbv2.Targe } } -func testAccCheckTargetGroupNotRecreated(i, j *elbv2.TargetGroup) resource.TestCheckFunc { +func testAccCheckTargetGroupNotRecreated(i, j *awstypes.TargetGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.TargetGroupArn) != aws.StringValue(j.TargetGroupArn) { + if aws.ToString(i.TargetGroupArn) != aws.ToString(j.TargetGroupArn) { return errors.New("ELBv2 Target Group was recreated") } @@ -4184,9 +4271,9 @@ func testAccCheckTargetGroupNotRecreated(i, j *elbv2.TargetGroup) resource.TestC } } -func testAccCheckTargetGroupRecreated(i, j *elbv2.TargetGroup) resource.TestCheckFunc { +func testAccCheckTargetGroupRecreated(i, j *awstypes.TargetGroup) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.TargetGroupArn) == aws.StringValue(j.TargetGroupArn) { + if aws.ToString(i.TargetGroupArn) == aws.ToString(j.TargetGroupArn) { return errors.New("ELBv2 Target Group was not recreated") } @@ -4891,6 +4978,37 @@ resource "aws_vpc" "test" { `, rName, protocol, enabled) } +func testAccTargetGroupConfig_targetGroupHealthState(rName, targetGroupHealthCount string, targetGroupHealthPercentageEnabled string, unhealthyStateRoutingCount int, unhealthyStateRoutingPercentageEnabled string) string { + return fmt.Sprintf(` +resource "aws_lb_target_group" "test" { + name = %[1]q + port = 80 + protocol = "TCP" + vpc_id = aws_vpc.test.id + + target_group_health { + dns_failover { + minimum_healthy_targets_count = %[2]q + minimum_healthy_targets_percentage = %[3]q + } + + 
unhealthy_state_routing { + minimum_healthy_targets_count = %[4]d + minimum_healthy_targets_percentage = %[5]q + } + } +} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} +`, rName, targetGroupHealthCount, targetGroupHealthPercentageEnabled, unhealthyStateRoutingCount, unhealthyStateRoutingPercentageEnabled) +} + func testAccTargetGroupConfig_typeTCP(rName string) string { return fmt.Sprintf(` resource "aws_lb_target_group" "test" { diff --git a/internal/service/elbv2/trust_store.go b/internal/service/elbv2/trust_store.go index 2241302f82e..fa038a0ca9d 100644 --- a/internal/service/elbv2/trust_store.go +++ b/internal/service/elbv2/trust_store.go @@ -8,9 +8,10 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -18,6 +19,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -28,9 +30,9 @@ import ( // @SDKResource("aws_lb_trust_store", name="Trust Store") // @Tags(identifierAttribute="id") -// @Testing(existsType="github.com/aws/aws-sdk-go/service/elbv2;elbv2.TrustStore") +// 
@Testing(existsType="github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types;types.TrustStore") // @Testing(importIgnore="ca_certificates_bundle_s3_bucket;ca_certificates_bundle_s3_key") -func ResourceTrustStore() *schema.Resource { +func resourceTrustStore() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTrustStoreCreate, ReadWithoutTimeout: resourceTrustStoreRead, @@ -98,14 +100,15 @@ func ResourceTrustStore() *schema.Resource { func resourceTrustStoreCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) + partition := meta.(*conns.AWSClient).Partition name := create.NewNameGenerator( create.WithConfiguredName(d.Get(names.AttrName).(string)), create.WithConfiguredPrefix(d.Get(names.AttrNamePrefix).(string)), create.WithDefaultPrefix("tf-"), ).Generate() - input := &elbv2.CreateTrustStoreInput{ + input := &elasticloadbalancingv2.CreateTrustStoreInput{ CaCertificatesBundleS3Bucket: aws.String(d.Get("ca_certificates_bundle_s3_bucket").(string)), CaCertificatesBundleS3Key: aws.String(d.Get("ca_certificates_bundle_s3_key").(string)), Name: aws.String(name), @@ -116,13 +119,13 @@ func resourceTrustStoreCreate(ctx context.Context, d *schema.ResourceData, meta input.CaCertificatesBundleS3ObjectVersion = aws.String(v.(string)) } - output, err := conn.CreateTrustStoreWithContext(ctx, input) + output, err := conn.CreateTrustStore(ctx, input) // Some partitions (e.g. ISO) may not support tag-on-create. - if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if input.Tags != nil && errs.IsUnsupportedOperationInPartitionError(partition, err) { input.Tags = nil - output, err = conn.CreateTrustStoreWithContext(ctx, input) + output, err = conn.CreateTrustStore(ctx, input) } // Tags are not supported on creation with some protocol types(i.e. 
GENEVE) @@ -130,29 +133,33 @@ func resourceTrustStoreCreate(ctx context.Context, d *schema.ResourceData, meta if input.Tags != nil && tfawserr.ErrMessageContains(err, errCodeValidationError, tagsOnCreationErrMessage) { input.Tags = nil - output, err = conn.CreateTrustStoreWithContext(ctx, input) + output, err = conn.CreateTrustStore(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating ELBv2 Trust Store (%s): %s", name, err) } - d.SetId(aws.StringValue(output.TrustStores[0].TrustStoreArn)) + d.SetId(aws.ToString(output.TrustStores[0].TrustStoreArn)) _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return FindTrustStoreByARN(ctx, conn, d.Id()) + return findTrustStoreByARN(ctx, conn, d.Id()) }) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ELBv2 Trust Store (%s) create: %s", d.Id(), err) } + if _, err := waitTrustStoreActive(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for ELBv2 Trust Store (%s) create: %s", d.Id(), err) + } + // For partitions not supporting tag-on-create, attempt tag after create. if tags := getTagsIn(ctx); input.Tags == nil && len(tags) > 0 { err := createTags(ctx, conn, d.Id(), tags) // If default tags only, continue. Otherwise, error. - if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { + if v, ok := d.GetOk(names.AttrTags); (!ok || len(v.(map[string]interface{})) == 0) && errs.IsUnsupportedOperationInPartitionError(partition, err) { return append(diags, resourceTrustStoreRead(ctx, d, meta)...) 
} @@ -166,9 +173,9 @@ func resourceTrustStoreCreate(ctx context.Context, d *schema.ResourceData, meta func resourceTrustStoreRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - trustStore, err := FindTrustStoreByARN(ctx, conn, d.Id()) + trustStore, err := findTrustStoreByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Trust Store %s not found, removing from state", d.Id()) @@ -182,17 +189,17 @@ func resourceTrustStoreRead(ctx context.Context, d *schema.ResourceData, meta in d.Set(names.AttrARN, trustStore.TrustStoreArn) d.Set(names.AttrName, trustStore.Name) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.StringValue(trustStore.Name))) + d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(trustStore.Name))) return diags } func resourceTrustStoreUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { - input := &elbv2.ModifyTrustStoreInput{ + input := &elasticloadbalancingv2.ModifyTrustStoreInput{ CaCertificatesBundleS3Bucket: aws.String(d.Get("ca_certificates_bundle_s3_bucket").(string)), CaCertificatesBundleS3Key: aws.String(d.Get("ca_certificates_bundle_s3_key").(string)), TrustStoreArn: aws.String(d.Id()), @@ -202,7 +209,7 @@ func resourceTrustStoreUpdate(ctx context.Context, d *schema.ResourceData, meta input.CaCertificatesBundleS3ObjectVersion = aws.String(v.(string)) } - _, err := conn.ModifyTrustStoreWithContext(ctx, input) + _, err := conn.ModifyTrustStore(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Trust Store (%s): %s", d.Id(), err) @@ -214,18 +221,22 @@ func 
resourceTrustStoreUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceTrustStoreDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) if err := waitForNoTrustStoreAssociations(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for ELBV2 Trust Store (%s) associations delete: %s", d.Id(), err) } log.Printf("[DEBUG] Deleting ELBv2 Trust Store: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, d.Timeout(schema.TimeoutDelete), func() (interface{}, error) { - return conn.DeleteTrustStoreWithContext(ctx, &elbv2.DeleteTrustStoreInput{ + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.TrustStoreInUseException](ctx, d.Timeout(schema.TimeoutDelete), func() (interface{}, error) { + return conn.DeleteTrustStore(ctx, &elasticloadbalancingv2.DeleteTrustStoreInput{ TrustStoreArn: aws.String(d.Id()), }) - }, elbv2.ErrCodeTrustStoreInUseException, "is currently in use by a listener") + }, "is currently in use by a listener") + + if errs.IsA[*awstypes.TrustStoreNotFoundException](err) { + return diags + } if err != nil { return sdkdiag.AppendErrorf(diags, "deleting ELBv2 Trust Store (%s): %s", d.Id(), err) @@ -234,9 +245,9 @@ func resourceTrustStoreDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func FindTrustStoreByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (*elbv2.TrustStore, error) { - input := &elbv2.DescribeTrustStoresInput{ - TrustStoreArns: aws.StringSlice([]string{arn}), +func findTrustStoreByARN(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) (*awstypes.TrustStore, error) { + input := &elasticloadbalancingv2.DescribeTrustStoresInput{ + TrustStoreArns: []string{arn}, } output, err := findTrustStore(ctx, conn, input) @@ -245,7 +256,7 @@ func 
FindTrustStoreByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (*e } // Eventual consistency check. - if aws.StringValue(output.TrustStoreArn) != arn { + if aws.ToString(output.TrustStoreArn) != arn { return nil, &retry.NotFoundError{ LastRequest: input, } @@ -254,86 +265,111 @@ func FindTrustStoreByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (*e return output, nil } -func findTrustStore(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTrustStoresInput) (*elbv2.TrustStore, error) { +func findTrustStore(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTrustStoresInput) (*awstypes.TrustStore, error) { output, err := findTrustStores(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findTrustStores(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTrustStoresInput) ([]*elbv2.TrustStore, error) { - var output []*elbv2.TrustStore +func findTrustStores(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTrustStoresInput) ([]awstypes.TrustStore, error) { + var output []awstypes.TrustStore - err := conn.DescribeTrustStoresPagesWithContext(ctx, input, func(page *elbv2.DescribeTrustStoresOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticloadbalancingv2.NewDescribeTrustStoresPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.TrustStores { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.TrustStoreNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTrustStoreNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - 
} - if err != nil { - return nil, err + output = append(output, page.TrustStores...) } return output, nil } -func findTrustStoreAssociations(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTrustStoreAssociationsInput) ([]*elbv2.TrustStoreAssociation, error) { - var output []*elbv2.TrustStoreAssociation +func statusTrustStore(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findTrustStoreByARN(ctx, conn, arn) - err := conn.DescribeTrustStoreAssociationsPagesWithContext(ctx, input, func(page *elbv2.DescribeTrustStoreAssociationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + if tfresource.NotFound(err) { + return nil, "", nil } - for _, v := range page.TrustStoreAssociations { - if v != nil { - output = append(output, v) - } + if err != nil { + return nil, "", err } - return !lastPage - }) + return output, string(output.Status), nil + } +} - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTrustStoreNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } +func waitTrustStoreActive(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string, timeout time.Duration) (*awstypes.TrustStore, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.TrustStoreStatusCreating), + Target: enum.Slice(awstypes.TrustStoreStatusActive), + Refresh: statusTrustStore(ctx, conn, arn), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, } - if err != nil { - return nil, err + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.TrustStore); ok { + return output, err + } + + return nil, err +} + +func findTrustStoreAssociations(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTrustStoreAssociationsInput) ([]awstypes.TrustStoreAssociation, error) { + var output 
[]awstypes.TrustStoreAssociation + + pages := elasticloadbalancingv2.NewDescribeTrustStoreAssociationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.TrustStoreNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + output = append(output, page.TrustStoreAssociations...) } return output, nil } -func waitForNoTrustStoreAssociations(ctx context.Context, conn *elbv2.ELBV2, arn string, timeout time.Duration) error { - input := &elbv2.DescribeTrustStoreAssociationsInput{ +func waitForNoTrustStoreAssociations(ctx context.Context, conn *elasticloadbalancingv2.Client, arn string, timeout time.Duration) error { + input := &elasticloadbalancingv2.DescribeTrustStoreAssociationsInput{ TrustStoreArn: aws.String(arn), } _, err := tfresource.RetryUntilEqual(ctx, timeout, 0, func() (int, error) { associations, err := findTrustStoreAssociations(ctx, conn, input) + if tfresource.NotFound(err) { + return 0, nil + } + if err != nil { return 0, err } diff --git a/internal/service/elbv2/trust_store_data_source.go b/internal/service/elbv2/trust_store_data_source.go index 60f7bee9856..2ec5a9e76a2 100644 --- a/internal/service/elbv2/trust_store_data_source.go +++ b/internal/service/elbv2/trust_store_data_source.go @@ -6,8 +6,8 @@ package elbv2 import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -17,7 +17,7 @@ import ( ) // @SDKDataSource("aws_lb_trust_store", name="Trust Store") -func DataSourceTrustStore() *schema.Resource { +func dataSourceTrustStore() *schema.Resource { return &schema.Resource{ 
ReadWithoutTimeout: dataSourceTrustStoreRead, @@ -38,14 +38,14 @@ func DataSourceTrustStore() *schema.Resource { func dataSourceTrustStoreRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) - input := &elbv2.DescribeTrustStoresInput{} + input := &elasticloadbalancingv2.DescribeTrustStoresInput{} if v, ok := d.GetOk(names.AttrARN); ok { - input.TrustStoreArns = aws.StringSlice([]string{v.(string)}) + input.TrustStoreArns = []string{v.(string)} } else if v, ok := d.GetOk(names.AttrName); ok { - input.Names = aws.StringSlice([]string{v.(string)}) + input.Names = []string{v.(string)} } trustStore, err := findTrustStore(ctx, conn, input) @@ -54,7 +54,7 @@ func dataSourceTrustStoreRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("ELBv2 Trust Store", err)) } - d.SetId(aws.StringValue(trustStore.TrustStoreArn)) + d.SetId(aws.ToString(trustStore.TrustStoreArn)) d.Set(names.AttrARN, trustStore.TrustStoreArn) d.Set(names.AttrName, trustStore.Name) diff --git a/internal/service/elbv2/trust_store_revocation.go b/internal/service/elbv2/trust_store_revocation.go index ca39bb8ee9f..f94369b3baf 100644 --- a/internal/service/elbv2/trust_store_revocation.go +++ b/internal/service/elbv2/trust_store_revocation.go @@ -9,9 +9,9 @@ import ( "strconv" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ 
-25,7 +25,7 @@ import ( ) // @SDKResource("aws_lb_trust_store_revocation", name="Trust Store Revocation") -func ResourceTrustStoreRevocation() *schema.Resource { +func resourceTrustStoreRevocation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTrustStoreRevocationCreate, ReadWithoutTimeout: resourceTrustStoreRevocationRead, @@ -78,13 +78,13 @@ const ( func resourceTrustStoreRevocationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) s3Bucket := d.Get("revocations_s3_bucket").(string) s3Key := d.Get("revocations_s3_key").(string) trustStoreARN := d.Get("trust_store_arn").(string) - input := &elbv2.AddTrustStoreRevocationsInput{ - RevocationContents: []*elbv2.RevocationContent{{ + input := &elasticloadbalancingv2.AddTrustStoreRevocationsInput{ + RevocationContents: []awstypes.RevocationContent{{ S3Bucket: aws.String(s3Bucket), S3Key: aws.String(s3Key), }}, @@ -95,19 +95,19 @@ func resourceTrustStoreRevocationCreate(ctx context.Context, d *schema.ResourceD input.RevocationContents[0].S3ObjectVersion = aws.String(v.(string)) } - output, err := conn.AddTrustStoreRevocationsWithContext(ctx, input) + output, err := conn.AddTrustStoreRevocations(ctx, input) if err != nil { sdkdiag.AppendErrorf(diags, "creating ELBv2 Trust Store (%s) Revocation (s3://%s/%s): %s", trustStoreARN, s3Bucket, s3Key, err) } - revocationID := aws.Int64Value(output.TrustStoreRevocations[0].RevocationId) + revocationID := aws.ToInt64(output.TrustStoreRevocations[0].RevocationId) id := errs.Must(flex.FlattenResourceId([]string{trustStoreARN, strconv.FormatInt(revocationID, 10)}, trustStoreRevocationResourceIDPartCount, false)) d.SetId(id) _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return FindTrustStoreRevocationByTwoPartKey(ctx, conn, 
trustStoreARN, revocationID) + return findTrustStoreRevocationByTwoPartKey(ctx, conn, trustStoreARN, revocationID) }) if err != nil { @@ -119,7 +119,7 @@ func resourceTrustStoreRevocationCreate(ctx context.Context, d *schema.ResourceD func resourceTrustStoreRevocationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) parts, err := flex.ExpandResourceId(d.Id(), trustStoreRevocationResourceIDPartCount, false) if err != nil { @@ -128,7 +128,7 @@ func resourceTrustStoreRevocationRead(ctx context.Context, d *schema.ResourceDat trustStoreARN := parts[0] revocationID := errs.Must(strconv.ParseInt(parts[1], 10, 64)) - revocation, err := FindTrustStoreRevocationByTwoPartKey(ctx, conn, trustStoreARN, revocationID) + revocation, err := findTrustStoreRevocationByTwoPartKey(ctx, conn, trustStoreARN, revocationID) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Trust Store Revocation %s not found, removing from state", d.Id()) @@ -148,7 +148,7 @@ func resourceTrustStoreRevocationRead(ctx context.Context, d *schema.ResourceDat func resourceTrustStoreRevocationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + conn := meta.(*conns.AWSClient).ELBV2Client(ctx) parts, err := flex.ExpandResourceId(d.Id(), trustStoreRevocationResourceIDPartCount, false) if err != nil { @@ -159,8 +159,8 @@ func resourceTrustStoreRevocationDelete(ctx context.Context, d *schema.ResourceD revocationID := errs.Must(strconv.ParseInt(parts[1], 10, 64)) log.Printf("[DEBUG] Deleting ELBv2 Trust Store Revocation: %s", d.Id()) - _, err = conn.RemoveTrustStoreRevocationsWithContext(ctx, &elbv2.RemoveTrustStoreRevocationsInput{ - RevocationIds: aws.Int64Slice([]int64{revocationID}), + _, err = 
conn.RemoveTrustStoreRevocations(ctx, &elasticloadbalancingv2.RemoveTrustStoreRevocationsInput{ + RevocationIds: []int64{revocationID}, TrustStoreArn: aws.String(trustStoreARN), }) @@ -171,9 +171,9 @@ func resourceTrustStoreRevocationDelete(ctx context.Context, d *schema.ResourceD return diags } -func FindTrustStoreRevocationByTwoPartKey(ctx context.Context, conn *elbv2.ELBV2, trustStoreARN string, revocationID int64) (*elbv2.DescribeTrustStoreRevocation, error) { - input := &elbv2.DescribeTrustStoreRevocationsInput{ - RevocationIds: aws.Int64Slice([]int64{revocationID}), +func findTrustStoreRevocationByTwoPartKey(ctx context.Context, conn *elasticloadbalancingv2.Client, trustStoreARN string, revocationID int64) (*awstypes.DescribeTrustStoreRevocation, error) { + input := &elasticloadbalancingv2.DescribeTrustStoreRevocationsInput{ + RevocationIds: []int64{revocationID}, TrustStoreArn: aws.String(trustStoreARN), } output, err := findTrustStoreRevocation(ctx, conn, input) @@ -183,7 +183,7 @@ func FindTrustStoreRevocationByTwoPartKey(ctx context.Context, conn *elbv2.ELBV2 } // Eventual consistency check. 
- if aws.StringValue(output.TrustStoreArn) != trustStoreARN || aws.Int64Value(output.RevocationId) != revocationID { + if aws.ToString(output.TrustStoreArn) != trustStoreARN || aws.ToInt64(output.RevocationId) != revocationID { return nil, &retry.NotFoundError{ LastRequest: input, } @@ -192,42 +192,35 @@ func FindTrustStoreRevocationByTwoPartKey(ctx context.Context, conn *elbv2.ELBV2 return output, nil } -func findTrustStoreRevocation(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTrustStoreRevocationsInput) (*elbv2.DescribeTrustStoreRevocation, error) { +func findTrustStoreRevocation(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTrustStoreRevocationsInput) (*awstypes.DescribeTrustStoreRevocation, error) { output, err := findTrustStoreRevocations(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertSingleValueResult(output) } -func findTrustStoreRevocations(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTrustStoreRevocationsInput) ([]*elbv2.DescribeTrustStoreRevocation, error) { - var output []*elbv2.DescribeTrustStoreRevocation +func findTrustStoreRevocations(ctx context.Context, conn *elasticloadbalancingv2.Client, input *elasticloadbalancingv2.DescribeTrustStoreRevocationsInput) ([]awstypes.DescribeTrustStoreRevocation, error) { + var output []awstypes.DescribeTrustStoreRevocation - err := conn.DescribeTrustStoreRevocationsPagesWithContext(ctx, input, func(page *elbv2.DescribeTrustStoreRevocationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := elasticloadbalancingv2.NewDescribeTrustStoreRevocationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.TrustStoreRevocations { - if v != nil { - output = append(output, v) + if errs.IsA[*awstypes.TrustStoreNotFoundException](err) { + return nil, &retry.NotFoundError{ + 
LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTrustStoreNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.TrustStoreRevocations...) } return output, nil diff --git a/internal/service/elbv2/trust_store_revocation_test.go b/internal/service/elbv2/trust_store_revocation_test.go index 71cac88ad54..ef83b712297 100644 --- a/internal/service/elbv2/trust_store_revocation_test.go +++ b/internal/service/elbv2/trust_store_revocation_test.go @@ -9,7 +9,7 @@ import ( "strconv" "testing" - "github.com/aws/aws-sdk-go/service/elbv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccELBV2TrustStoreRevocation_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.DescribeTrustStoreRevocation + var conf awstypes.DescribeTrustStoreRevocation resourceName := "aws_lb_trust_store_revocation.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -49,14 +49,14 @@ func TestAccELBV2TrustStoreRevocation_basic(t *testing.T) { }) } -func testAccCheckTrustStoreRevocationExists(ctx context.Context, n string, v *elbv2.DescribeTrustStoreRevocation) resource.TestCheckFunc { +func testAccCheckTrustStoreRevocationExists(ctx context.Context, n string, v *awstypes.DescribeTrustStoreRevocation) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) trustStoreARN := 
rs.Primary.Attributes["trust_store_arn"] revocationID, err := strconv.ParseInt(rs.Primary.Attributes["revocation_id"], 10, 64) @@ -79,7 +79,7 @@ func testAccCheckTrustStoreRevocationExists(ctx context.Context, n string, v *el func testAccCheckTrustStoreRevocationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lb_trust_store_revocation" { diff --git a/internal/service/elbv2/trust_store_tags_gen_test.go b/internal/service/elbv2/trust_store_tags_gen_test.go index 428671ab5fc..45b1c640b29 100644 --- a/internal/service/elbv2/trust_store_tags_gen_test.go +++ b/internal/service/elbv2/trust_store_tags_gen_test.go @@ -5,7 +5,7 @@ package elbv2_test import ( "testing" - "github.com/aws/aws-sdk-go/service/elbv2" + "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" "github.com/hashicorp/terraform-plugin-testing/config" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -19,7 +19,7 @@ import ( func TestAccELBV2TrustStore_tags(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -202,7 +202,7 @@ func TestAccELBV2TrustStore_tags(t *testing.T) { func TestAccELBV2TrustStore_tags_null(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -265,7 +265,7 @@ func TestAccELBV2TrustStore_tags_null(t *testing.T) { func TestAccELBV2TrustStore_tags_AddOnUpdate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore 
resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -345,7 +345,7 @@ func TestAccELBV2TrustStore_tags_AddOnUpdate(t *testing.T) { func TestAccELBV2TrustStore_tags_EmptyTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -436,7 +436,7 @@ func TestAccELBV2TrustStore_tags_EmptyTag_OnCreate(t *testing.T) { func TestAccELBV2TrustStore_tags_EmptyTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -569,7 +569,7 @@ func TestAccELBV2TrustStore_tags_EmptyTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2TrustStore_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -655,7 +655,7 @@ func TestAccELBV2TrustStore_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_providerOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -848,7 +848,7 @@ func TestAccELBV2TrustStore_tags_DefaultTags_providerOnly(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_nonOverlapping(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1017,7 +1017,7 @@ func TestAccELBV2TrustStore_tags_DefaultTags_nonOverlapping(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_overlapping(t *testing.T) { ctx := 
acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1202,7 +1202,7 @@ func TestAccELBV2TrustStore_tags_DefaultTags_overlapping(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_updateToProviderOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1295,7 +1295,7 @@ func TestAccELBV2TrustStore_tags_DefaultTags_updateToProviderOnly(t *testing.T) func TestAccELBV2TrustStore_tags_DefaultTags_updateToResourceOnly(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1387,7 +1387,7 @@ func TestAccELBV2TrustStore_tags_DefaultTags_updateToResourceOnly(t *testing.T) func TestAccELBV2TrustStore_tags_DefaultTags_emptyResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1455,7 +1455,7 @@ func TestAccELBV2TrustStore_tags_DefaultTags_emptyResourceTag(t *testing.T) { func TestAccELBV2TrustStore_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1515,7 +1515,7 @@ func TestAccELBV2TrustStore_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) func TestAccELBV2TrustStore_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1580,7 +1580,7 
@@ func TestAccELBV2TrustStore_tags_DefaultTags_nullOverlappingResourceTag(t *testi func TestAccELBV2TrustStore_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1645,7 +1645,7 @@ func TestAccELBV2TrustStore_tags_DefaultTags_nullNonOverlappingResourceTag(t *te func TestAccELBV2TrustStore_tags_ComputedTag_OnCreate(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1702,7 +1702,7 @@ func TestAccELBV2TrustStore_tags_ComputedTag_OnCreate(t *testing.T) { func TestAccELBV2TrustStore_tags_ComputedTag_OnUpdate_Add(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1795,7 +1795,7 @@ func TestAccELBV2TrustStore_tags_ComputedTag_OnUpdate_Add(t *testing.T) { func TestAccELBV2TrustStore_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { ctx := acctest.Context(t) - var v elbv2.TrustStore + var v types.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) diff --git a/internal/service/elbv2/trust_store_test.go b/internal/service/elbv2/trust_store_test.go index 812d599ff27..a4359078d8b 100644 --- a/internal/service/elbv2/trust_store_test.go +++ b/internal/service/elbv2/trust_store_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/elbv2" + awstypes "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +22,7 @@ import ( func TestAccELBV2TrustStore_basic(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TrustStore + var conf awstypes.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -53,7 +53,7 @@ func TestAccELBV2TrustStore_basic(t *testing.T) { func TestAccELBV2TrustStore_disappears(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TrustStore + var conf awstypes.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -77,7 +77,7 @@ func TestAccELBV2TrustStore_disappears(t *testing.T) { func TestAccELBV2TrustStore_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TrustStore + var conf awstypes.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -106,7 +106,7 @@ func TestAccELBV2TrustStore_nameGenerated(t *testing.T) { func TestAccELBV2TrustStore_namePrefix(t *testing.T) { ctx := acctest.Context(t) - var conf elbv2.TrustStore + var conf awstypes.TrustStore resourceName := "aws_lb_trust_store.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -133,14 +133,14 @@ func TestAccELBV2TrustStore_namePrefix(t *testing.T) { }) } -func testAccCheckTrustStoreExists(ctx context.Context, n string, v *elbv2.TrustStore) resource.TestCheckFunc { +func testAccCheckTrustStoreExists(ctx context.Context, n string, v *awstypes.TrustStore) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) output, err := tfelbv2.FindTrustStoreByARN(ctx, conn, rs.Primary.ID) @@ -156,7 +156,7 @@ func testAccCheckTrustStoreExists(ctx context.Context, n 
string, v *elbv2.TrustS func testAccCheckTrustStoreDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_lb_trust_store" { diff --git a/internal/service/elbv2/validate.go b/internal/service/elbv2/validate.go index efae650391a..ee11772e6f7 100644 --- a/internal/service/elbv2/validate.go +++ b/internal/service/elbv2/validate.go @@ -5,6 +5,7 @@ package elbv2 import ( "fmt" + "strconv" "github.com/YakDriver/regexache" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" @@ -99,3 +100,29 @@ func validTargetGroupNamePrefix(v interface{}, k string) (ws []string, errors [] } return } + +func validTargetGroupHealthInput(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if value != "off" { + _, err := strconv.Atoi(value) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q must be an integer or 'off'", k)) + } + } + return +} + +func validTargetGroupHealthPercentageInput(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if value != "off" { + intValue, err := strconv.Atoi(value) + if err != nil || intValue < 1 || intValue > 100 { + errors = append(errors, fmt.Errorf( + "%q must be an integer between 0 and 100 or 'off'", k)) + } + } + return +} diff --git a/internal/service/emr/service_endpoint_resolver_gen.go b/internal/service/emr/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..2ab4e13c2d8 --- /dev/null +++ b/internal/service/emr/service_endpoint_resolver_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package emr + +import ( + "context" + "fmt" + "net" + "net/url" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + emr_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emr" + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} + +var _ emr_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver emr_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: emr_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params emr_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + 
"tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up emr endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*emr_sdkv2.Options) { + return func(o *emr_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/emr/service_endpoints_gen_test.go b/internal/service/emr/service_endpoints_gen_test.go index a0f3a025c5b..db0da56e3d8 100644 --- a/internal/service/emr/service_endpoints_gen_test.go +++ b/internal/service/emr/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -255,24 +257,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }) } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) 
(url.URL, error) { r := emr_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), emr_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := emr_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), emr_sdkv2.EndpointParameters{ @@ -280,14 +282,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -364,16 +366,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint 
%q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/emr/service_package_gen.go b/internal/service/emr/service_package_gen.go index db86d6e7074..43ac84ff7ac 100644 --- a/internal/service/emr/service_package_gen.go +++ b/internal/service/emr/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package emr @@ -8,7 +8,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" emr_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emr" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" emr_sdkv1 "github.com/aws/aws-sdk-go/service/emr" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -108,11 +107,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*e "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return emr_sdkv1.New(sess.Copy(&cfg)), nil @@ -122,19 +118,10 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*e func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*emr_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return emr_sdkv2.NewFromConfig(cfg, func(o *emr_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = 
aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return emr_sdkv2.NewFromConfig(cfg, + emr_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/emrcontainers/service_endpoint_resolver_gen.go b/internal/service/emrcontainers/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..9f03022d390 --- /dev/null +++ b/internal/service/emrcontainers/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package emrcontainers + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/emrcontainers/service_endpoints_gen_test.go b/internal/service/emrcontainers/service_endpoints_gen_test.go index 4360c641ce0..5acb2473bcd 100644 --- a/internal/service/emrcontainers/service_endpoints_gen_test.go +++ b/internal/service/emrcontainers/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func 
TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(emrcontainers_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(emrcontainers_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if 
dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/emrcontainers/service_package_gen.go b/internal/service/emrcontainers/service_package_gen.go index 1a4f65b054d..f247f7faa4f 100644 --- a/internal/service/emrcontainers/service_package_gen.go +++ b/internal/service/emrcontainers/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package emrcontainers @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" emrcontainers_sdkv1 "github.com/aws/aws-sdk-go/service/emrcontainers" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -70,11 +69,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*e "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return emrcontainers_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/emrserverless/application.go b/internal/service/emrserverless/application.go index 0c2b1b2cb9b..996af4ed4f7 100644 --- a/internal/service/emrserverless/application.go +++ b/internal/service/emrserverless/application.go @@ -155,6 +155,26 @@ func resourceApplication() *schema.Resource { }, }, }, + "interactive_configuration": { 
+ Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "livy_endpoint_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "studio_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + }, + }, + }, "maximum_capacity": { Type: schema.TypeList, Optional: true, @@ -258,6 +278,10 @@ func resourceApplicationCreate(ctx context.Context, d *schema.ResourceData, meta input.InitialCapacity = expandInitialCapacity(v.(*schema.Set)) } + if v, ok := d.GetOk("interactive_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.InteractiveConfiguration = expandInteractiveConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + if v, ok := d.GetOk("maximum_capacity"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.MaximumCapacity = expandMaximumCapacity(v.([]interface{})[0].(map[string]interface{})) } @@ -319,6 +343,10 @@ func resourceApplicationRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "setting initial_capacity: %s", err) } + if err := d.Set("interactive_configuration", []interface{}{flattenInteractiveConfiguration(application.InteractiveConfiguration)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting interactive_configuration: %s", err) + } + if err := d.Set("maximum_capacity", []interface{}{flattenMaximumCapacity(application.MaximumCapacity)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting maximum_capacity: %s", err) } @@ -362,6 +390,10 @@ func resourceApplicationUpdate(ctx context.Context, d *schema.ResourceData, meta input.InitialCapacity = expandInitialCapacity(v.(*schema.Set)) } + if v, ok := d.GetOk("interactive_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.InteractiveConfiguration = 
expandInteractiveConfiguration(v.([]interface{})[0].(map[string]interface{})) + } + if v, ok := d.GetOk("maximum_capacity"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.MaximumCapacity = expandMaximumCapacity(v.([]interface{})[0].(map[string]interface{})) } @@ -573,6 +605,42 @@ func flattenAutoStopConfig(apiObject *types.AutoStopConfig) map[string]interface return tfMap } +func expandInteractiveConfiguration(tfMap map[string]interface{}) *types.InteractiveConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &types.InteractiveConfiguration{} + + if v, ok := tfMap["livy_endpoint_enabled"].(bool); ok { + apiObject.LivyEndpointEnabled = aws.Bool(v) + } + + if v, ok := tfMap["studio_enabled"].(bool); ok { + apiObject.StudioEnabled = aws.Bool(v) + } + + return apiObject +} + +func flattenInteractiveConfiguration(apiObject *types.InteractiveConfiguration) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.LivyEndpointEnabled; v != nil { + tfMap["livy_endpoint_enabled"] = aws.ToBool(v) + } + + if v := apiObject.StudioEnabled; v != nil { + tfMap["studio_enabled"] = aws.ToBool(v) + } + + return tfMap +} + func expandMaximumCapacity(tfMap map[string]interface{}) *types.MaximumAllowedResources { if tfMap == nil { return nil diff --git a/internal/service/emrserverless/application_test.go b/internal/service/emrserverless/application_test.go index 58499d1f5d2..a0317904a3c 100644 --- a/internal/service/emrserverless/application_test.go +++ b/internal/service/emrserverless/application_test.go @@ -230,6 +230,63 @@ func TestAccEMRServerlessApplication_imageConfiguration(t *testing.T) { }) } +func TestAccEMRServerlessApplication_interactiveConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var application types.Application + resourceName := "aws_emrserverless_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.EMRServerlessServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccApplicationConfig_interactiveConfiguration(rName, true, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.livy_endpoint_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.studio_enabled", acctest.CtTrue), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccApplicationConfig_interactiveConfiguration(rName, true, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.livy_endpoint_enabled", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.studio_enabled", acctest.CtFalse), + ), + }, + { + Config: testAccApplicationConfig_interactiveConfiguration(rName, false, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.livy_endpoint_enabled", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.0.studio_enabled", acctest.CtTrue), + ), + }, + { + Config: 
testAccApplicationConfig_interactiveConfiguration(rName, false, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &application), + resource.TestCheckResourceAttr(resourceName, "interactive_configuration.#", acctest.Ct1), + resource.TestCheckNoResourceAttr(resourceName, "interactive_configuration.0.livy_endpoint_enabled"), + resource.TestCheckNoResourceAttr(resourceName, "interactive_configuration.0.studio_enabled"), + ), + }, + }, + }) +} + func TestAccEMRServerlessApplication_maxCapacity(t *testing.T) { ctx := acctest.Context(t) var application types.Application @@ -460,6 +517,20 @@ resource "aws_emrserverless_application" "test" { `, rName, cpu) } +func testAccApplicationConfig_interactiveConfiguration(rName string, livyEndpointEnabled, studioEnabled bool) string { + return fmt.Sprintf(` +resource "aws_emrserverless_application" "test" { + name = %[1]q + release_label = "emr-7.1.0" + type = "spark" + interactive_configuration { + livy_endpoint_enabled = %[2]t + studio_enabled = %[3]t + } +} +`, rName, livyEndpointEnabled, studioEnabled) +} + func testAccApplicationConfig_maxCapacity(rName, cpu string) string { return fmt.Sprintf(` resource "aws_emrserverless_application" "test" { diff --git a/internal/service/emrserverless/service_endpoint_resolver_gen.go b/internal/service/emrserverless/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..1e5a0a357fc --- /dev/null +++ b/internal/service/emrserverless/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package emrserverless + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + emrserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrserverless" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ emrserverless_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver emrserverless_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: emrserverless_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params emrserverless_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up emrserverless endpoint %q: %s", hostname, err) + return + } + } else { + 
return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*emrserverless_sdkv2.Options) { + return func(o *emrserverless_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/emrserverless/service_endpoints_gen_test.go b/internal/service/emrserverless/service_endpoints_gen_test.go index 3248697ff8c..a86c2c6c35b 100644 --- a/internal/service/emrserverless/service_endpoints_gen_test.go +++ b/internal/service/emrserverless/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := emrserverless_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), emrserverless_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := emrserverless_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), emrserverless_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/emrserverless/service_package_gen.go b/internal/service/emrserverless/service_package_gen.go index ba546201fb0..92fdfd78ebd 100644 --- 
a/internal/service/emrserverless/service_package_gen.go +++ b/internal/service/emrserverless/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package emrserverless @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" emrserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/emrserverless" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*emrserverless_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return emrserverless_sdkv2.NewFromConfig(cfg, func(o *emrserverless_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return emrserverless_sdkv2.NewFromConfig(cfg, + emrserverless_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/events/service_endpoint_resolver_gen.go b/internal/service/events/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..3506ec51991 --- /dev/null +++ b/internal/service/events/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// 
Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package events + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + eventbridge_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eventbridge" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ eventbridge_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver eventbridge_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: eventbridge_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params eventbridge_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up eventbridge endpoint 
%q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*eventbridge_sdkv2.Options) { + return func(o *eventbridge_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/events/service_endpoints_gen_test.go b/internal/service/events/service_endpoints_gen_test.go index 938cab7f209..de3aa35c867 100644 --- a/internal/service/events/service_endpoints_gen_test.go +++ b/internal/service/events/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -93,7 +95,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -333,7 +335,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -354,24 +356,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := eventbridge_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), eventbridge_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := eventbridge_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), eventbridge_sdkv2.EndpointParameters{ @@ -379,14 +381,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -478,16 +480,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/events/service_package_gen.go b/internal/service/events/service_package_gen.go index 044088c642a..e0cca987a99 100644 --- a/internal/service/events/service_package_gen.go +++ 
b/internal/service/events/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package events @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" eventbridge_sdkv2 "github.com/aws/aws-sdk-go-v2/service/eventbridge" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -107,19 +106,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*eventbridge_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return eventbridge_sdkv2.NewFromConfig(cfg, func(o *eventbridge_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return eventbridge_sdkv2.NewFromConfig(cfg, + eventbridge_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/events/tags_gen.go b/internal/service/events/tags_gen.go index 844769c9461..4a6ac768d54 100644 --- a/internal/service/events/tags_gen.go +++ b/internal/service/events/tags_gen.go @@ -98,12 +98,12 @@ func setTagsOut(ctx context.Context, tags []awstypes.Tag) { } // createTags creates events service tags for new resources. 
-func createTags(ctx context.Context, conn *eventbridge.Client, identifier string, tags []awstypes.Tag) error { +func createTags(ctx context.Context, conn *eventbridge.Client, identifier string, tags []awstypes.Tag, optFns ...func(*eventbridge.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates events service tags. diff --git a/internal/service/evidently/service_endpoint_resolver_gen.go b/internal/service/evidently/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ef4156707ad --- /dev/null +++ b/internal/service/evidently/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package evidently + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + evidently_sdkv2 "github.com/aws/aws-sdk-go-v2/service/evidently" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ evidently_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver evidently_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: evidently_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params evidently_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = 
aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up evidently endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*evidently_sdkv2.Options) { + return func(o *evidently_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/evidently/service_endpoints_gen_test.go b/internal/service/evidently/service_endpoints_gen_test.go index f198cf0cf70..6d6e4e5f1c5 100644 --- a/internal/service/evidently/service_endpoints_gen_test.go +++ b/internal/service/evidently/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - 
expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := evidently_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), evidently_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := evidently_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), evidently_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + 
t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/evidently/service_package_gen.go b/internal/service/evidently/service_package_gen.go index 8b7b79cd644..df2ecbd1b68 100644 --- a/internal/service/evidently/service_package_gen.go +++ b/internal/service/evidently/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package evidently @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" evidently_sdkv2 "github.com/aws/aws-sdk-go-v2/service/evidently" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -72,19 +71,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*evidently_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return evidently_sdkv2.NewFromConfig(cfg, func(o *evidently_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - 
tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return evidently_sdkv2.NewFromConfig(cfg, + evidently_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/finspace/service_endpoint_resolver_gen.go b/internal/service/finspace/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ee6aad2fed0 --- /dev/null +++ b/internal/service/finspace/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package finspace + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + finspace_sdkv2 "github.com/aws/aws-sdk-go-v2/service/finspace" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ finspace_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver finspace_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: finspace_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params finspace_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS 
{ + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up finspace endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*finspace_sdkv2.Options) { + return func(o *finspace_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/finspace/service_endpoints_gen_test.go b/internal/service/finspace/service_endpoints_gen_test.go index 1483c5ecfc0..600018888c8 100644 --- a/internal/service/finspace/service_endpoints_gen_test.go +++ b/internal/service/finspace/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, 
expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := finspace_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), finspace_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := finspace_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), finspace_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer 
FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index b54cbb18723..a08f0926312 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package finspace @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" finspace_sdkv2 "github.com/aws/aws-sdk-go-v2/service/finspace" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -96,19 +95,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*finspace_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return finspace_sdkv2.NewFromConfig(cfg, func(o *finspace_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = 
aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return finspace_sdkv2.NewFromConfig(cfg, + finspace_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/finspace/tags_gen.go b/internal/service/finspace/tags_gen.go index 8abb0deb456..f495ece6f6f 100644 --- a/internal/service/finspace/tags_gen.go +++ b/internal/service/finspace/tags_gen.go @@ -80,12 +80,12 @@ func setTagsOut(ctx context.Context, tags map[string]string) { } // createTags creates finspace service tags for new resources. -func createTags(ctx context.Context, conn *finspace.Client, identifier string, tags map[string]string) error { +func createTags(ctx context.Context, conn *finspace.Client, identifier string, tags map[string]string, optFns ...func(*finspace.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, tags) + return updateTags(ctx, conn, identifier, nil, tags, optFns...) } // updateTags updates finspace service tags. diff --git a/internal/service/firehose/service_endpoint_resolver_gen.go b/internal/service/firehose/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..35ce6c8e9cf --- /dev/null +++ b/internal/service/firehose/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package firehose + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + firehose_sdkv2 "github.com/aws/aws-sdk-go-v2/service/firehose" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ firehose_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver firehose_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: firehose_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params firehose_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up firehose endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*firehose_sdkv2.Options) { + return func(o *firehose_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/firehose/service_endpoints_gen_test.go b/internal/service/firehose/service_endpoints_gen_test.go index ac99e42e4f4..43e377a9cb0 100644 --- a/internal/service/firehose/service_endpoints_gen_test.go +++ b/internal/service/firehose/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := firehose_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), firehose_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
firehose_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), firehose_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/firehose/service_package_gen.go b/internal/service/firehose/service_package_gen.go index ea996fe2382..b1de02419a0 100644 --- a/internal/service/firehose/service_package_gen.go +++ b/internal/service/firehose/service_package_gen.go @@ -1,4 +1,4 @@ -// Code 
generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package firehose @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" firehose_sdkv2 "github.com/aws/aws-sdk-go-v2/service/firehose" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -53,19 +52,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*firehose_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return firehose_sdkv2.NewFromConfig(cfg, func(o *firehose_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return firehose_sdkv2.NewFromConfig(cfg, + firehose_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/fis/experiment_template.go b/internal/service/fis/experiment_template.go index b8007e71423..7aaf7376259 100644 --- a/internal/service/fis/experiment_template.go +++ b/internal/service/fis/experiment_template.go @@ -6,33 +6,32 @@ package fis import ( "context" "errors" + "log" "time" "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/fis" - 
"github.com/aws/aws-sdk-go-v2/service/fis/types" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/fis/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - ErrCodeNotFound = 404 - ResNameExperimentTemplate = "Experiment Template" -) - // @SDKResource("aws_fis_experiment_template", name="Experiment Template") // @Tags -func ResourceExperimentTemplate() *schema.Resource { +func resourceExperimentTemplate() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceExperimentTemplateCreate, ReadWithoutTimeout: resourceExperimentTemplateRead, @@ -96,7 +95,6 @@ func ResourceExperimentTemplate() *schema.Resource { "start_after": { Type: schema.TypeSet, Optional: true, - Set: schema.HashString, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringLenBetween(0, 64), @@ -129,6 +127,26 @@ func ResourceExperimentTemplate() *schema.Resource { Required: true, ValidateFunc: validation.StringLenBetween(0, 512), }, + "experiment_options": { + Type: schema.TypeList, + Optional: true, + Computed: true, + 
MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "account_targeting": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.AccountTargeting](), + }, + "empty_target_resolution_mode": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.EmptyTargetResolutionMode](), + }, + }, + }, + }, "log_configuration": { Type: schema.TypeList, Optional: true, @@ -213,7 +231,6 @@ func ResourceExperimentTemplate() *schema.Resource { names.AttrValues: { Type: schema.TypeSet, Required: true, - Set: schema.HashString, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: validation.StringLenBetween(0, 128), @@ -236,7 +253,6 @@ func ResourceExperimentTemplate() *schema.Resource { Type: schema.TypeSet, Optional: true, MaxItems: 5, - Set: schema.HashString, Elem: &schema.Schema{ Type: schema.TypeString, ValidateFunc: verify.ValidARN, @@ -285,7 +301,6 @@ func ResourceExperimentTemplate() *schema.Resource { func resourceExperimentTemplateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FISClient(ctx) input := &fis.CreateExperimentTemplateInput{ @@ -298,15 +313,20 @@ func resourceExperimentTemplateCreate(ctx context.Context, d *schema.ResourceDat Tags: getTagsIn(ctx), } - targets, err := expandExperimentTemplateTargets(d.Get(names.AttrTarget).(*schema.Set)) - if err != nil { - return create.AppendDiagError(diags, names.FIS, create.ErrActionCreating, ResNameExperimentTemplate, d.Get(names.AttrDescription).(string), err) + if v, ok := d.GetOk("experiment_options"); ok { + input.ExperimentOptions = expandCreateExperimentTemplateExperimentOptionsInput(v.([]interface{})) + } + + if targets, err := expandExperimentTemplateTargets(d.Get(names.AttrTarget).(*schema.Set)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } else { + input.Targets = targets } - input.Targets = targets 
output, err := conn.CreateExperimentTemplate(ctx, input) + if err != nil { - return create.AppendDiagError(diags, names.FIS, create.ErrActionCreating, ResNameExperimentTemplate, d.Get(names.AttrDescription).(string), err) + return sdkdiag.AppendErrorf(diags, "creating FIS Experiment Template: %s", err) } d.SetId(aws.ToString(output.ExperimentTemplate.Id)) @@ -316,52 +336,37 @@ func resourceExperimentTemplateCreate(ctx context.Context, d *schema.ResourceDat func resourceExperimentTemplateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FISClient(ctx) - input := &fis.GetExperimentTemplateInput{Id: aws.String(d.Id())} - out, err := conn.GetExperimentTemplate(ctx, input) - - var nf *types.ResourceNotFoundException - if !d.IsNewResource() && errors.As(err, &nf) { - create.LogNotFoundRemoveState(names.FIS, create.ErrActionReading, ResNameExperimentTemplate, d.Id()) - d.SetId("") - return diags - } + experimentTemplate, err := findExperimentTemplateByID(ctx, conn, d.Id()) - if !d.IsNewResource() && tfawserr.ErrStatusCodeEquals(err, ErrCodeNotFound) { - create.LogNotFoundRemoveState(names.FIS, create.ErrActionReading, ResNameExperimentTemplate, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FIS Experiment Template (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return create.AppendDiagError(diags, names.FIS, create.ErrActionReading, ResNameExperimentTemplate, d.Id(), err) - } - - experimentTemplate := out.ExperimentTemplate - if experimentTemplate == nil { - return create.AppendDiagError(diags, names.FIS, create.ErrActionReading, ResNameExperimentTemplate, d.Id(), errors.New("empty result")) + return sdkdiag.AppendErrorf(diags, "reading FIS Experiment Template (%s): %s", d.Id(), err) } d.SetId(aws.ToString(experimentTemplate.Id)) + if err := d.Set(names.AttrAction, 
flattenExperimentTemplateActions(experimentTemplate.Actions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting action: %s", err) + } d.Set(names.AttrRoleARN, experimentTemplate.RoleArn) d.Set(names.AttrDescription, experimentTemplate.Description) - - if err := d.Set(names.AttrAction, flattenExperimentTemplateActions(experimentTemplate.Actions)); err != nil { - return create.AppendDiagSettingError(diags, names.FIS, ResNameExperimentTemplate, d.Id(), names.AttrAction, err) + if err := d.Set("experiment_options", flattenExperimentTemplateExperimentOptions(experimentTemplate.ExperimentOptions)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting experiment_options: %s", err) } - if err := d.Set("log_configuration", flattenExperimentTemplateLogConfiguration(experimentTemplate.LogConfiguration)); err != nil { - return create.AppendDiagSettingError(diags, names.FIS, ResNameExperimentTemplate, d.Id(), "log_configuration", err) + return sdkdiag.AppendErrorf(diags, "setting log_configuration: %s", err) } - if err := d.Set("stop_condition", flattenExperimentTemplateStopConditions(experimentTemplate.StopConditions)); err != nil { - return create.AppendDiagSettingError(diags, names.FIS, ResNameExperimentTemplate, d.Id(), "stop_condition", err) + return sdkdiag.AppendErrorf(diags, "setting stop_condition: %s", err) } - if err := d.Set(names.AttrTarget, flattenExperimentTemplateTargets(experimentTemplate.Targets)); err != nil { - return create.AppendDiagSettingError(diags, names.FIS, ResNameExperimentTemplate, d.Id(), names.AttrTarget, err) + return sdkdiag.AppendErrorf(diags, "setting target: %s", err) } setTagsOut(ctx, experimentTemplate.Tags) @@ -387,6 +392,10 @@ func resourceExperimentTemplateUpdate(ctx context.Context, d *schema.ResourceDat input.Description = aws.String(d.Get(names.AttrDescription).(string)) } + if d.HasChange("experiment_options") { + input.ExperimentOptions = 
expandUpdateExperimentTemplateExperimentOptionsInput(d.Get("experiment_options").([]interface{})) + } + if d.HasChange("log_configuration") { config := expandExperimentTemplateLogConfigurationForUpdate(d.Get("log_configuration").([]interface{})) input.LogConfiguration = config @@ -401,16 +410,17 @@ func resourceExperimentTemplateUpdate(ctx context.Context, d *schema.ResourceDat } if d.HasChange(names.AttrTarget) { - targets, err := expandExperimentTemplateTargetsForUpdate(d.Get(names.AttrTarget).(*schema.Set)) - if err != nil { - return create.AppendDiagError(diags, names.FIS, create.ErrActionUpdating, ResNameExperimentTemplate, d.Id(), err) + if targets, err := expandExperimentTemplateTargetsForUpdate(d.Get(names.AttrTarget).(*schema.Set)); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } else { + input.Targets = targets } - input.Targets = targets } _, err := conn.UpdateExperimentTemplate(ctx, input) + if err != nil { - return create.AppendDiagError(diags, names.FIS, create.ErrActionUpdating, ResNameExperimentTemplate, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating FIS Experiment Template (%s): %s", d.Id(), err) } } @@ -419,38 +429,59 @@ func resourceExperimentTemplateUpdate(ctx context.Context, d *schema.ResourceDat func resourceExperimentTemplateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).FISClient(ctx) + + log.Printf("[DEBUG] Deleting FIS Experiment Template: %s", d.Id()) _, err := conn.DeleteExperimentTemplate(ctx, &fis.DeleteExperimentTemplateInput{ Id: aws.String(d.Id()), }) - var nf *types.ResourceNotFoundException - if errors.As(err, &nf) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } - if tfawserr.ErrStatusCodeEquals(err, ErrCodeNotFound) { - return diags + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting FIS Experiment Template (%s): %s", d.Id(), err) + } + + return diags +} + +func 
findExperimentTemplateByID(ctx context.Context, conn *fis.Client, id string) (*awstypes.ExperimentTemplate, error) { + input := &fis.GetExperimentTemplateInput{ + Id: aws.String(id), + } + + output, err := conn.GetExperimentTemplate(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } } if err != nil { - return create.AppendDiagError(diags, names.FIS, create.ErrActionDeleting, ResNameExperimentTemplate, d.Id(), err) + return nil, err } - return diags + if output == nil || output.ExperimentTemplate == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.ExperimentTemplate, nil } -func expandExperimentTemplateActions(l *schema.Set) map[string]types.CreateExperimentTemplateActionInput { +func expandExperimentTemplateActions(l *schema.Set) map[string]awstypes.CreateExperimentTemplateActionInput { if l.Len() == 0 { return nil } - attrs := make(map[string]types.CreateExperimentTemplateActionInput, l.Len()) + attrs := make(map[string]awstypes.CreateExperimentTemplateActionInput, l.Len()) for _, m := range l.List() { raw := m.(map[string]interface{}) - config := types.CreateExperimentTemplateActionInput{} + config := awstypes.CreateExperimentTemplateActionInput{} if v, ok := raw["action_id"].(string); ok && v != "" { config.ActionId = aws.String(v) @@ -480,16 +511,16 @@ func expandExperimentTemplateActions(l *schema.Set) map[string]types.CreateExper return attrs } -func expandExperimentTemplateActionsForUpdate(l *schema.Set) map[string]types.UpdateExperimentTemplateActionInputItem { +func expandExperimentTemplateActionsForUpdate(l *schema.Set) map[string]awstypes.UpdateExperimentTemplateActionInputItem { if l.Len() == 0 { return nil } - attrs := make(map[string]types.UpdateExperimentTemplateActionInputItem, l.Len()) + attrs := make(map[string]awstypes.UpdateExperimentTemplateActionInputItem, l.Len()) for _, m := range l.List() { raw := 
m.(map[string]interface{}) - config := types.UpdateExperimentTemplateActionInputItem{} + config := awstypes.UpdateExperimentTemplateActionInputItem{} if v, ok := raw["action_id"].(string); ok && v != "" { config.ActionId = aws.String(v) @@ -519,16 +550,70 @@ func expandExperimentTemplateActionsForUpdate(l *schema.Set) map[string]types.Up return attrs } -func expandExperimentTemplateStopConditions(l *schema.Set) []types.CreateExperimentTemplateStopConditionInput { +func expandCreateExperimentTemplateExperimentOptionsInput(tfMap []interface{}) *awstypes.CreateExperimentTemplateExperimentOptionsInput { + if len(tfMap) == 0 || tfMap[0] == nil { + return nil + } + + apiObject := &awstypes.CreateExperimentTemplateExperimentOptionsInput{} + + m := tfMap[0].(map[string]interface{}) + + if v, ok := m["account_targeting"].(string); ok { + apiObject.AccountTargeting = awstypes.AccountTargeting(v) + } + + if v, ok := m["empty_target_resolution_mode"].(string); ok { + apiObject.EmptyTargetResolutionMode = awstypes.EmptyTargetResolutionMode(v) + } + + return apiObject +} + +func expandUpdateExperimentTemplateExperimentOptionsInput(tfMap []interface{}) *awstypes.UpdateExperimentTemplateExperimentOptionsInput { + if len(tfMap) == 0 || tfMap[0] == nil { + return nil + } + + m := tfMap[0].(map[string]interface{}) + + apiObject := &awstypes.UpdateExperimentTemplateExperimentOptionsInput{} + + if v, ok := m["empty_target_resolution_mode"].(string); ok { + apiObject.EmptyTargetResolutionMode = awstypes.EmptyTargetResolutionMode(v) + } + + return apiObject +} + +func flattenExperimentTemplateExperimentOptions(apiObject *awstypes.ExperimentTemplateExperimentOptions) []map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := make([]map[string]interface{}, 1) + tfMap[0] = make(map[string]interface{}) + if v := apiObject.AccountTargeting; v != "" { + tfMap[0]["account_targeting"] = v + } + + if v := apiObject.EmptyTargetResolutionMode; v != "" { + 
tfMap[0]["empty_target_resolution_mode"] = v + } + + return tfMap +} + +func expandExperimentTemplateStopConditions(l *schema.Set) []awstypes.CreateExperimentTemplateStopConditionInput { if l.Len() == 0 { return nil } - items := []types.CreateExperimentTemplateStopConditionInput{} + items := []awstypes.CreateExperimentTemplateStopConditionInput{} for _, m := range l.List() { raw := m.(map[string]interface{}) - config := types.CreateExperimentTemplateStopConditionInput{} + config := awstypes.CreateExperimentTemplateStopConditionInput{} if v, ok := raw[names.AttrSource].(string); ok && v != "" { config.Source = aws.String(v) @@ -544,14 +629,14 @@ func expandExperimentTemplateStopConditions(l *schema.Set) []types.CreateExperim return items } -func expandExperimentTemplateLogConfiguration(l []interface{}) *types.CreateExperimentTemplateLogConfigurationInput { +func expandExperimentTemplateLogConfiguration(l []interface{}) *awstypes.CreateExperimentTemplateLogConfigurationInput { if len(l) == 0 { return nil } raw := l[0].(map[string]interface{}) - config := types.CreateExperimentTemplateLogConfigurationInput{ + config := awstypes.CreateExperimentTemplateLogConfigurationInput{ LogSchemaVersion: aws.Int32(int32(raw["log_schema_version"].(int))), } @@ -566,27 +651,27 @@ func expandExperimentTemplateLogConfiguration(l []interface{}) *types.CreateExpe return &config } -func expandExperimentTemplateCloudWatchLogsConfiguration(l []interface{}) *types.ExperimentTemplateCloudWatchLogsLogConfigurationInput { +func expandExperimentTemplateCloudWatchLogsConfiguration(l []interface{}) *awstypes.ExperimentTemplateCloudWatchLogsLogConfigurationInput { if len(l) == 0 { return nil } raw := l[0].(map[string]interface{}) - config := types.ExperimentTemplateCloudWatchLogsLogConfigurationInput{ + config := awstypes.ExperimentTemplateCloudWatchLogsLogConfigurationInput{ LogGroupArn: aws.String(raw["log_group_arn"].(string)), } return &config } -func expandExperimentTemplateS3Configuration(l 
[]interface{}) *types.ExperimentTemplateS3LogConfigurationInput { +func expandExperimentTemplateS3Configuration(l []interface{}) *awstypes.ExperimentTemplateS3LogConfigurationInput { if len(l) == 0 { return nil } raw := l[0].(map[string]interface{}) - config := types.ExperimentTemplateS3LogConfigurationInput{ + config := awstypes.ExperimentTemplateS3LogConfigurationInput{ BucketName: aws.String(raw[names.AttrBucketName].(string)), } if v, ok := raw[names.AttrPrefix].(string); ok && v != "" { @@ -596,16 +681,16 @@ func expandExperimentTemplateS3Configuration(l []interface{}) *types.ExperimentT return &config } -func expandExperimentTemplateStopConditionsForUpdate(l *schema.Set) []types.UpdateExperimentTemplateStopConditionInput { +func expandExperimentTemplateStopConditionsForUpdate(l *schema.Set) []awstypes.UpdateExperimentTemplateStopConditionInput { if l.Len() == 0 { return nil } - items := []types.UpdateExperimentTemplateStopConditionInput{} + items := []awstypes.UpdateExperimentTemplateStopConditionInput{} for _, m := range l.List() { raw := m.(map[string]interface{}) - config := types.UpdateExperimentTemplateStopConditionInput{} + config := awstypes.UpdateExperimentTemplateStopConditionInput{} if v, ok := raw[names.AttrSource].(string); ok && v != "" { config.Source = aws.String(v) @@ -621,17 +706,17 @@ func expandExperimentTemplateStopConditionsForUpdate(l *schema.Set) []types.Upda return items } -func expandExperimentTemplateTargets(l *schema.Set) (map[string]types.CreateExperimentTemplateTargetInput, error) { +func expandExperimentTemplateTargets(l *schema.Set) (map[string]awstypes.CreateExperimentTemplateTargetInput, error) { if l.Len() == 0 { //Even though a template with no targets is valid (eg. containing just aws:fis:wait) and the API reference states that targets is not required, the key still needs to be present. 
- return map[string]types.CreateExperimentTemplateTargetInput{}, nil + return map[string]awstypes.CreateExperimentTemplateTargetInput{}, nil } - attrs := make(map[string]types.CreateExperimentTemplateTargetInput, l.Len()) + attrs := make(map[string]awstypes.CreateExperimentTemplateTargetInput, l.Len()) for _, m := range l.List() { raw := m.(map[string]interface{}) - config := types.CreateExperimentTemplateTargetInput{} + config := awstypes.CreateExperimentTemplateTargetInput{} var hasSeenResourceArns bool if v, ok := raw[names.AttrFilter].([]interface{}); ok && len(v) > 0 { @@ -672,16 +757,16 @@ func expandExperimentTemplateTargets(l *schema.Set) (map[string]types.CreateExpe return attrs, nil } -func expandExperimentTemplateTargetsForUpdate(l *schema.Set) (map[string]types.UpdateExperimentTemplateTargetInput, error) { +func expandExperimentTemplateTargetsForUpdate(l *schema.Set) (map[string]awstypes.UpdateExperimentTemplateTargetInput, error) { if l.Len() == 0 { return nil, nil } - attrs := make(map[string]types.UpdateExperimentTemplateTargetInput, l.Len()) + attrs := make(map[string]awstypes.UpdateExperimentTemplateTargetInput, l.Len()) for _, m := range l.List() { raw := m.(map[string]interface{}) - config := types.UpdateExperimentTemplateTargetInput{} + config := awstypes.UpdateExperimentTemplateTargetInput{} var hasSeenResourceArns bool if v, ok := raw[names.AttrFilter].([]interface{}); ok && len(v) > 0 { @@ -722,13 +807,13 @@ func expandExperimentTemplateTargetsForUpdate(l *schema.Set) (map[string]types.U return attrs, nil } -func expandExperimentTemplateLogConfigurationForUpdate(l []interface{}) *types.UpdateExperimentTemplateLogConfigurationInput { +func expandExperimentTemplateLogConfigurationForUpdate(l []interface{}) *awstypes.UpdateExperimentTemplateLogConfigurationInput { if len(l) == 0 { - return &types.UpdateExperimentTemplateLogConfigurationInput{} + return &awstypes.UpdateExperimentTemplateLogConfigurationInput{} } raw := 
l[0].(map[string]interface{}) - config := types.UpdateExperimentTemplateLogConfigurationInput{ + config := awstypes.UpdateExperimentTemplateLogConfigurationInput{ LogSchemaVersion: aws.Int32(int32(raw["log_schema_version"].(int))), } if v, ok := raw["cloudwatch_logs_configuration"].([]interface{}); ok && len(v) > 0 { @@ -776,16 +861,16 @@ func expandExperimentTemplateActionTargets(l []interface{}) map[string]string { return attrs } -func expandExperimentTemplateTargetFilters(l []interface{}) []types.ExperimentTemplateTargetInputFilter { +func expandExperimentTemplateTargetFilters(l []interface{}) []awstypes.ExperimentTemplateTargetInputFilter { if len(l) == 0 || l[0] == nil { return nil } - items := []types.ExperimentTemplateTargetInputFilter{} + items := []awstypes.ExperimentTemplateTargetInputFilter{} for _, m := range l { raw := m.(map[string]interface{}) - config := types.ExperimentTemplateTargetInputFilter{} + config := awstypes.ExperimentTemplateTargetInputFilter{} if v, ok := raw[names.AttrPath].(string); ok && v != "" { config.Path = aws.String(v) @@ -818,7 +903,7 @@ func expandExperimentTemplateTargetResourceTags(l *schema.Set) map[string]string return attrs } -func flattenExperimentTemplateActions(configured map[string]types.ExperimentTemplateAction) []map[string]interface{} { +func flattenExperimentTemplateActions(configured map[string]awstypes.ExperimentTemplateAction) []map[string]interface{} { dataResources := make([]map[string]interface{}, 0, len(configured)) for k, v := range configured { @@ -837,7 +922,7 @@ func flattenExperimentTemplateActions(configured map[string]types.ExperimentTemp return dataResources } -func flattenExperimentTemplateStopConditions(configured []types.ExperimentTemplateStopCondition) []map[string]interface{} { +func flattenExperimentTemplateStopConditions(configured []awstypes.ExperimentTemplateStopCondition) []map[string]interface{} { dataResources := make([]map[string]interface{}, 0, len(configured)) for _, v := range 
configured { @@ -854,7 +939,7 @@ func flattenExperimentTemplateStopConditions(configured []types.ExperimentTempla return dataResources } -func flattenExperimentTemplateTargets(configured map[string]types.ExperimentTemplateTarget) []map[string]interface{} { +func flattenExperimentTemplateTargets(configured map[string]awstypes.ExperimentTemplateTarget) []map[string]interface{} { dataResources := make([]map[string]interface{}, 0, len(configured)) for k, v := range configured { @@ -874,7 +959,7 @@ func flattenExperimentTemplateTargets(configured map[string]types.ExperimentTemp return dataResources } -func flattenExperimentTemplateLogConfiguration(configured *types.ExperimentTemplateLogConfiguration) []map[string]interface{} { +func flattenExperimentTemplateLogConfiguration(configured *awstypes.ExperimentTemplateLogConfiguration) []map[string]interface{} { if configured == nil { return make([]map[string]interface{}, 0) } @@ -888,7 +973,7 @@ func flattenExperimentTemplateLogConfiguration(configured *types.ExperimentTempl return dataResources } -func flattenCloudWatchLogsConfiguration(configured *types.ExperimentTemplateCloudWatchLogsLogConfiguration) []map[string]interface{} { +func flattenCloudWatchLogsConfiguration(configured *awstypes.ExperimentTemplateCloudWatchLogsLogConfiguration) []map[string]interface{} { if configured == nil { return make([]map[string]interface{}, 0) } @@ -900,7 +985,7 @@ func flattenCloudWatchLogsConfiguration(configured *types.ExperimentTemplateClou return dataResources } -func flattenS3Configuration(configured *types.ExperimentTemplateS3LogConfiguration) []map[string]interface{} { +func flattenS3Configuration(configured *awstypes.ExperimentTemplateS3LogConfiguration) []map[string]interface{} { if configured == nil { return make([]map[string]interface{}, 0) } @@ -942,7 +1027,7 @@ func flattenExperimentTemplateActionTargets(configured map[string]string) []map[ return dataResources } -func flattenExperimentTemplateTargetFilters(configured 
[]types.ExperimentTemplateTargetFilter) []map[string]interface{} { +func flattenExperimentTemplateTargetFilters(configured []awstypes.ExperimentTemplateTargetFilter) []map[string]interface{} { dataResources := make([]map[string]interface{}, 0, len(configured)) for _, v := range configured { diff --git a/internal/service/fis/experiment_template_test.go b/internal/service/fis/experiment_template_test.go index b939b3c8425..b217d93c7e2 100644 --- a/internal/service/fis/experiment_template_test.go +++ b/internal/service/fis/experiment_template_test.go @@ -5,20 +5,18 @@ package fis_test import ( "context" - "errors" "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/fis" - "github.com/aws/aws-sdk-go-v2/service/fis/types" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/fis/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tffis "github.com/hashicorp/terraform-provider-aws/internal/service/fis" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -26,7 +24,7 @@ func TestAccFISExperimentTemplate_basic(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fis_experiment_template.test" - var conf types.ExperimentTemplate + var conf awstypes.ExperimentTemplate resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -36,13 +34,9 @@ func TestAccFISExperimentTemplate_basic(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccExperimentTemplateConfig_basic(rName, "An experiment template for testing", "test-action-1", 
"", "aws:ec2:terminate-instances", "Instances", "to-terminate-1", "aws:ec2:instance", "COUNT(1)", "env", "test"), - Check: resource.ComposeTestCheckFunc( + Check: resource.ComposeAggregateTestCheckFunc( testAccExperimentTemplateExists(ctx, resourceName, &conf), - resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "An experiment template for testing"), - resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, "aws_iam_role.test", names.AttrARN), - resource.TestCheckResourceAttr(resourceName, "stop_condition.0.source", "none"), - resource.TestCheckResourceAttr(resourceName, "stop_condition.0.value", ""), - resource.TestCheckResourceAttr(resourceName, "stop_condition.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "action.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "action.0.name", "test-action-1"), resource.TestCheckResourceAttr(resourceName, "action.0.description", ""), resource.TestCheckResourceAttr(resourceName, "action.0.action_id", "aws:ec2:terminate-instances"), @@ -51,7 +45,13 @@ func TestAccFISExperimentTemplate_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "action.0.target.0.key", "Instances"), resource.TestCheckResourceAttr(resourceName, "action.0.target.0.value", "to-terminate-1"), resource.TestCheckResourceAttr(resourceName, "action.0.target.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "action.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "An experiment template for testing"), + resource.TestCheckResourceAttr(resourceName, "experiment_options.#", acctest.Ct1), + resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, "aws_iam_role.test", names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "stop_condition.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "stop_condition.0.source", "none"), + resource.TestCheckResourceAttr(resourceName, "stop_condition.0.value", ""), + 
resource.TestCheckResourceAttr(resourceName, "target.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "target.0.name", "to-terminate-1"), resource.TestCheckResourceAttr(resourceName, "target.0.resource_type", "aws:ec2:instance"), resource.TestCheckResourceAttr(resourceName, "target.0.selection_mode", "COUNT(1)"), @@ -60,7 +60,6 @@ func TestAccFISExperimentTemplate_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "target.0.resource_tag.0.key", "env"), resource.TestCheckResourceAttr(resourceName, "target.0.resource_tag.0.value", "test"), resource.TestCheckResourceAttr(resourceName, "target.0.resource_tag.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "target.#", acctest.Ct1), ), }, { @@ -76,7 +75,7 @@ func TestAccFISExperimentTemplate_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fis_experiment_template.test" - var conf types.ExperimentTemplate + var conf awstypes.ExperimentTemplate resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -100,7 +99,7 @@ func TestAccFISExperimentTemplate_update(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fis_experiment_template.test" - var conf types.ExperimentTemplate + var conf awstypes.ExperimentTemplate resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -170,7 +169,7 @@ func TestAccFISExperimentTemplate_spot(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fis_experiment_template.test" - var conf types.ExperimentTemplate + var conf awstypes.ExperimentTemplate resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -220,7 +219,7 @@ func TestAccFISExperimentTemplate_eks(t *testing.T) { } rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fis_experiment_template.test" - var conf types.ExperimentTemplate + var conf awstypes.ExperimentTemplate resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -276,7 +275,7 @@ func TestAccFISExperimentTemplate_ebs(t *testing.T) { } rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fis_experiment_template.test" - var conf types.ExperimentTemplate + var conf awstypes.ExperimentTemplate resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -324,7 +323,7 @@ func TestAccFISExperimentTemplate_ebsParameters(t *testing.T) { } rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fis_experiment_template.test" - var conf types.ExperimentTemplate + var conf awstypes.ExperimentTemplate resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -372,7 +371,7 @@ func TestAccFISExperimentTemplate_loggingConfiguration(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_fis_experiment_template.test" - var conf types.ExperimentTemplate + var conf awstypes.ExperimentTemplate resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -419,25 +418,60 @@ func TestAccFISExperimentTemplate_loggingConfiguration(t *testing.T) { }) } -func testAccExperimentTemplateExists(ctx context.Context, resourceName string, config *types.ExperimentTemplate) resource.TestCheckFunc { +func TestAccFISExperimentTemplate_updateExperimentOptions(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_fis_experiment_template.test" + var conf awstypes.ExperimentTemplate + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, fis.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckExperimentTemplateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccExperimentTemplateConfig_ExperimentOptions(rName, "skip"), + Check: resource.ComposeTestCheckFunc( + testAccExperimentTemplateExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "experiment_options.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "experiment_options.0.account_targeting", "single-account"), + resource.TestCheckResourceAttr(resourceName, "experiment_options.0.empty_target_resolution_mode", "skip"), + ), + }, + { + Config: testAccExperimentTemplateConfig_ExperimentOptions(rName, "fail"), + Check: resource.ComposeTestCheckFunc( + testAccExperimentTemplateExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "experiment_options.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "experiment_options.0.empty_target_resolution_mode", "fail"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccExperimentTemplateExists(ctx context.Context, n string, v *awstypes.ExperimentTemplate) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[resourceName] + rs, ok := s.RootModule().Resources[n] if !ok { - return fmt.Errorf("Not found: %s", resourceName) + return fmt.Errorf("Not found: %s", n) } conn := acctest.Provider.Meta().(*conns.AWSClient).FISClient(ctx) - out, err := conn.GetExperimentTemplate(ctx, &fis.GetExperimentTemplateInput{Id: aws.String(rs.Primary.ID)}) - if err != nil { - return fmt.Errorf("Describe Experiment Template error: %v", err) - } + output, err := tffis.FindExperimentTemplateByID(ctx, conn, rs.Primary.ID) - if out.ExperimentTemplate == nil { - return fmt.Errorf("No Experiment Template returned %v in %v", out.ExperimentTemplate, out) 
+ if err != nil { + return err } - *out.ExperimentTemplate = *config + *v = *output return nil } @@ -451,12 +485,17 @@ func testAccCheckExperimentTemplateDestroy(ctx context.Context) resource.TestChe continue } - _, err := conn.GetExperimentTemplate(ctx, &fis.GetExperimentTemplateInput{Id: aws.String(rs.Primary.ID)}) + _, err := tffis.FindExperimentTemplateByID(ctx, conn, rs.Primary.ID) - var nf *types.ResourceNotFoundException - if !tfawserr.ErrStatusCodeEquals(err, tffis.ErrCodeNotFound) && !errors.As(err, &nf) { - return fmt.Errorf("Experiment Template '%s' was not deleted properly", rs.Primary.ID) + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err } + + return fmt.Errorf("FIS Experiment Template %s still exists", rs.Primary.ID) } return nil @@ -1067,3 +1106,62 @@ resource "aws_fis_experiment_template" "test" { } `, rName, desc, actionName, actionDesc, actionID, actionTargetK, actionTargetV, targetResType, targetSelectMode, targetResTagK, targetResTagV) } + +func testAccExperimentTemplateConfig_ExperimentOptions(rName, mode string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = jsonencode({ + Statement = [{ + Action = "sts:AssumeRole" + Effect = "Allow" + Principal = { + Service = [ + "fis.${data.aws_partition.current.dns_suffix}", + ] + } + }] + Version = "2012-10-17" + }) +} + +resource "aws_fis_experiment_template" "test" { + description = "An experiment template for testing" + role_arn = aws_iam_role.test.arn + + stop_condition { + source = "none" + } + + experiment_options { + account_targeting = "single-account" + empty_target_resolution_mode = %[2]q + } + + action { + name = "test-action-1" + description = "" + action_id = "aws:ec2:terminate-instances" + + target { + key = "Instances" + value = "to-terminate-1" + } + } + + target { + name = "to-terminate-1" + resource_type = "aws:ec2:instance" + selection_mode = "ALL" + + 
resource_tag { + key = "env2" + value = "test2" + } + } +} +`, rName, mode) +} diff --git a/internal/service/fis/exports_test.go b/internal/service/fis/exports_test.go new file mode 100644 index 00000000000..d22424e77e1 --- /dev/null +++ b/internal/service/fis/exports_test.go @@ -0,0 +1,11 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fis + +// Exports for use in tests only. +var ( + ResourceExperimentTemplate = resourceExperimentTemplate + + FindExperimentTemplateByID = findExperimentTemplateByID +) diff --git a/internal/service/fis/service_endpoint_resolver_gen.go b/internal/service/fis/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..7d0816e8673 --- /dev/null +++ b/internal/service/fis/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package fis + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + fis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fis" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ fis_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver fis_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: fis_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params fis_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = 
aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up fis endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*fis_sdkv2.Options) { + return func(o *fis_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/fis/service_endpoints_gen_test.go b/internal/service/fis/service_endpoints_gen_test.go index 26f93a45af9..42d82b4d95a 100644 --- a/internal/service/fis/service_endpoints_gen_test.go +++ b/internal/service/fis/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: 
expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := fis_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), fis_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := fis_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), fis_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := 
defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/fis/service_package_gen.go b/internal/service/fis/service_package_gen.go index adb7375351e..b8305e06a87 100644 --- a/internal/service/fis/service_package_gen.go +++ b/internal/service/fis/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package fis @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" fis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fis" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -30,7 +29,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceExperimentTemplate, + Factory: resourceExperimentTemplate, TypeName: "aws_fis_experiment_template", Name: "Experiment Template", Tags: &types.ServicePackageResourceTags{}, @@ -46,19 +45,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*fis_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return 
fis_sdkv2.NewFromConfig(cfg, func(o *fis_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return fis_sdkv2.NewFromConfig(cfg, + fis_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/fis/sweep.go b/internal/service/fis/sweep.go index 9668fa11d98..e709d14b74f 100644 --- a/internal/service/fis/sweep.go +++ b/internal/service/fis/sweep.go @@ -9,9 +9,9 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/fis" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -30,24 +30,24 @@ func sweepExperimentTemplates(region string) error { conn := client.FISClient(ctx) input := &fis.ListExperimentTemplatesInput{} sweepResources := make([]sweep.Sweepable, 0) - var sweeperErrs *multierror.Error - pg := fis.NewListExperimentTemplatesPaginator(conn, input) + pages := fis.NewListExperimentTemplatesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for pg.HasMorePages() { - page, err := pg.NextPage(ctx) + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping FIS Experiment Template sweep for %s: %s", region, err) + return nil + } if err != nil { - sweeperErr := fmt.Errorf("error listing FIS Experiment Templates: %w", err) - 
log.Printf("[ERROR] %s", sweeperErr) - sweeperErrs = multierror.Append(sweeperErrs, sweeperErr) - continue + return fmt.Errorf("error listing FIS Experiment Templates (%s): %w", region, err) } - for _, experimentTemplate := range page.ExperimentTemplates { - r := ResourceExperimentTemplate() + for _, v := range page.ExperimentTemplates { + r := resourceExperimentTemplate() d := r.Data(nil) - d.SetId(aws.ToString(experimentTemplate.Id)) + d.SetId(aws.ToString(v.Id)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } diff --git a/internal/service/fms/fms_test.go b/internal/service/fms/fms_test.go index db22d12ad90..084aabfe840 100644 --- a/internal/service/fms/fms_test.go +++ b/internal/service/fms/fms_test.go @@ -28,6 +28,7 @@ func TestAccFMS_serial(t *testing.T) { "securityGroup": testAccPolicy_securityGroup, "tags": testAccPolicy_tags, "update": testAccPolicy_update, + "rscSet": testAccPolicy_rscSet, }, "ResourceSet": { acctest.CtBasic: testAccFMSResourceSet_basic, diff --git a/internal/service/fms/policy.go b/internal/service/fms/policy.go index 34df0230459..4dc7f8f12e9 100644 --- a/internal/service/fms/policy.go +++ b/internal/service/fms/policy.go @@ -134,6 +134,14 @@ func resourcePolicy() *schema.Resource { ValidateFunc: validation.StringMatch(regexache.MustCompile(`^([\p{L}\p{Z}\p{N}_.:/=+\-@]*)$`), "must match a supported resource type, such as AWS::EC2::VPC, see also: https://docs.aws.amazon.com/fms/2018-01-01/APIReference/API_Policy.html"), ConflictsWith: []string{"resource_type_list"}, }, + "resource_set_ids": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, "resource_type_list": { Type: schema.TypeSet, Optional: true, @@ -275,6 +283,7 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interf } d.Set(names.AttrResourceType, policy.ResourceType) d.Set("resource_type_list", policy.ResourceTypeList) + d.Set("resource_set_ids", 
policy.ResourceSetIds) securityServicePolicy := []map[string]interface{}{{ names.AttrType: string(policy.SecurityServicePolicyData.Type), "managed_service_data": aws.ToString(policy.SecurityServicePolicyData.ManagedServiceData), @@ -376,6 +385,7 @@ func expandPolicy(d *schema.ResourceData) *awstypes.Policy { RemediationEnabled: d.Get("remediation_enabled").(bool), ResourceType: resourceType, ResourceTypeList: flex.ExpandStringValueSet(d.Get("resource_type_list").(*schema.Set)), + ResourceSetIds: flex.ExpandStringValueSet(d.Get("resource_set_ids").(*schema.Set)), } if d.Id() != "" { diff --git a/internal/service/fms/policy_test.go b/internal/service/fms/policy_test.go index e106a9eb3cc..cc6a734bfa4 100644 --- a/internal/service/fms/policy_test.go +++ b/internal/service/fms/policy_test.go @@ -365,6 +365,34 @@ func testAccPolicy_securityGroup(t *testing.T) { }) } +func testAccPolicy_rscSet(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_fms_policy.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckRegion(t, names.USEast1RegionID) + acctest.PreCheckOrganizationsEnabled(ctx, t) + acctest.PreCheckOrganizationManagementAccount(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.FMSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccPolicyConfig_rscSet(rName, rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckPolicyExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "resource_set_ids.#", acctest.Ct1), + ), + }, + }, + }) +} + func testAccCheckPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FMSClient(ctx) @@ 
-413,6 +441,7 @@ resource "aws_fms_policy" "test" { name = %[1]q description = "test description" remediation_enabled = false + resource_set_ids = [aws_fms_resource_set.test.id] resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] exclude_map { @@ -431,6 +460,14 @@ resource "aws_wafregional_rule_group" "test" { metric_name = "MyTest" name = %[2]q } + +resource "aws_fms_resource_set" "test" { + depends_on = [aws_fms_admin_account.test] + resource_set { + name = %[1]q + resource_type_list = ["AWS::NetworkFirewall::Firewall"] + } +} `, policyName, ruleGroupName)) } @@ -845,3 +882,40 @@ resource "aws_fms_policy" "test" { } `, rName)) } + +func testAccPolicyConfig_rscSet(policyName, ruleGroupName string) string { + return acctest.ConfigCompose(testAccAdminAccountConfig_basic, fmt.Sprintf(` +resource "aws_fms_policy" "test" { + exclude_resource_tags = false + name = %[1]q + description = "test description" + remediation_enabled = false + resource_set_ids = [aws_fms_resource_set.test.id] + resource_type_list = ["AWS::ElasticLoadBalancingV2::LoadBalancer"] + + exclude_map { + account = [data.aws_caller_identity.current.account_id] + } + + security_service_policy_data { + type = "WAF" + managed_service_data = "{\"type\": \"WAF\", \"ruleGroups\": [{\"id\":\"${aws_wafregional_rule_group.test.id}\", \"overrideAction\" : {\"type\": \"COUNT\"}}],\"defaultAction\": {\"type\": \"BLOCK\"}, \"overrideCustomerWebACLAssociation\": false}" + } + + depends_on = [aws_fms_admin_account.test] +} + +resource "aws_wafregional_rule_group" "test" { + metric_name = "MyTest" + name = %[2]q +} + +resource "aws_fms_resource_set" "test" { + depends_on = [aws_fms_admin_account.test] + resource_set { + name = %[1]q + resource_type_list = ["AWS::NetworkFirewall::Firewall"] + } +} +`, policyName, ruleGroupName)) +} diff --git a/internal/service/fms/service_endpoint_resolver_gen.go b/internal/service/fms/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..cdfe197026f 
--- /dev/null +++ b/internal/service/fms/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package fms + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + fms_sdkv2 "github.com/aws/aws-sdk-go-v2/service/fms" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ fms_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver fms_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: fms_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params fms_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = 
fmt.Errorf("looking up fms endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*fms_sdkv2.Options) { + return func(o *fms_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/fms/service_endpoints_gen_test.go b/internal/service/fms/service_endpoints_gen_test.go index 50c2ba3233f..66ed0ae4db0 100644 --- a/internal/service/fms/service_endpoints_gen_test.go +++ b/internal/service/fms/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := fms_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), fms_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := fms_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), fms_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/fms/service_package.go b/internal/service/fms/service_package.go index 566bb11b215..c14bacb15eb 100644 --- a/internal/service/fms/service_package.go +++ b/internal/service/fms/service_package.go @@ -10,7 
+10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/fms" awstypes "github.com/aws/aws-sdk-go-v2/service/fms/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,29 +19,21 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*fms.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return fms.NewFromConfig(cfg, func(o *fms.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - // Acceptance testing creates and deletes resources in quick succession. - // The FMS onboarding process into Organizations is opaque to consumers. - // Since we cannot reasonably check this status before receiving the error, - // set the operation as retryable. - if errs.IsAErrorMessageContains[*awstypes.InvalidOperationException](err, "Your AWS Organization is currently onboarding with AWS Firewall Manager and cannot be offboarded") || - errs.IsAErrorMessageContains[*awstypes.InvalidOperationException](err, "Your AWS Organization is currently offboarding with AWS Firewall Manager. Please submit onboard request after offboarded") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. 
- })) - }), nil + return fms.NewFromConfig(cfg, + fms.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *fms.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + // Acceptance testing creates and deletes resources in quick succession. + // The FMS onboarding process into Organizations is opaque to consumers. + // Since we cannot reasonably check this status before receiving the error, + // set the operation as retryable. + if errs.IsAErrorMessageContains[*awstypes.InvalidOperationException](err, "Your AWS Organization is currently onboarding with AWS Firewall Manager and cannot be offboarded") || + errs.IsAErrorMessageContains[*awstypes.InvalidOperationException](err, "Your AWS Organization is currently offboarding with AWS Firewall Manager. Please submit onboard request after offboarded") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + })) + }, + ), nil } diff --git a/internal/service/fms/service_package_gen.go b/internal/service/fms/service_package_gen.go index 83fbd11608e..d63cf2f91a1 100644 --- a/internal/service/fms/service_package_gen.go +++ b/internal/service/fms/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package fms diff --git a/internal/service/fsx/lustre_file_system.go b/internal/service/fsx/lustre_file_system.go index 955691c2c34..5bbdc0112cf 100644 --- a/internal/service/fsx/lustre_file_system.go +++ b/internal/service/fsx/lustre_file_system.go @@ -889,6 +889,9 @@ func waitFileSystemCreated(ctx context.Context, conn *fsx.FSx, id string, timeou Refresh: statusFileSystem(ctx, conn, id), Timeout: timeout, Delay: 30 * time.Second, + + // When the filesystem is used by another service, e.g. 
an M2 Environment, it is not immediately available + ContinuousTargetOccurence: 3, } outputRaw, err := stateConf.WaitForStateContext(ctx) diff --git a/internal/service/fsx/ontap_file_system.go b/internal/service/fsx/ontap_file_system.go index a35345599f2..9ef217f7f43 100644 --- a/internal/service/fsx/ontap_file_system.go +++ b/internal/service/fsx/ontap_file_system.go @@ -156,7 +156,6 @@ func resourceONTAPFileSystem() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - ForceNew: true, ValidateFunc: validation.IntBetween(1, 12), }, names.AttrKMSKeyID: { @@ -222,14 +221,14 @@ func resourceONTAPFileSystem() *schema.Resource { Type: schema.TypeInt, Computed: true, Optional: true, - ValidateFunc: validation.IntInSlice([]int{128, 256, 512, 1024, 2048, 4096}), + ValidateFunc: validation.IntInSlice([]int{128, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144}), ExactlyOneOf: []string{"throughput_capacity", "throughput_capacity_per_ha_pair"}, }, "throughput_capacity_per_ha_pair": { Type: schema.TypeInt, Computed: true, Optional: true, - ValidateFunc: validation.IntInSlice([]int{128, 256, 512, 1024, 2048, 3072, 4096, 6144}), + ValidateFunc: validation.IntInSlice([]int{128, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144}), ExactlyOneOf: []string{"throughput_capacity", "throughput_capacity_per_ha_pair"}, }, names.AttrVPCID: { @@ -249,17 +248,40 @@ func resourceONTAPFileSystem() *schema.Resource { CustomizeDiff: customdiff.All( verify.SetTagsDiff, - customdiff.ForceNewIfChange("throughput_capacity_per_ha_pair", func(ctx context.Context, old, new, meta any) bool { - if new != nil && new != 0 { - return new.(int) != old.(int) - } else { - return false - } - }), + resourceONTAPFileSystemThroughputCapacityPerHAPairCustomizeDiff, + resourceONTAPFileSystemHAPairsCustomizeDiff, ), } } +func resourceONTAPFileSystemThroughputCapacityPerHAPairCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta any) error { + // we want to force a new resource 
if the throughput_capacity_per_ha_pair is increased for Gen1 file systems + if d.HasChange("throughput_capacity_per_ha_pair") { + o, n := d.GetChange("throughput_capacity_per_ha_pair") + if n != nil && n.(int) != 0 && n.(int) > o.(int) && (d.Get("deployment_type").(string) == fsx.OntapDeploymentTypeSingleAz1 || d.Get("deployment_type").(string) == fsx.OntapDeploymentTypeMultiAz1) { + if err := d.ForceNew("throughput_capacity_per_ha_pair"); err != nil { + return err + } + } + } + + return nil +} + +func resourceONTAPFileSystemHAPairsCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta any) error { + // we want to force a new resource if the ha_pairs is increased for Gen1 single AZ file systems. multiple ha_pairs is not supported on Multi AZ. + if d.HasChange("ha_pairs") { + o, n := d.GetChange("ha_pairs") + if n != nil && n.(int) != 0 && n.(int) > o.(int) && (d.Get("deployment_type").(string) == fsx.OntapDeploymentTypeSingleAz1) { + if err := d.ForceNew("ha_pairs"); err != nil { + return err + } + } + } + + return nil +} + func resourceONTAPFileSystemCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FSxConn(ctx) @@ -423,6 +445,12 @@ func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, input.OntapConfiguration.FsxAdminPassword = aws.String(d.Get("fsx_admin_password").(string)) } + if d.HasChange("ha_pairs") { + input.OntapConfiguration.HAPairs = aws.Int64(int64(d.Get("ha_pairs").(int))) + //for the ONTAP update API the ThroughputCapacityPerHAPair must explicitly be passed when adding ha_pairs even if it hasn't changed. 
+ input.OntapConfiguration.ThroughputCapacityPerHAPair = aws.Int64(int64(d.Get("throughput_capacity_per_ha_pair").(int))) + } + if d.HasChange("route_table_ids") { o, n := d.GetChange("route_table_ids") os, ns := o.(*schema.Set), n.(*schema.Set) @@ -445,6 +473,10 @@ func resourceONTAPFileSystemUpdate(ctx context.Context, d *schema.ResourceData, input.OntapConfiguration.ThroughputCapacity = aws.Int64(int64(d.Get("throughput_capacity").(int))) } + if d.HasChange("throughput_capacity_per_ha_pair") { + input.OntapConfiguration.ThroughputCapacityPerHAPair = aws.Int64(int64(d.Get("throughput_capacity_per_ha_pair").(int))) + } + if d.HasChange("weekly_maintenance_start_time") { input.OntapConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string)) } diff --git a/internal/service/fsx/ontap_file_system_test.go b/internal/service/fsx/ontap_file_system_test.go index 0c573c19402..a67aa7a42fd 100644 --- a/internal/service/fsx/ontap_file_system_test.go +++ b/internal/service/fsx/ontap_file_system_test.go @@ -107,6 +107,66 @@ func TestAccFSxONTAPFileSystem_singleAZ(t *testing.T) { }) } +func TestAccFSxONTAPFileSystem_multiAZ2(t *testing.T) { + ctx := acctest.Context(t) + var filesystem1, filesystem2 fsx.FileSystem + throughput1 := 384 + throughput2 := 768 + throughput3 := 768 + capacity1 := 1024 + capacity2 := 1024 + capacity3 := 2048 + + resourceName := "aws_fsx_ontap_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPFileSystemConfig_multiAZ2(rName, throughput1, capacity1), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OntapDeploymentTypeMultiAz2), + resource.TestCheckResourceAttr(resourceName, "ha_pairs", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", fmt.Sprint(throughput1)), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput1)), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", fmt.Sprint(capacity1)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + }, + { + Config: testAccONTAPFileSystemConfig_multiAZ2(rName, throughput2, capacity2), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckONTAPFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OntapDeploymentTypeMultiAz2), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput2)), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", fmt.Sprint(capacity2)), + ), + }, + { + Config: testAccONTAPFileSystemConfig_multiAZ2(rName, throughput3, capacity3), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckONTAPFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OntapDeploymentTypeMultiAz2), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput3)), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", fmt.Sprint(capacity3)), + ), + }, + }, + }) +} + func TestAccFSxONTAPFileSystem_haPair(t *testing.T) { ctx := acctest.Context(t) var filesystem fsx.FileSystem @@ -149,6 +209,54 @@ func 
TestAccFSxONTAPFileSystem_haPair(t *testing.T) { }) } +func TestAccFSxONTAPFileSystem_haPair_increase(t *testing.T) { + ctx := acctest.Context(t) + var filesystem1, filesystem2 fsx.FileSystem + throughput := 3072 + capacity1 := 4096 + capacity2 := 8192 + haPair1 := 2 + haPair2 := 4 + resourceName := "aws_fsx_ontap_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPFileSystemConfig_singleAZ2(rName, throughput, capacity1, haPair1), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "ha_pairs", acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput)), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", fmt.Sprint(capacity1)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + }, + { + Config: testAccONTAPFileSystemConfig_singleAZ2(rName, throughput, capacity2, haPair2), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckONTAPFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "ha_pairs", acctest.Ct4), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", 
fmt.Sprint(throughput)), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", fmt.Sprint(capacity2)), + ), + }, + }, + }) +} + func TestAccFSxONTAPFileSystem_fsxAdminPassword(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem @@ -607,6 +715,86 @@ func TestAccFSxONTAPFileSystem_throughputCapacity(t *testing.T) { }) } +func TestAccFSxONTAPFileSystem_throughputCapacity_singleAZ1(t *testing.T) { + ctx := acctest.Context(t) + var filesystem1, filesystem2 fsx.FileSystem + throughput1 := 128 + throughput2 := 256 + capacity := 1024 + resourceName := "aws_fsx_ontap_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPFileSystemConfig_singleAZ1(rName, throughput1, capacity), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput1)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + }, + { + Config: testAccONTAPFileSystemConfig_singleAZ1(rName, throughput2, capacity), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckONTAPFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput2)), + ), + }, + }, + }) +} + +func TestAccFSxONTAPFileSystem_throughputCapacity_multiAZ1(t *testing.T) { + ctx := 
acctest.Context(t) + var filesystem1, filesystem2 fsx.FileSystem + throughput1 := 128 + throughput2 := 256 + capacity := 1024 + resourceName := "aws_fsx_ontap_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, fsx.EndpointsID) }, + ErrorCheck: acctest.ErrorCheck(t, names.FSxServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckONTAPFileSystemDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccONTAPFileSystemConfig_multiAZ1(rName, throughput1, capacity), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput1)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{names.AttrSecurityGroupIDs}, + }, + { + Config: testAccONTAPFileSystemConfig_multiAZ1(rName, throughput2, capacity), + Check: resource.ComposeTestCheckFunc( + testAccCheckONTAPFileSystemExists(ctx, resourceName, &filesystem2), + testAccCheckONTAPFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity_per_ha_pair", fmt.Sprint(throughput2)), + ), + }, + }, + }) +} + func TestAccFSxONTAPFileSystem_storageCapacity(t *testing.T) { ctx := acctest.Context(t) var filesystem1, filesystem2 fsx.FileSystem @@ -743,6 +931,74 @@ resource "aws_fsx_ontap_file_system" "test" { `, rName)) } +func testAccONTAPFileSystemConfig_multiAZ2(rName string, throughput int, capacity int) string { + return acctest.ConfigCompose(testAccONTAPFileSystemConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_file_system" "test" { + storage_capacity = %[3]d + subnet_ids = aws_subnet.test[*].id + deployment_type = "MULTI_AZ_2" + 
ha_pairs = 1 + throughput_capacity_per_ha_pair = %[2]d + preferred_subnet_id = aws_subnet.test[0].id + + tags = { + Name = %[1]q + } +} +`, rName, throughput, capacity)) +} + +func testAccONTAPFileSystemConfig_multiAZ1(rName string, throughput int, capacity int) string { + return acctest.ConfigCompose(testAccONTAPFileSystemConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_file_system" "test" { + storage_capacity = %[3]d + subnet_ids = aws_subnet.test[*].id + deployment_type = "MULTI_AZ_1" + ha_pairs = 1 + throughput_capacity_per_ha_pair = %[2]d + preferred_subnet_id = aws_subnet.test[0].id + + tags = { + Name = %[1]q + } +} +`, rName, throughput, capacity)) +} + +func testAccONTAPFileSystemConfig_singleAZ1(rName string, throughput int, capacity int) string { + return acctest.ConfigCompose(testAccONTAPFileSystemConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_file_system" "test" { + storage_capacity = %[3]d + subnet_ids = [aws_subnet.test[0].id] + deployment_type = "SINGLE_AZ_1" + ha_pairs = 1 + throughput_capacity_per_ha_pair = %[2]d + preferred_subnet_id = aws_subnet.test[0].id + + tags = { + Name = %[1]q + } +} +`, rName, throughput, capacity)) +} + +func testAccONTAPFileSystemConfig_singleAZ2(rName string, throughput int, capacity int, haPairs int) string { + return acctest.ConfigCompose(testAccONTAPFileSystemConfig_base(rName), fmt.Sprintf(` +resource "aws_fsx_ontap_file_system" "test" { + storage_capacity = %[3]d + subnet_ids = [aws_subnet.test[0].id] + deployment_type = "SINGLE_AZ_2" + ha_pairs = %[4]d + throughput_capacity_per_ha_pair = %[2]d + preferred_subnet_id = aws_subnet.test[0].id + + tags = { + Name = %[1]q + } +} +`, rName, throughput, capacity, haPairs)) +} + func testAccONTAPFileSystemConfig_haPair(rName string, capacity int) string { return acctest.ConfigCompose(testAccONTAPFileSystemConfig_base(rName), fmt.Sprintf(` resource "aws_fsx_ontap_file_system" "test" { diff --git a/internal/service/fsx/service_endpoint_resolver_gen.go 
b/internal/service/fsx/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..5b3ebdb7ac2 --- /dev/null +++ b/internal/service/fsx/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package fsx + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up fsx endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/fsx/service_endpoints_gen_test.go b/internal/service/fsx/service_endpoints_gen_test.go index 57de05fcec1..6ca13f8d7b9 100644 --- a/internal/service/fsx/service_endpoints_gen_test.go +++ b/internal/service/fsx/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { 
//nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(fsx_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(fsx_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving fsx default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving fsx FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && 
dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up fsx endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/fsx/service_package_gen.go b/internal/service/fsx/service_package_gen.go index c26a77334f7..4b4ad114d64 100644 --- a/internal/service/fsx/service_package_gen.go +++ b/internal/service/fsx/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package fsx @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" fsx_sdkv1 "github.com/aws/aws-sdk-go/service/fsx" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -166,11 +165,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*f "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return fsx_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 53ec242108a..488be4ae0f7 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -26,6 +26,7 @@ func RegisterSweepers() { F: sweepLustreFileSystems, Dependencies: []string{ "aws_datasync_location", + "aws_m2_environment", }, }) @@ -35,6 +36,7 @@ func RegisterSweepers() { Dependencies: []string{ "aws_datasync_location", "aws_fsx_ontap_storage_virtual_machine", + 
"aws_m2_environment", }, }) @@ -57,6 +59,7 @@ func RegisterSweepers() { Dependencies: []string{ "aws_datasync_location", "aws_fsx_openzfs_volume", + "aws_m2_environment", }, }) @@ -70,6 +73,7 @@ func RegisterSweepers() { F: sweepWindowsFileSystems, Dependencies: []string{ "aws_datasync_location", + "aws_m2_environment", "aws_storagegateway_file_system_association", }, }) diff --git a/internal/service/gamelift/service_endpoint_resolver_gen.go b/internal/service/gamelift/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..74ac7d7ccab --- /dev/null +++ b/internal/service/gamelift/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package gamelift + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up gamelift endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/gamelift/service_endpoints_gen_test.go b/internal/service/gamelift/service_endpoints_gen_test.go index 983de29fec6..beba1fc465e 100644 --- a/internal/service/gamelift/service_endpoints_gen_test.go +++ b/internal/service/gamelift/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t 
*testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(gamelift_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(gamelift_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving gamelift default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving gamelift FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up gamelift endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/gamelift/service_package_gen.go b/internal/service/gamelift/service_package_gen.go index 150d7ffe265..92641c9fcbb 100644 --- a/internal/service/gamelift/service_package_gen.go +++ b/internal/service/gamelift/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package gamelift @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" gamelift_sdkv1 "github.com/aws/aws-sdk-go/service/gamelift" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -97,11 +96,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*g "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return gamelift_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/glacier/service_endpoint_resolver_gen.go b/internal/service/glacier/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..231b2af334a --- /dev/null +++ b/internal/service/glacier/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package glacier + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + glacier_sdkv2 "github.com/aws/aws-sdk-go-v2/service/glacier" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ glacier_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver glacier_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: glacier_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params glacier_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up glacier endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*glacier_sdkv2.Options) { + return func(o *glacier_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/glacier/service_endpoints_gen_test.go b/internal/service/glacier/service_endpoints_gen_test.go index b4d2fe7b024..1f3c37d8129 100644 --- a/internal/service/glacier/service_endpoints_gen_test.go +++ b/internal/service/glacier/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := glacier_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), glacier_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
glacier_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), glacier_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/glacier/service_package_gen.go b/internal/service/glacier/service_package_gen.go index 781609fddc9..a9c4186d699 100644 --- a/internal/service/glacier/service_package_gen.go +++ b/internal/service/glacier/service_package_gen.go @@ -1,4 +1,4 @@ -// Code 
generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package glacier @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" glacier_sdkv2 "github.com/aws/aws-sdk-go-v2/service/glacier" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -52,19 +51,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*glacier_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return glacier_sdkv2.NewFromConfig(cfg, func(o *glacier_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return glacier_sdkv2.NewFromConfig(cfg, + glacier_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/glacier/tags_gen.go b/internal/service/glacier/tags_gen.go index 82f1ce5d4e2..f9884d700d9 100644 --- a/internal/service/glacier/tags_gen.go +++ b/internal/service/glacier/tags_gen.go @@ -80,12 +80,12 @@ func setTagsOut(ctx context.Context, tags map[string]string) { } // createTags creates glacier service tags for new resources. 
-func createTags(ctx context.Context, conn *glacier.Client, identifier string, tags map[string]string) error { +func createTags(ctx context.Context, conn *glacier.Client, identifier string, tags map[string]string, optFns ...func(*glacier.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, tags) + return updateTags(ctx, conn, identifier, nil, tags, optFns...) } // updateTags updates glacier service tags. diff --git a/internal/service/globalaccelerator/cross_account_attachment.go b/internal/service/globalaccelerator/cross_account_attachment.go index c6204cfc63a..436858c258b 100644 --- a/internal/service/globalaccelerator/cross_account_attachment.go +++ b/internal/service/globalaccelerator/cross_account_attachment.go @@ -85,6 +85,9 @@ func (r *crossAccountAttachmentResource) Schema(ctx context.Context, request res names.AttrRegion: schema.StringAttribute{ Optional: true, }, + names.AttrCIDRBlock: schema.StringAttribute{ + Optional: true, + }, }, }, }, @@ -213,17 +216,19 @@ func (r *crossAccountAttachmentResource) Update(ctx context.Context, request res } add, remove, _ := flex.DiffSlices(oldResources, newResources, func(v1, v2 *resourceModel) bool { - return v1.EndpointID.Equal(v2.EndpointID) && v1.Region.Equal(v2.Region) + return v1.Cidr.Equal(v2.Cidr) && v1.EndpointID.Equal(v2.EndpointID) && v1.Region.Equal(v2.Region) }) input.AddResources = tfslices.ApplyToAll(add, func(v *resourceModel) awstypes.Resource { return awstypes.Resource{ + Cidr: fwflex.StringFromFramework(ctx, v.Cidr), EndpointId: fwflex.StringFromFramework(ctx, v.EndpointID), Region: fwflex.StringFromFramework(ctx, v.Region), } }) input.RemoveResources = tfslices.ApplyToAll(remove, func(v *resourceModel) awstypes.Resource { return awstypes.Resource{ + Cidr: fwflex.StringFromFramework(ctx, v.Cidr), EndpointId: fwflex.StringFromFramework(ctx, v.EndpointID), Region: fwflex.StringFromFramework(ctx, v.Region), } @@ -322,6 +327,7 @@ func (m 
*crossAccountAttachmentResourceModel) setID() { } type resourceModel struct { + Cidr types.String `tfsdk:"cidr_block"` EndpointID types.String `tfsdk:"endpoint_id"` Region types.String `tfsdk:"region"` } diff --git a/internal/service/globalaccelerator/service_endpoint_resolver_gen.go b/internal/service/globalaccelerator/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f278b6c4634 --- /dev/null +++ b/internal/service/globalaccelerator/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package globalaccelerator + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + globalaccelerator_sdkv2 "github.com/aws/aws-sdk-go-v2/service/globalaccelerator" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ globalaccelerator_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver globalaccelerator_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: globalaccelerator_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params globalaccelerator_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = 
r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up globalaccelerator endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*globalaccelerator_sdkv2.Options) { + return func(o *globalaccelerator_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/globalaccelerator/service_endpoints_gen_test.go b/internal/service/globalaccelerator/service_endpoints_gen_test.go index 3d082d2d207..0f3672ed127 100644 --- a/internal/service/globalaccelerator/service_endpoints_gen_test.go +++ b/internal/service/globalaccelerator/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, 
"use fips config with package name endpoint config": { @@ -243,24 +245,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := globalaccelerator_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), globalaccelerator_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := globalaccelerator_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), globalaccelerator_sdkv2.EndpointParameters{ @@ -268,14 +270,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving 
accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/globalaccelerator/service_package.go b/internal/service/globalaccelerator/service_package.go index 2f4a49d2f38..6627b43d968 100644 --- a/internal/service/globalaccelerator/service_package.go +++ b/internal/service/globalaccelerator/service_package.go @@ -16,22 +16,20 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*globalaccelerator.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return globalaccelerator.NewFromConfig(cfg, func(o *globalaccelerator.Options) { - if config["partition"].(string) == names.StandardPartitionID { - // Global Accelerator endpoint is only available in AWS Commercial us-west-2 Region. - o.Region = names.USWest2RegionID - } - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled + return globalaccelerator.NewFromConfig(cfg, + globalaccelerator.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *globalaccelerator.Options) { + if config["partition"].(string) == names.StandardPartitionID { + // Global Accelerator endpoint is only available in AWS Commercial us-west-2 Region. 
+ if cfg.Region != names.USWest2RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USWest2RegionID, + }) + o.Region = names.USWest2RegionID + } } - } - }), nil + }, + ), nil } diff --git a/internal/service/globalaccelerator/service_package_gen.go b/internal/service/globalaccelerator/service_package_gen.go index c6b631d8166..2818d999b3d 100644 --- a/internal/service/globalaccelerator/service_package_gen.go +++ b/internal/service/globalaccelerator/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package globalaccelerator diff --git a/internal/service/glue/job.go b/internal/service/glue/job.go index b197c026e37..ca55693b5e3 100644 --- a/internal/service/glue/job.go +++ b/internal/service/glue/job.go @@ -119,6 +119,10 @@ func ResourceJob() *schema.Resource { Computed: true, ConflictsWith: []string{"number_of_workers", "worker_type"}, }, + "maintenance_window": { + Type: schema.TypeString, + Optional: true, + }, "max_retries": { Type: schema.TypeInt, Optional: true, @@ -227,6 +231,10 @@ func resourceJobCreate(ctx context.Context, d *schema.ResourceData, meta interfa input.MaxCapacity = aws.Float64(v.(float64)) } + if v, ok := d.GetOk("maintenance_window"); ok { + input.MaintenanceWindow = aws.String(v.(string)) + } + if v, ok := d.GetOk("max_retries"); ok { input.MaxRetries = aws.Int64(int64(v.(int))) } @@ -303,6 +311,7 @@ func resourceJobRead(ctx context.Context, d *schema.ResourceData, meta interface return sdkdiag.AppendErrorf(diags, "setting execution_property: %s", err) } d.Set("glue_version", job.GlueVersion) + d.Set("maintenance_window", job.MaintenanceWindow) d.Set(names.AttrMaxCapacity, job.MaxCapacity) d.Set("max_retries", job.MaxRetries) d.Set(names.AttrName, job.Name) @@ -355,6 +364,10 @@ func resourceJobUpdate(ctx context.Context, d 
*schema.ResourceData, meta interfa jobUpdate.GlueVersion = aws.String(v.(string)) } + if v, ok := d.GetOk("maintenance_window"); ok { + jobUpdate.MaintenanceWindow = aws.String(v.(string)) + } + if v, ok := d.GetOk("max_retries"); ok { jobUpdate.MaxRetries = aws.Int64(int64(v.(int))) } diff --git a/internal/service/glue/job_test.go b/internal/service/glue/job_test.go index 8f4fca1eb6f..a7748baeab6 100644 --- a/internal/service/glue/job_test.go +++ b/internal/service/glue/job_test.go @@ -388,6 +388,35 @@ func TestAccGlueJob_executionProperty(t *testing.T) { }) } +func TestAccGlueJob_maintenanceWindow(t *testing.T) { + ctx := acctest.Context(t) + var job glue.Job + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_glue_job.test" + maintenanceWindow := "Sun:23" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.GlueServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckJobDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccJobConfig_maintenanceWindow(rName, maintenanceWindow), + Check: resource.ComposeTestCheckFunc( + testAccCheckJobExists(ctx, resourceName, &job), + resource.TestCheckResourceAttr(resourceName, "maintenance_window", "Sun:23"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccGlueJob_maxRetries(t *testing.T) { ctx := acctest.Context(t) var job glue.Job @@ -1042,6 +1071,23 @@ resource "aws_glue_job" "test" { `, rName, maxConcurrentRuns)) } +func testAccJobConfig_maintenanceWindow(rName, maintenanceWindow string) string { + return acctest.ConfigCompose(testAccJobConfig_base(rName), fmt.Sprintf(` +resource "aws_glue_job" "test" { + name = %[2]q + role_arn = aws_iam_role.test.arn + maintenance_window = %[2]q + + command { + name = "gluestreaming" + script_location = "testscriptlocation" + } + + 
depends_on = [aws_iam_role_policy_attachment.test] +} +`, rName, maintenanceWindow)) +} + func testAccJobConfig_maxRetries(rName string, maxRetries int) string { return acctest.ConfigCompose(testAccJobConfig_base(rName), fmt.Sprintf(` resource "aws_glue_job" "test" { @@ -1115,10 +1161,8 @@ resource "aws_glue_job" "test" { func testAccJobConfig_tags1(rName, tagKey1, tagValue1 string) string { return acctest.ConfigCompose(testAccJobConfig_base(rName), fmt.Sprintf(` resource "aws_glue_job" "test" { - name = %[1]q - number_of_workers = 1 - role_arn = aws_iam_role.test.arn - worker_type = "Standard" + name = %[1]q + role_arn = aws_iam_role.test.arn command { script_location = "testscriptlocation" diff --git a/internal/service/glue/service_endpoint_resolver_gen.go b/internal/service/glue/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..2842a690a24 --- /dev/null +++ b/internal/service/glue/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package glue + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) 
+ + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/glue/service_endpoints_gen_test.go b/internal/service/glue/service_endpoints_gen_test.go index af9a2c56ba1..ce4b40e444b 100644 --- a/internal/service/glue/service_endpoints_gen_test.go +++ b/internal/service/glue/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(glue_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(glue_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region 
string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/glue/service_package_gen.go b/internal/service/glue/service_package_gen.go index 06d04b14b01..111dd5b979a 100644 --- a/internal/service/glue/service_package_gen.go +++ b/internal/service/glue/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package glue @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" glue_sdkv1 "github.com/aws/aws-sdk-go/service/glue" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -186,11 +185,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*g "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return glue_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/grafana/errors.go b/internal/service/grafana/errors.go new file mode 100644 index 00000000000..f85780964d0 --- /dev/null +++ b/internal/service/grafana/errors.go @@ -0,0 +1,29 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grafana + +import ( + "errors" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" +) + +func updatesError(apiObjects []awstypes.UpdateError) error { + errs := tfslices.ApplyToAll(apiObjects, func(v awstypes.UpdateError) error { + return updateError(&v) + }) + + return errors.Join(errs...) +} + +func updateError(apiObject *awstypes.UpdateError) error { + if apiObject == nil { + return nil + } + + return fmt.Errorf("%d: %s", aws.ToInt32(apiObject.Code), aws.ToString(apiObject.Message)) +} diff --git a/internal/service/grafana/exports_test.go b/internal/service/grafana/exports_test.go new file mode 100644 index 00000000000..9e966b71853 --- /dev/null +++ b/internal/service/grafana/exports_test.go @@ -0,0 +1,20 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package grafana + +// Exports for use in tests only. +var ( + ResourceWorkspace = resourceWorkspace + ResourceWorkspaceAPIKey = resourceWorkspaceAPIKey + ResourceWorkspaceSAMLConfiguration = resourceWorkspaceSAMLConfiguration + ResourceWorkspaceServiceAccount = newWorkspaceServiceAccountResource + ResourceWorkspaceServiceAccountToken = newWorkspaceServiceAccountTokenResource + + FindLicensedWorkspaceByID = findLicensedWorkspaceByID + FindRoleAssociationsByTwoPartKey = findRoleAssociationsByTwoPartKey + FindSAMLConfigurationByID = findSAMLConfigurationByID + FindWorkspaceByID = findWorkspaceByID + FindWorkspaceServiceAccountByTwoPartKey = findWorkspaceServiceAccountByTwoPartKey + FindWorkspaceServiceAccountTokenByThreePartKey = findWorkspaceServiceAccountTokenByThreePartKey +) diff --git a/internal/service/grafana/find.go b/internal/service/grafana/find.go deleted file mode 100644 index eb997aa21f0..00000000000 --- a/internal/service/grafana/find.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package grafana - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindLicensedWorkspaceByID(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string) (*managedgrafana.WorkspaceDescription, error) { - output, err := FindWorkspaceByID(ctx, conn, id) - - if err != nil { - return nil, err - } - - if output.LicenseType == nil { - return nil, &retry.NotFoundError{} - } - - return output, nil -} - -func FindWorkspaceByID(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string) (*managedgrafana.WorkspaceDescription, error) { - input := &managedgrafana.DescribeWorkspaceInput{ - WorkspaceId: aws.String(id), - } - - output, err := conn.DescribeWorkspaceWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, managedgrafana.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.Workspace == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.Workspace, nil -} - -func FindSamlConfigurationByID(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string) (*managedgrafana.SamlAuthentication, error) { - input := &managedgrafana.DescribeWorkspaceAuthenticationInput{ - WorkspaceId: aws.String(id), - } - - output, err := conn.DescribeWorkspaceAuthenticationWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, managedgrafana.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.Authentication == nil || output.Authentication.Saml == nil { - 
return nil, tfresource.NewEmptyResultError(input) - } - - if status := aws.StringValue(output.Authentication.Saml.Status); status == managedgrafana.SamlConfigurationStatusNotConfigured { - return nil, &retry.NotFoundError{ - Message: status, - LastRequest: input, - } - } - - return output.Authentication.Saml, nil -} - -func FindRoleAssociationsByRoleAndWorkspaceID(ctx context.Context, conn *managedgrafana.ManagedGrafana, role string, workspaceID string) (map[string][]string, error) { - input := &managedgrafana.ListPermissionsInput{ - WorkspaceId: aws.String(workspaceID), - } - output := make(map[string][]string, 0) - - err := conn.ListPermissionsPagesWithContext(ctx, input, func(page *managedgrafana.ListPermissionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Permissions { - if aws.StringValue(v.Role) == role { - userType := aws.StringValue(v.User.Type) - output[userType] = append(output[userType], aws.StringValue(v.User.Id)) - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, managedgrafana.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if len(output) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} diff --git a/internal/service/grafana/generate.go b/internal/service/grafana/generate.go index 488ce62ad19..a815e047756 100644 --- a/internal/service/grafana/generate.go +++ b/internal/service/grafana/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ServiceTagsMap -UpdateTags -KVTValues -SkipTypesImp //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! 
Do not add anything else to this file. diff --git a/internal/service/grafana/license_association.go b/internal/service/grafana/license_association.go index 4836db77876..1a5aabf5150 100644 --- a/internal/service/grafana/license_association.go +++ b/internal/service/grafana/license_association.go @@ -8,19 +8,21 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/grafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_grafana_license_association") -func ResourceLicenseAssociation() *schema.Resource { +// @SDKResource("aws_grafana_license_association", name="License Association") +func resourceLicenseAssociation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLicenseAssociationCreate, ReadWithoutTimeout: resourceLicenseAssociationRead, @@ -45,10 +47,10 @@ func ResourceLicenseAssociation() *schema.Resource { Computed: true, }, "license_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(managedgrafana.LicenseType_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.LicenseType](), }, "workspace_id": { Type: 
schema.TypeString, @@ -61,21 +63,21 @@ func ResourceLicenseAssociation() *schema.Resource { func resourceLicenseAssociationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) - input := &managedgrafana.AssociateLicenseInput{ - LicenseType: aws.String(d.Get("license_type").(string)), - WorkspaceId: aws.String(d.Get("workspace_id").(string)), + workspaceID := d.Get("workspace_id").(string) + input := &grafana.AssociateLicenseInput{ + LicenseType: awstypes.LicenseType(d.Get("license_type").(string)), + WorkspaceId: aws.String(workspaceID), } - log.Printf("[DEBUG] Creating Grafana License Association: %s", input) - output, err := conn.AssociateLicenseWithContext(ctx, input) + output, err := conn.AssociateLicense(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Grafana License Association: %s", err) + return sdkdiag.AppendErrorf(diags, "creating Grafana License Association (%s): %s", workspaceID, err) } - d.SetId(aws.StringValue(output.Workspace.Id)) + d.SetId(aws.ToString(output.Workspace.Id)) if _, err := waitLicenseAssociationCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Grafana License Association (%s) create: %s", d.Id(), err) @@ -86,9 +88,9 @@ func resourceLicenseAssociationCreate(ctx context.Context, d *schema.ResourceDat func resourceLicenseAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) - workspace, err := FindLicensedWorkspaceByID(ctx, conn, d.Id()) + workspace, err := findLicensedWorkspaceByID(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] Grafana License Association (%s) 
not found, removing from state", d.Id()) @@ -118,15 +120,15 @@ func resourceLicenseAssociationRead(ctx context.Context, d *schema.ResourceData, func resourceLicenseAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) log.Printf("[DEBUG] Deleting Grafana License Association: %s", d.Id()) - _, err := conn.DisassociateLicenseWithContext(ctx, &managedgrafana.DisassociateLicenseInput{ - LicenseType: aws.String(d.Get("license_type").(string)), + _, err := conn.DisassociateLicense(ctx, &grafana.DisassociateLicenseInput{ + LicenseType: awstypes.LicenseType(d.Get("license_type").(string)), WorkspaceId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, managedgrafana.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -140,3 +142,34 @@ func resourceLicenseAssociationDelete(ctx context.Context, d *schema.ResourceDat return diags } + +func findLicensedWorkspaceByID(ctx context.Context, conn *grafana.Client, id string) (*awstypes.WorkspaceDescription, error) { + output, err := findWorkspaceByID(ctx, conn, id) + + if err != nil { + return nil, err + } + + if output.LicenseType == "" { + return nil, &retry.NotFoundError{} + } + + return output, nil +} + +func waitLicenseAssociationCreated(ctx context.Context, conn *grafana.Client, id string, timeout time.Duration) (*awstypes.WorkspaceDescription, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.WorkspaceStatusUpgrading), + Target: enum.Slice(awstypes.WorkspaceStatusActive), + Refresh: statusWorkspace(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.WorkspaceDescription); ok { + return output, err + } + + return nil, err +} diff --git 
a/internal/service/grafana/license_association_test.go b/internal/service/grafana/license_association_test.go index 5e1052aeb02..5cae0e194f6 100644 --- a/internal/service/grafana/license_association_test.go +++ b/internal/service/grafana/license_association_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/managedgrafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -20,23 +20,25 @@ import ( ) func testAccLicenseAssociation_freeTrial(t *testing.T) { + acctest.Skip(t, "ENTERPRISE_FREE_TRIAL has been deprecated and is no longer offered") + ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_license_association.test" workspaceResourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckLicenseAssociationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { - Config: testAccLicenseAssociationConfig_basic(rName, managedgrafana.LicenseTypeEnterpriseFreeTrial), + Config: testAccLicenseAssociationConfig_basic(rName, string(awstypes.LicenseTypeEnterpriseFreeTrial)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckLicenseAssociationExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, "free_trial_expiration"), - resource.TestCheckResourceAttr(resourceName, "license_type", managedgrafana.LicenseTypeEnterpriseFreeTrial), + 
resource.TestCheckResourceAttr(resourceName, "license_type", string(awstypes.LicenseTypeEnterpriseFreeTrial)), resource.TestCheckResourceAttrPair(resourceName, "workspace_id", workspaceResourceName, names.AttrID), ), }, @@ -65,11 +67,7 @@ func testAccCheckLicenseAssociationExists(ctx context.Context, name string) reso return fmt.Errorf("Not found: %s", name) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Grafana Workspace ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) _, err := tfgrafana.FindLicensedWorkspaceByID(ctx, conn, rs.Primary.ID) @@ -79,7 +77,7 @@ func testAccCheckLicenseAssociationExists(ctx context.Context, name string) reso func testAccCheckLicenseAssociationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_grafana_license_association" { diff --git a/internal/service/grafana/role_association.go b/internal/service/grafana/role_association.go index 5707f095413..aae7c5d14df 100644 --- a/internal/service/grafana/role_association.go +++ b/internal/service/grafana/role_association.go @@ -9,19 +9,22 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/grafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" 
"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_grafana_role_association") -func ResourceRoleAssociation() *schema.Resource { +// @SDKResource("aws_grafana_role_association", name="Workspace Role Association") +func resourceRoleAssociation() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRoleAssociationUpsert, ReadWithoutTimeout: resourceRoleAssociationRead, @@ -59,67 +62,50 @@ func ResourceRoleAssociation() *schema.Resource { func resourceRoleAssociationUpsert(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) - role := d.Get(names.AttrRole).(string) + role := awstypes.Role(d.Get(names.AttrRole).(string)) workspaceID := d.Get("workspace_id").(string) + id := fmt.Sprintf("%s/%s", workspaceID, role) - updateInstructions := make([]*managedgrafana.UpdateInstruction, 0) + updateInstructions := make([]awstypes.UpdateInstruction, 0) if v, ok := d.GetOk("user_ids"); ok && v.(*schema.Set).Len() > 0 { - typeSsoUser := managedgrafana.UserTypeSsoUser - updateInstructions = populateUpdateInstructions(role, flex.ExpandStringSet(v.(*schema.Set)), managedgrafana.UpdateActionAdd, typeSsoUser, updateInstructions) + updateInstructions = populateUpdateInstructions(role, flex.ExpandStringSet(v.(*schema.Set)), awstypes.UpdateActionAdd, awstypes.UserTypeSsoUser, updateInstructions) } if v, ok := d.GetOk("group_ids"); ok && v.(*schema.Set).Len() > 0 { - typeSsoUser := managedgrafana.UserTypeSsoGroup - updateInstructions = populateUpdateInstructions(role, flex.ExpandStringSet(v.(*schema.Set)), managedgrafana.UpdateActionAdd, typeSsoUser, updateInstructions) + updateInstructions = 
populateUpdateInstructions(role, flex.ExpandStringSet(v.(*schema.Set)), awstypes.UpdateActionAdd, awstypes.UserTypeSsoGroup, updateInstructions) } - input := &managedgrafana.UpdatePermissionsInput{ + input := &grafana.UpdatePermissionsInput{ UpdateInstructionBatch: updateInstructions, WorkspaceId: aws.String(workspaceID), } - log.Printf("[DEBUG] Creating Grafana Workspace Role Association: %s", input) - response, err := conn.UpdatePermissionsWithContext(ctx, input) + output, err := conn.UpdatePermissions(ctx, input) - for _, updateError := range response.Errors { - return sdkdiag.AppendErrorf(diags, "creating Grafana Workspace Role Association: %s", aws.StringValue(updateError.Message)) + if err == nil { + err = updatesError(output.Errors) } if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Grafana Workspace Role Association: %s", err) + return sdkdiag.AppendErrorf(diags, "updating Grafana Workspace Role Association (%s): %s", id, err) } - if d.Id() == "" { - d.SetId(fmt.Sprintf("%s/%s", workspaceID, role)) + if d.IsNewResource() { + d.SetId(id) } return append(diags, resourceRoleAssociationRead(ctx, d, meta)...) 
} -func populateUpdateInstructions(role string, list []*string, action string, typeSsoUser string, updateInstructions []*managedgrafana.UpdateInstruction) []*managedgrafana.UpdateInstruction { - users := make([]*managedgrafana.User, len(list)) - for i := 0; i < len(users); i++ { - users[i] = &managedgrafana.User{ - Id: list[i], - Type: aws.String(typeSsoUser), - } - } - updateInstructions = append(updateInstructions, &managedgrafana.UpdateInstruction{ - Action: aws.String(action), - Role: aws.String(role), - Users: users, - }) - - return updateInstructions -} - func resourceRoleAssociationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) - roleAssociations, err := FindRoleAssociationsByRoleAndWorkspaceID(ctx, conn, d.Get(names.AttrRole).(string), d.Get("workspace_id").(string)) + role := awstypes.Role(d.Get(names.AttrRole).(string)) + workspaceID := d.Get("workspace_id").(string) + roleAssociations, err := findRoleAssociationsByTwoPartKey(ctx, conn, role, workspaceID) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Grafana Workspace Role Association %s not found, removing from state", d.Id()) @@ -131,34 +117,39 @@ func resourceRoleAssociationRead(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "reading Grafana Workspace Role Association (%s): %s", d.Id(), err) } - d.Set("group_ids", roleAssociations[managedgrafana.UserTypeSsoGroup]) - d.Set("user_ids", roleAssociations[managedgrafana.UserTypeSsoUser]) + d.Set("group_ids", roleAssociations[awstypes.UserTypeSsoGroup]) + d.Set("user_ids", roleAssociations[awstypes.UserTypeSsoUser]) return diags } func resourceRoleAssociationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn 
:= meta.(*conns.AWSClient).GrafanaClient(ctx) + + role := awstypes.Role(d.Get(names.AttrRole).(string)) + workspaceID := d.Get("workspace_id").(string) - updateInstructions := make([]*managedgrafana.UpdateInstruction, 0) + updateInstructions := make([]awstypes.UpdateInstruction, 0) if v, ok := d.GetOk("user_ids"); ok && v.(*schema.Set).Len() > 0 { - typeSsoUser := managedgrafana.UserTypeSsoUser - updateInstructions = populateUpdateInstructions(d.Get(names.AttrRole).(string), flex.ExpandStringSet(v.(*schema.Set)), managedgrafana.UpdateActionRevoke, typeSsoUser, updateInstructions) + updateInstructions = populateUpdateInstructions(role, flex.ExpandStringSet(v.(*schema.Set)), awstypes.UpdateActionRevoke, awstypes.UserTypeSsoUser, updateInstructions) } if v, ok := d.GetOk("group_ids"); ok && v.(*schema.Set).Len() > 0 { - typeSsoUser := managedgrafana.UserTypeSsoGroup - updateInstructions = populateUpdateInstructions(d.Get(names.AttrRole).(string), flex.ExpandStringSet(v.(*schema.Set)), managedgrafana.UpdateActionRevoke, typeSsoUser, updateInstructions) + updateInstructions = populateUpdateInstructions(role, flex.ExpandStringSet(v.(*schema.Set)), awstypes.UpdateActionRevoke, awstypes.UserTypeSsoGroup, updateInstructions) } - input := &managedgrafana.UpdatePermissionsInput{ + input := &grafana.UpdatePermissionsInput{ UpdateInstructionBatch: updateInstructions, - WorkspaceId: aws.String(d.Get("workspace_id").(string)), + WorkspaceId: aws.String(workspaceID), } - log.Printf("[DEBUG] Deleting Grafana Workspace Role Association: %s", input) - _, err := conn.UpdatePermissionsWithContext(ctx, input) + log.Printf("[DEBUG] Deleting Grafana Workspace Role Association: %s", d.Id()) + output, err := conn.UpdatePermissions(ctx, input) + + if err == nil { + err = updatesError(output.Errors) + } if err != nil { return sdkdiag.AppendErrorf(diags, "deleting Grafana Workspace Role Association: %s", err) @@ -166,3 +157,56 @@ func resourceRoleAssociationDelete(ctx context.Context, d 
*schema.ResourceData, return diags } + +func findRoleAssociationsByTwoPartKey(ctx context.Context, conn *grafana.Client, role awstypes.Role, workspaceID string) (map[awstypes.UserType][]string, error) { + input := &grafana.ListPermissionsInput{ + WorkspaceId: aws.String(workspaceID), + } + output := make(map[awstypes.UserType][]string, 0) + + pages := grafana.NewListPermissionsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.Permissions { + if v.Role == role { + userType := v.User.Type + output[userType] = append(output[userType], aws.ToString(v.User.Id)) + } + } + } + + if len(output) == 0 { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func populateUpdateInstructions(role awstypes.Role, list []*string, action awstypes.UpdateAction, typeSSOUser awstypes.UserType, updateInstructions []awstypes.UpdateInstruction) []awstypes.UpdateInstruction { + users := make([]awstypes.User, len(list)) + for i := 0; i < len(users); i++ { + users[i] = awstypes.User{ + Id: list[i], + Type: typeSSOUser, + } + } + updateInstructions = append(updateInstructions, awstypes.UpdateInstruction{ + Action: action, + Role: role, + Users: users, + }) + + return updateInstructions +} diff --git a/internal/service/grafana/role_association_test.go b/internal/service/grafana/role_association_test.go index cca20416025..1b9c0a8ff46 100644 --- a/internal/service/grafana/role_association_test.go +++ b/internal/service/grafana/role_association_test.go @@ -9,7 +9,7 @@ import ( "os" "testing" - "github.com/aws/aws-sdk-go/service/managedgrafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" 
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -28,7 +28,7 @@ func testAccRoleAssociation_usersAdmin(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - role := managedgrafana.RoleAdmin + role := string(awstypes.RoleAdmin) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_role_association.test" workspaceResourceName := "aws_grafana_workspace.test" @@ -37,7 +37,7 @@ func testAccRoleAssociation_usersAdmin(t *testing.T) { resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) acctest.PreCheckSSOAdminInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), @@ -66,7 +66,7 @@ func testAccRoleAssociation_usersEditor(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - role := managedgrafana.RoleEditor + role := string(awstypes.RoleEditor) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_role_association.test" workspaceResourceName := "aws_grafana_workspace.test" @@ -75,7 +75,7 @@ func testAccRoleAssociation_usersEditor(t *testing.T) { resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) acctest.PreCheckSSOAdminInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), @@ -104,7 +104,7 @@ func testAccRoleAssociation_groupsAdmin(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - role := managedgrafana.RoleAdmin + role := string(awstypes.RoleAdmin) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_role_association.test" workspaceResourceName := "aws_grafana_workspace.test" @@ -113,7 +113,7 @@ func 
testAccRoleAssociation_groupsAdmin(t *testing.T) { resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) acctest.PreCheckSSOAdminInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), @@ -142,7 +142,7 @@ func testAccRoleAssociation_groupsEditor(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - role := managedgrafana.RoleEditor + role := string(awstypes.RoleEditor) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_role_association.test" workspaceResourceName := "aws_grafana_workspace.test" @@ -151,7 +151,7 @@ func testAccRoleAssociation_groupsEditor(t *testing.T) { resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) acctest.PreCheckSSOAdminInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), @@ -185,7 +185,7 @@ func testAccRoleAssociation_usersAndGroupsAdmin(t *testing.T) { t.Skipf("Environment variable %s is not set", key) } - role := managedgrafana.RoleAdmin + role := string(awstypes.RoleAdmin) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_role_association.test" workspaceResourceName := "aws_grafana_workspace.test" @@ -194,7 +194,7 @@ func testAccRoleAssociation_usersAndGroupsAdmin(t *testing.T) { resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) acctest.PreCheckSSOAdminInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), @@ -230,7 +230,7 @@ func testAccRoleAssociation_usersAndGroupsEditor(t *testing.T) { t.Skipf("Environment variable %s is not set", 
key) } - role := managedgrafana.RoleEditor + role := string(awstypes.RoleEditor) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_role_association.test" workspaceResourceName := "aws_grafana_workspace.test" @@ -239,7 +239,7 @@ func testAccRoleAssociation_usersAndGroupsEditor(t *testing.T) { resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) acctest.PreCheckSSOAdminInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), @@ -300,9 +300,9 @@ func testAccCheckRoleAssociationExists(ctx context.Context, n string) resource.T return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) - _, err := tfgrafana.FindRoleAssociationsByRoleAndWorkspaceID(ctx, conn, rs.Primary.Attributes[names.AttrRole], rs.Primary.Attributes["workspace_id"]) + _, err := tfgrafana.FindRoleAssociationsByTwoPartKey(ctx, conn, awstypes.Role(rs.Primary.Attributes[names.AttrRole]), rs.Primary.Attributes["workspace_id"]) return err } @@ -310,14 +310,14 @@ func testAccCheckRoleAssociationExists(ctx context.Context, n string) resource.T func testAccCheckRoleAssociationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_grafana_role_association" { continue } - _, err := tfgrafana.FindRoleAssociationsByRoleAndWorkspaceID(ctx, conn, rs.Primary.Attributes[names.AttrRole], rs.Primary.Attributes["workspace_id"]) + _, err := tfgrafana.FindRoleAssociationsByTwoPartKey(ctx, conn, awstypes.Role(rs.Primary.Attributes[names.AttrRole]), 
rs.Primary.Attributes["workspace_id"]) if tfresource.NotFound(err) { continue diff --git a/internal/service/grafana/service_endpoint_resolver_gen.go b/internal/service/grafana/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..0b615b5eb0e --- /dev/null +++ b/internal/service/grafana/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package grafana + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + grafana_sdkv2 "github.com/aws/aws-sdk-go-v2/service/grafana" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ grafana_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver grafana_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: grafana_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params grafana_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = 
net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up grafana endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*grafana_sdkv2.Options) { + return func(o *grafana_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/grafana/service_endpoints_gen_test.go b/internal/service/grafana/service_endpoints_gen_test.go index 1a0b1b8a698..97e728f6c05 100644 --- a/internal/service/grafana/service_endpoints_gen_test.go +++ b/internal/service/grafana/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package grafana_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - managedgrafana_sdkv1 "github.com/aws/aws-sdk-go/service/managedgrafana" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + grafana_sdkv2 "github.com/aws/aws-sdk-go-v2/service/grafana" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/go-cty/cty" @@ -90,7 +95,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint 
on Config @@ -330,7 +335,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -351,55 +356,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := grafana_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(managedgrafana_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), grafana_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := grafana_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(managedgrafana_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), grafana_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.GrafanaConn(ctx) - - req, _ := 
client.ListWorkspacesRequest(&managedgrafana_sdkv1.ListWorkspacesInput{}) + client := meta.GrafanaClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListWorkspaces(ctx, &grafana_sdkv2.ListWorkspacesInput{}, + func(opts *grafana_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -466,16 +480,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: 
endpoint.String(), region: expectedCallRegion, } } @@ -600,6 +636,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a 
Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/grafana/service_package_gen.go b/internal/service/grafana/service_package_gen.go index ecdf48b4634..4bd1a7d34b9 100644 --- a/internal/service/grafana/service_package_gen.go +++ b/internal/service/grafana/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package grafana import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - managedgrafana_sdkv1 "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + grafana_sdkv2 "github.com/aws/aws-sdk-go-v2/service/grafana" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -22,14 +19,25 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { - return []*types.ServicePackageFrameworkResource{} + return []*types.ServicePackageFrameworkResource{ + { + Factory: newWorkspaceServiceAccountResource, + Name: "Workspace Service Account", + }, + { + Factory: newWorkspaceServiceAccountTokenResource, + Name: "Workspace Service Account Token", + }, + } } func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceWorkspace, + Factory: dataSourceWorkspace, TypeName: "aws_grafana_workspace", + Name: "Workspace", + Tags: &types.ServicePackageResourceTags{}, }, } } @@ -37,15 +45,17 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceLicenseAssociation, + Factory: resourceLicenseAssociation, TypeName: "aws_grafana_license_association", + Name: "License Association", }, { - Factory: ResourceRoleAssociation, + Factory: resourceRoleAssociation, TypeName: "aws_grafana_role_association", + Name: "Workspace Role Association", }, { 
- Factory: ResourceWorkspace, + Factory: resourceWorkspace, TypeName: "aws_grafana_workspace", Name: "Workspace", Tags: &types.ServicePackageResourceTags{ @@ -53,12 +63,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceWorkspaceAPIKey, + Factory: resourceWorkspaceAPIKey, TypeName: "aws_grafana_workspace_api_key", + Name: "Workspace API Key", }, { - Factory: ResourceWorkspaceSAMLConfiguration, + Factory: resourceWorkspaceSAMLConfiguration, TypeName: "aws_grafana_workspace_saml_configuration", + Name: "Grafana Workspace SAML Configuration", }, } } @@ -67,25 +79,14 @@ func (p *servicePackage) ServicePackageName() string { return names.Grafana } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*managedgrafana_sdkv1.ManagedGrafana, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*grafana_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return managedgrafana_sdkv1.New(sess.Copy(&cfg)), nil + return grafana_sdkv2.NewFromConfig(cfg, + grafana_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/grafana/status.go b/internal/service/grafana/status.go deleted file mode 100644 index 1d42b9e430d..00000000000 --- a/internal/service/grafana/status.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package grafana - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func statusWorkspaceStatus(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindWorkspaceByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} - -func statusWorkspaceSAMLConfiguration(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindSamlConfigurationByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.Status), nil - } -} diff --git a/internal/service/grafana/sweep.go b/internal/service/grafana/sweep.go index 8a6605113a6..65f7e31cd83 100644 --- a/internal/service/grafana/sweep.go +++ 
b/internal/service/grafana/sweep.go @@ -7,12 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/hashicorp/go-multierror" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/grafana" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -28,48 +27,37 @@ func sweepWorkSpaces(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.GrafanaConn(ctx) - + input := &grafana.ListWorkspacesInput{} + conn := client.GrafanaClient(ctx) sweepResources := make([]sweep.Sweepable, 0) - var errs *multierror.Error - input := &managedgrafana.ListWorkspacesInput{} + pages := grafana.NewListWorkspacesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - err = conn.ListWorkspacesPagesWithContext(ctx, input, func(page *managedgrafana.ListWorkspacesOutput, lastPage bool) bool { - if len(page.Workspaces) == 0 { - log.Printf("[INFO] No Grafana Workspaces to sweep") - return false + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Grafana Workspace sweep for %s: %s", region, err) + return nil } - for _, workspace := range page.Workspaces { - id := aws.StringValue(workspace.Id) - log.Printf("[INFO] Deleting Grafana Workspace: %s", id) - r := ResourceWorkspace() - d := r.Data(nil) - d.SetId(id) - if err != nil { - err := fmt.Errorf("reading Grafana Workspace (%s): %w", id, err) - errs = multierror.Append(errs, err) - continue - } + if err != nil { + return fmt.Errorf("error listing Grafana Workspaces (%s): %w", region, err) + } + + for _, v := range page.Workspaces { + r := resourceWorkspace() + d := r.Data(nil) + d.SetId(aws.ToString(v.Id)) sweepResources = 
append(sweepResources, sweep.NewSweepResource(r, d, client)) } - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("listing Grafana Workspace for %s: %w", region, err)) } - if err := sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - errs = multierror.Append(errs, fmt.Errorf("sweeping Grafana Workspace for %s: %w", region, err)) - } + err = sweep.SweepOrchestrator(ctx, sweepResources) - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Grafana Workspace sweep for %s: %s", region, errs) - return nil + if err != nil { + return fmt.Errorf("error sweeping Grafana Workspaces (%s): %w", region, err) } - return errs.ErrorOrNil() + return nil } diff --git a/internal/service/grafana/tags_gen.go b/internal/service/grafana/tags_gen.go index 5dd8becdadf..4492baba2e0 100644 --- a/internal/service/grafana/tags_gen.go +++ b/internal/service/grafana/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/aws/aws-sdk-go/service/managedgrafana/managedgrafanaiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/grafana" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists grafana service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn managedgrafanaiface.ManagedGrafanaAPI, identifier string) (tftags.KeyValueTags, error) { - input := &managedgrafana.ListTagsForResourceInput{ +func listTags(ctx context.Context, conn *grafana.Client, identifier string, optFns ...func(*grafana.Options)) (tftags.KeyValueTags, error) { + input := &grafana.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn managedgrafanaiface.ManagedGrafanaAPI, i // ListTags lists grafana service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).GrafanaConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).GrafanaClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns grafana service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from grafana service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns grafana service tags from Context. // nil is returned if there are no input tags. 
-func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets grafana service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates grafana service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn managedgrafanaiface.ManagedGrafanaAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *grafana.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*grafana.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -92,12 +91,12 @@ func updateTags(ctx context.Context, conn managedgrafanaiface.ManagedGrafanaAPI, removedTags := oldTags.Removed(newTags) removedTags = removedTags.IgnoreSystem(names.Grafana) if len(removedTags) > 0 { - input := &managedgrafana.UntagResourceInput{ + input := &grafana.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) 
if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -107,12 +106,12 @@ func updateTags(ctx context.Context, conn managedgrafanaiface.ManagedGrafanaAPI, updatedTags := oldTags.Updated(newTags) updatedTags = updatedTags.IgnoreSystem(names.Grafana) if len(updatedTags) > 0 { - input := &managedgrafana.TagResourceInput{ + input := &grafana.TagResourceInput{ ResourceArn: aws.String(identifier), Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn managedgrafanaiface.ManagedGrafanaAPI, // UpdateTags updates grafana service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).GrafanaConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).GrafanaClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/grafana/wait.go b/internal/service/grafana/wait.go deleted file mode 100644 index 9f0da986e6e..00000000000 --- a/internal/service/grafana/wait.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package grafana - -import ( - "context" - "time" - - "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" -) - -func waitWorkspaceCreated(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string, timeout time.Duration) (*managedgrafana.WorkspaceDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{managedgrafana.WorkspaceStatusCreating}, - Target: []string{managedgrafana.WorkspaceStatusActive}, - Refresh: statusWorkspaceStatus(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*managedgrafana.WorkspaceDescription); ok { - return output, err - } - - return nil, err -} - -func waitWorkspaceUpdated(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string, timeout time.Duration) (*managedgrafana.WorkspaceDescription, error) { //nolint:unparam - stateConf := &retry.StateChangeConf{ - Pending: []string{managedgrafana.WorkspaceStatusUpdating, managedgrafana.WorkspaceStatusVersionUpdating}, - Target: []string{managedgrafana.WorkspaceStatusActive}, - Refresh: statusWorkspaceStatus(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*managedgrafana.WorkspaceDescription); ok { - return output, err - } - - return nil, err -} - -func waitWorkspaceDeleted(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string, timeout time.Duration) (*managedgrafana.WorkspaceDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{managedgrafana.WorkspaceStatusDeleting}, - Target: []string{}, - Refresh: statusWorkspaceStatus(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*managedgrafana.WorkspaceDescription); ok { - return output, err - } - - return nil, err -} - -func 
waitLicenseAssociationCreated(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string, timeout time.Duration) (*managedgrafana.WorkspaceDescription, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{managedgrafana.WorkspaceStatusUpgrading}, - Target: []string{managedgrafana.WorkspaceStatusActive}, - Refresh: statusWorkspaceStatus(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*managedgrafana.WorkspaceDescription); ok { - return output, err - } - - return nil, err -} - -func waitWorkspaceSAMLConfigurationCreated(ctx context.Context, conn *managedgrafana.ManagedGrafana, id string, timeout time.Duration) (*managedgrafana.SamlAuthentication, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{managedgrafana.SamlConfigurationStatusNotConfigured}, - Target: []string{managedgrafana.SamlConfigurationStatusConfigured}, - Refresh: statusWorkspaceSAMLConfiguration(ctx, conn, id), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*managedgrafana.SamlAuthentication); ok { - return output, err - } - - return nil, err -} diff --git a/internal/service/grafana/workspace.go b/internal/service/grafana/workspace.go index 284c47feda9..9a581058e7d 100644 --- a/internal/service/grafana/workspace.go +++ b/internal/service/grafana/workspace.go @@ -9,16 +9,19 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/grafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -29,7 +32,7 @@ import ( // @SDKResource("aws_grafana_workspace", name="Workspace") // @Tags(identifierAttribute="arn") -func ResourceWorkspace() *schema.Resource { +func resourceWorkspace() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceWorkspaceCreate, ReadWithoutTimeout: resourceWorkspaceRead, @@ -47,9 +50,9 @@ func ResourceWorkspace() *schema.Resource { Schema: map[string]*schema.Schema{ "account_access_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(managedgrafana.AccountAccessType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.AccountAccessType](), }, names.AttrARN: { Type: schema.TypeString, @@ -60,8 +63,8 @@ func ResourceWorkspace() *schema.Resource { Required: true, ForceNew: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(managedgrafana.AuthenticationProviderTypes_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.AuthenticationProviderTypes](), }, }, names.AttrConfiguration: { @@ -80,8 +83,8 @@ func ResourceWorkspace() *schema.Resource { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(managedgrafana.DataSourceType_Values(), false), + Type: 
schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.DataSourceType](), }, }, names.AttrDescription: { @@ -127,8 +130,8 @@ func ResourceWorkspace() *schema.Resource { Type: schema.TypeList, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(managedgrafana.NotificationDestinationType_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.NotificationDestinationType](), }, }, "organization_role_name": { @@ -141,9 +144,9 @@ func ResourceWorkspace() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, "permission_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(managedgrafana.PermissionType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.PermissionType](), }, names.AttrRoleARN: { Type: schema.TypeString, @@ -190,13 +193,13 @@ func ResourceWorkspace() *schema.Resource { func resourceWorkspaceCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) - input := &managedgrafana.CreateWorkspaceInput{ - AccountAccessType: aws.String(d.Get("account_access_type").(string)), - AuthenticationProviders: flex.ExpandStringList(d.Get("authentication_providers").([]interface{})), + input := &grafana.CreateWorkspaceInput{ + AccountAccessType: awstypes.AccountAccessType(d.Get("account_access_type").(string)), + AuthenticationProviders: flex.ExpandStringyValueList[awstypes.AuthenticationProviderTypes](d.Get("authentication_providers").([]interface{})), ClientToken: aws.String(id.UniqueId()), - PermissionType: aws.String(d.Get("permission_type").(string)), + PermissionType: awstypes.PermissionType(d.Get("permission_type").(string)), Tags: getTagsIn(ctx), } @@ -205,7 +208,7 @@ func resourceWorkspaceCreate(ctx 
context.Context, d *schema.ResourceData, meta i } if v, ok := d.GetOk("data_sources"); ok { - input.WorkspaceDataSources = flex.ExpandStringList(v.([]interface{})) + input.WorkspaceDataSources = flex.ExpandStringyValueList[awstypes.DataSourceType](v.([]interface{})) } if v, ok := d.GetOk(names.AttrDescription); ok { @@ -225,7 +228,7 @@ func resourceWorkspaceCreate(ctx context.Context, d *schema.ResourceData, meta i } if v, ok := d.GetOk("notification_destinations"); ok { - input.WorkspaceNotificationDestinations = flex.ExpandStringList(v.([]interface{})) + input.WorkspaceNotificationDestinations = flex.ExpandStringyValueList[awstypes.NotificationDestinationType](v.([]interface{})) } if v, ok := d.GetOk("organization_role_name"); ok { @@ -233,7 +236,7 @@ func resourceWorkspaceCreate(ctx context.Context, d *schema.ResourceData, meta i } if v, ok := d.GetOk("organizational_units"); ok { - input.WorkspaceOrganizationalUnits = flex.ExpandStringList(v.([]interface{})) + input.WorkspaceOrganizationalUnits = flex.ExpandStringValueList(v.([]interface{})) } if v, ok := d.GetOk(names.AttrRoleARN); ok { @@ -248,13 +251,13 @@ func resourceWorkspaceCreate(ctx context.Context, d *schema.ResourceData, meta i input.VpcConfiguration = expandVPCConfiguration(v.([]interface{})) } - output, err := conn.CreateWorkspaceWithContext(ctx, input) + output, err := conn.CreateWorkspace(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Grafana Workspace: %s", err) } - d.SetId(aws.StringValue(output.Workspace.Id)) + d.SetId(aws.ToString(output.Workspace.Id)) if _, err := waitWorkspaceCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Grafana Workspace (%s) create: %s", d.Id(), err) @@ -265,9 +268,9 @@ func resourceWorkspaceCreate(ctx context.Context, d *schema.ResourceData, meta i func resourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags 
diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) - workspace, err := FindWorkspaceByID(ctx, conn, d.Id()) + workspace, err := findWorkspaceByID(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] Grafana Workspace (%s) not found, removing from state", d.Id()) @@ -283,7 +286,7 @@ func resourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta int // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonmanagedgrafana.html#amazonmanagedgrafana-resources-for-iam-policies. workspaceARN := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, - Service: managedgrafana.ServiceName, + Service: "grafana", Region: meta.(*conns.AWSClient).Region, AccountID: meta.(*conns.AWSClient).AccountID, Resource: fmt.Sprintf("/workspaces/%s", d.Id()), @@ -311,11 +314,11 @@ func resourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta int setTagsOut(ctx, workspace.Tags) - input := &managedgrafana.DescribeWorkspaceConfigurationInput{ + input := &grafana.DescribeWorkspaceConfigurationInput{ WorkspaceId: aws.String(d.Id()), } - output, err := conn.DescribeWorkspaceConfigurationWithContext(ctx, input) + output, err := conn.DescribeWorkspaceConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Grafana Workspace (%s) configuration: %s", d.Id(), err) @@ -328,19 +331,19 @@ func resourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta int func resourceWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) if d.HasChangesExcept(names.AttrConfiguration, "grafana_version", names.AttrTags, names.AttrTagsAll) { - input := &managedgrafana.UpdateWorkspaceInput{ + input := &grafana.UpdateWorkspaceInput{ WorkspaceId: 
aws.String(d.Id()), } if d.HasChange("account_access_type") { - input.AccountAccessType = aws.String(d.Get("account_access_type").(string)) + input.AccountAccessType = awstypes.AccountAccessType(d.Get("account_access_type").(string)) } if d.HasChange("data_sources") { - input.WorkspaceDataSources = flex.ExpandStringList(d.Get("data_sources").([]interface{})) + input.WorkspaceDataSources = flex.ExpandStringyValueList[awstypes.DataSourceType](d.Get("data_sources").([]interface{})) } if d.HasChange(names.AttrDescription) { @@ -362,7 +365,7 @@ func resourceWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta i } if d.HasChange("notification_destinations") { - input.WorkspaceNotificationDestinations = flex.ExpandStringList(d.Get("notification_destinations").([]interface{})) + input.WorkspaceNotificationDestinations = flex.ExpandStringyValueList[awstypes.NotificationDestinationType](d.Get("notification_destinations").([]interface{})) } if d.HasChange("organization_role_name") { @@ -370,11 +373,11 @@ func resourceWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta i } if d.HasChange("organizational_units") { - input.WorkspaceOrganizationalUnits = flex.ExpandStringList(d.Get("organizational_units").([]interface{})) + input.WorkspaceOrganizationalUnits = flex.ExpandStringValueList(d.Get("organizational_units").([]interface{})) } if d.HasChange("permission_type") { - input.PermissionType = aws.String(d.Get("permission_type").(string)) + input.PermissionType = awstypes.PermissionType(d.Get("permission_type").(string)) } if d.HasChange(names.AttrRoleARN) { @@ -395,7 +398,7 @@ func resourceWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta i } } - _, err := conn.UpdateWorkspaceWithContext(ctx, input) + _, err := conn.UpdateWorkspace(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Grafana Workspace (%s): %s", d.Id(), err) @@ -407,7 +410,7 @@ func resourceWorkspaceUpdate(ctx context.Context, d 
*schema.ResourceData, meta i } if d.HasChanges(names.AttrConfiguration, "grafana_version") { - input := &managedgrafana.UpdateWorkspaceConfigurationInput{ + input := &grafana.UpdateWorkspaceConfigurationInput{ Configuration: aws.String(d.Get(names.AttrConfiguration).(string)), WorkspaceId: aws.String(d.Id()), } @@ -416,7 +419,7 @@ func resourceWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta i input.GrafanaVersion = aws.String(d.Get("grafana_version").(string)) } - _, err := conn.UpdateWorkspaceConfigurationWithContext(ctx, input) + _, err := conn.UpdateWorkspaceConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Grafana Workspace (%s) configuration: %s", d.Id(), err) @@ -432,14 +435,14 @@ func resourceWorkspaceUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceWorkspaceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) log.Printf("[DEBUG] Deleting Grafana Workspace: %s", d.Id()) - _, err := conn.DeleteWorkspaceWithContext(ctx, &managedgrafana.DeleteWorkspaceInput{ + _, err := conn.DeleteWorkspace(ctx, &grafana.DeleteWorkspaceInput{ WorkspaceId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, managedgrafana.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -454,74 +457,156 @@ func resourceWorkspaceDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func expandVPCConfiguration(cfg []interface{}) *managedgrafana.VpcConfiguration { - if len(cfg) < 1 { - return nil +func findWorkspaceByID(ctx context.Context, conn *grafana.Client, id string) (*awstypes.WorkspaceDescription, error) { + input := &grafana.DescribeWorkspaceInput{ + WorkspaceId: aws.String(id), } - conf := cfg[0].(map[string]interface{}) + output, err := 
conn.DescribeWorkspace(ctx, input) - out := managedgrafana.VpcConfiguration{} + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } - if v, ok := conf[names.AttrSecurityGroupIDs].(*schema.Set); ok && v.Len() > 0 { - out.SecurityGroupIds = flex.ExpandStringSet(v) + if err != nil { + return nil, err } - if v, ok := conf[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { - out.SubnetIds = flex.ExpandStringSet(v) + if output == nil || output.Workspace == nil { + return nil, tfresource.NewEmptyResultError(input) } - return &out + return output.Workspace, nil } -func flattenVPCConfiguration(rs *managedgrafana.VpcConfiguration) []interface{} { - if rs == nil { - return []interface{}{} +func statusWorkspace(ctx context.Context, conn *grafana.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findWorkspaceByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil } +} - m := make(map[string]interface{}) - if rs.SecurityGroupIds != nil { - m[names.AttrSecurityGroupIDs] = flex.FlattenStringSet(rs.SecurityGroupIds) +func waitWorkspaceCreated(ctx context.Context, conn *grafana.Client, id string, timeout time.Duration) (*awstypes.WorkspaceDescription, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.WorkspaceStatusCreating), + Target: enum.Slice(awstypes.WorkspaceStatusActive), + Refresh: statusWorkspace(ctx, conn, id), + Timeout: timeout, } - if rs.SubnetIds != nil { - m[names.AttrSubnetIDs] = flex.FlattenStringSet(rs.SubnetIds) + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.WorkspaceDescription); ok { + return output, err } - return []interface{}{m} + return nil, err } -func expandNetworkAccessControl(cfg []interface{}) 
*managedgrafana.NetworkAccessConfiguration { - if len(cfg) < 1 { - return nil +func waitWorkspaceUpdated(ctx context.Context, conn *grafana.Client, id string, timeout time.Duration) (*awstypes.WorkspaceDescription, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.WorkspaceStatusUpdating, awstypes.WorkspaceStatusVersionUpdating), + Target: enum.Slice(awstypes.WorkspaceStatusActive), + Refresh: statusWorkspace(ctx, conn, id), + Timeout: timeout, } - conf := cfg[0].(map[string]interface{}) + outputRaw, err := stateConf.WaitForStateContext(ctx) - out := managedgrafana.NetworkAccessConfiguration{} + if output, ok := outputRaw.(*awstypes.WorkspaceDescription); ok { + return output, err + } - if v, ok := conf["prefix_list_ids"].(*schema.Set); ok && v.Len() > 0 { - out.PrefixListIds = flex.ExpandStringSet(v) + return nil, err +} + +func waitWorkspaceDeleted(ctx context.Context, conn *grafana.Client, id string, timeout time.Duration) (*awstypes.WorkspaceDescription, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.WorkspaceStatusDeleting), + Target: []string{}, + Refresh: statusWorkspace(ctx, conn, id), + Timeout: timeout, } - if v, ok := conf["vpce_ids"].(*schema.Set); ok && v.Len() > 0 { - out.VpceIds = flex.ExpandStringSet(v) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.WorkspaceDescription); ok { + return output, err } - return &out + return nil, err } -func flattenNetworkAccessControl(rs *managedgrafana.NetworkAccessConfiguration) []interface{} { - if rs == nil { +func expandVPCConfiguration(tfList []interface{}) *awstypes.VpcConfiguration { + if len(tfList) < 1 { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + apiObject := awstypes.VpcConfiguration{} + + if v, ok := tfMap[names.AttrSecurityGroupIDs].(*schema.Set); ok && v.Len() > 0 { + apiObject.SecurityGroupIds = flex.ExpandStringValueSet(v) + } + + if v, ok := 
tfMap[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { + apiObject.SubnetIds = flex.ExpandStringValueSet(v) + } + + return &apiObject +} + +func flattenVPCConfiguration(apiObject *awstypes.VpcConfiguration) []interface{} { + if apiObject == nil { return []interface{}{} } - m := make(map[string]interface{}) - if rs.PrefixListIds != nil { - m["prefix_list_ids"] = flex.FlattenStringSet(rs.PrefixListIds) + tfMap := make(map[string]interface{}) + tfMap[names.AttrSecurityGroupIDs] = apiObject.SecurityGroupIds + tfMap[names.AttrSubnetIDs] = apiObject.SubnetIds + + return []interface{}{tfMap} +} + +func expandNetworkAccessControl(tfList []interface{}) *awstypes.NetworkAccessConfiguration { + if len(tfList) < 1 { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + apiObject := awstypes.NetworkAccessConfiguration{} + + if v, ok := tfMap["prefix_list_ids"].(*schema.Set); ok && v.Len() > 0 { + apiObject.PrefixListIds = flex.ExpandStringValueSet(v) } - if rs.VpceIds != nil { - m["vpce_ids"] = flex.FlattenStringSet(rs.VpceIds) + + if v, ok := tfMap["vpce_ids"].(*schema.Set); ok && v.Len() > 0 { + apiObject.VpceIds = flex.ExpandStringValueSet(v) } - return []interface{}{m} + return &apiObject +} + +func flattenNetworkAccessControl(apiObject *awstypes.NetworkAccessConfiguration) []interface{} { + if apiObject == nil { + return []interface{}{} + } + + tfMap := make(map[string]interface{}) + tfMap["prefix_list_ids"] = apiObject.PrefixListIds + tfMap["vpce_ids"] = apiObject.VpceIds + + return []interface{}{tfMap} } diff --git a/internal/service/grafana/workspace_api_key.go b/internal/service/grafana/workspace_api_key.go index 1238babf109..a67875e0c46 100644 --- a/internal/service/grafana/workspace_api_key.go +++ b/internal/service/grafana/workspace_api_key.go @@ -9,19 +9,21 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + 
"github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/grafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_grafana_workspace_api_key") -func ResourceWorkspaceAPIKey() *schema.Resource { +// @SDKResource("aws_grafana_workspace_api_key", name="Workspace API Key") +func resourceWorkspaceAPIKey() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceWorkspaceAPIKeyCreate, ReadWithoutTimeout: schema.NoopContext, @@ -41,10 +43,10 @@ func ResourceWorkspaceAPIKey() *schema.Resource { ValidateFunc: validation.StringLenBetween(1, 100), }, "key_role": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(managedgrafana.Role_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.Role](), }, "seconds_to_live": { Type: schema.TypeInt, @@ -62,20 +64,19 @@ func ResourceWorkspaceAPIKey() *schema.Resource { func resourceWorkspaceAPIKeyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) keyName := d.Get("key_name").(string) workspaceID := d.Get("workspace_id").(string) - id := WorkspaceAPIKeyCreateResourceID(workspaceID, keyName) - input := &managedgrafana.CreateWorkspaceApiKeyInput{ + id := workspaceAPIKeyCreateResourceID(workspaceID, 
keyName) + input := &grafana.CreateWorkspaceApiKeyInput{ KeyName: aws.String(keyName), KeyRole: aws.String(d.Get("key_role").(string)), - SecondsToLive: aws.Int64(int64(d.Get("seconds_to_live").(int))), + SecondsToLive: aws.Int32(int32(d.Get("seconds_to_live").(int))), WorkspaceId: aws.String(workspaceID), } - log.Printf("[DEBUG] Creating Grafana Workspace API Key: %s", input) - output, err := conn.CreateWorkspaceApiKeyWithContext(ctx, input) + output, err := conn.CreateWorkspaceApiKey(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Grafana Workspace API Key (%s): %s", id, err) @@ -89,21 +90,20 @@ func resourceWorkspaceAPIKeyCreate(ctx context.Context, d *schema.ResourceData, func resourceWorkspaceAPIKeyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) - - workspaceID, keyName, err := WorkspaceAPIKeyParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) + workspaceID, keyName, err := workspaceAPIKeyParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Grafana Workspace API Key (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting Grafana Workspace API Key: %s", d.Id()) - _, err = conn.DeleteWorkspaceApiKeyWithContext(ctx, &managedgrafana.DeleteWorkspaceApiKeyInput{ + _, err = conn.DeleteWorkspaceApiKey(ctx, &grafana.DeleteWorkspaceApiKeyInput{ KeyName: aws.String(keyName), WorkspaceId: aws.String(workspaceID), }) - if tfawserr.ErrCodeEquals(err, managedgrafana.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -114,20 +114,20 @@ func resourceWorkspaceAPIKeyDelete(ctx context.Context, d *schema.ResourceData, return diags } -const workspaceAPIKeyIDSeparator = "/" +const workspaceAPIKeyResourceIDSeparator = "/" -func WorkspaceAPIKeyCreateResourceID(workspaceID, keyName 
string) string { +func workspaceAPIKeyCreateResourceID(workspaceID, keyName string) string { parts := []string{workspaceID, keyName} - id := strings.Join(parts, workspaceAPIKeyIDSeparator) + id := strings.Join(parts, workspaceAPIKeyResourceIDSeparator) return id } -func WorkspaceAPIKeyParseResourceID(id string) (string, string, error) { - parts := strings.Split(id, workspaceAPIKeyIDSeparator) +func workspaceAPIKeyParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, workspaceAPIKeyResourceIDSeparator) if len(parts) != 2 || parts[0] == "" || parts[1] == "" { - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected workspace-id%[2]skey-name", id, workspaceAPIKeyIDSeparator) + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected workspace-id%[2]skey-name", id, workspaceAPIKeyResourceIDSeparator) } return parts[0], parts[1], nil diff --git a/internal/service/grafana/workspace_api_key_test.go b/internal/service/grafana/workspace_api_key_test.go index 54f859ab420..f52a98d631b 100644 --- a/internal/service/grafana/workspace_api_key_test.go +++ b/internal/service/grafana/workspace_api_key_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/managedgrafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -21,7 +21,7 @@ func testAccWorkspaceAPIKey_basic(t *testing.T) { workspaceResourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: 
acctest.CheckDestroyNoop, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -31,7 +31,7 @@ func testAccWorkspaceAPIKey_basic(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttrSet(resourceName, names.AttrKey), resource.TestCheckResourceAttr(resourceName, "key_name", rName), - resource.TestCheckResourceAttr(resourceName, "key_role", managedgrafana.RoleEditor), + resource.TestCheckResourceAttr(resourceName, "key_role", string(awstypes.RoleEditor)), resource.TestCheckResourceAttr(resourceName, "seconds_to_live", "3600"), resource.TestCheckResourceAttrPair(resourceName, "workspace_id", workspaceResourceName, names.AttrID), ), diff --git a/internal/service/grafana/workspace_data_source.go b/internal/service/grafana/workspace_data_source.go index 1f3b6362a17..a42e515f87b 100644 --- a/internal/service/grafana/workspace_data_source.go +++ b/internal/service/grafana/workspace_data_source.go @@ -8,8 +8,7 @@ import ( "fmt" "time" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/managedgrafana" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,8 +17,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_grafana_workspace") -func DataSourceWorkspace() *schema.Resource { +// @SDKDataSource("aws_grafana_workspace", name="Workspace") +// @Tags +func dataSourceWorkspace() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceWorkspaceRead, @@ -111,11 +111,10 @@ func DataSourceWorkspace() *schema.Resource { func dataSourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := 
meta.(*conns.AWSClient).GrafanaClient(ctx) workspaceID := d.Get("workspace_id").(string) - workspace, err := FindWorkspaceByID(ctx, conn, workspaceID) + workspace, err := findWorkspaceByID(ctx, conn, workspaceID) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Grafana Workspace (%s): %s", workspaceID, err) @@ -126,7 +125,7 @@ func dataSourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta i // https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonmanagedgrafana.html#amazonmanagedgrafana-resources-for-iam-policies. workspaceARN := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, - Service: managedgrafana.ServiceName, + Service: "grafana", Region: meta.(*conns.AWSClient).Region, AccountID: meta.(*conns.AWSClient).AccountID, Resource: fmt.Sprintf("/workspaces/%s", d.Id()), @@ -149,9 +148,7 @@ func dataSourceWorkspaceRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("stack_set_name", workspace.StackSetName) d.Set(names.AttrStatus, workspace.Status) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, workspace.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOut(ctx, workspace.Tags) return diags } diff --git a/internal/service/grafana/workspace_data_source_test.go b/internal/service/grafana/workspace_data_source_test.go index b0a287fbd96..28827749e56 100644 --- a/internal/service/grafana/workspace_data_source_test.go +++ b/internal/service/grafana/workspace_data_source_test.go @@ -6,7 +6,6 @@ package grafana_test import ( "testing" - "github.com/aws/aws-sdk-go/service/managedgrafana" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -20,7 +19,7 @@ func testAccWorkspaceDataSource_basic(t *testing.T) { dataSourceName := "data.aws_grafana_workspace.test" 
resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: nil, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, diff --git a/internal/service/grafana/workspace_saml_configuration.go b/internal/service/grafana/workspace_saml_configuration.go index d70acd6b3da..2c3144860f4 100644 --- a/internal/service/grafana/workspace_saml_configuration.go +++ b/internal/service/grafana/workspace_saml_configuration.go @@ -8,19 +8,23 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/grafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_grafana_workspace_saml_configuration") -func ResourceWorkspaceSAMLConfiguration() *schema.Resource { +// @SDKResource("aws_grafana_workspace_saml_configuration", name="Grafana Workspace SAML Configuration") +func resourceWorkspaceSAMLConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceWorkspaceSAMLConfigurationUpsert, ReadWithoutTimeout: 
resourceWorkspaceSAMLConfigurationRead, @@ -107,47 +111,47 @@ func ResourceWorkspaceSAMLConfiguration() *schema.Resource { func resourceWorkspaceSAMLConfigurationUpsert(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) - d.SetId(d.Get("workspace_id").(string)) - workspace, err := FindWorkspaceByID(ctx, conn, d.Id()) + workspaceID := d.Get("workspace_id").(string) + workspace, err := findWorkspaceByID(ctx, conn, workspaceID) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Grafana Workspace (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Grafana Workspace (%s): %s", workspaceID, err) } authenticationProviders := workspace.Authentication.Providers - roleValues := &managedgrafana.RoleValues{ - Editor: flex.ExpandStringList(d.Get("editor_role_values").([]interface{})), + roleValues := &awstypes.RoleValues{ + Editor: flex.ExpandStringValueList(d.Get("editor_role_values").([]interface{})), } if v, ok := d.GetOk("admin_role_values"); ok { - roleValues.Admin = flex.ExpandStringList(v.([]interface{})) + roleValues.Admin = flex.ExpandStringValueList(v.([]interface{})) } - samlConfiguration := &managedgrafana.SamlConfiguration{ + samlConfiguration := &awstypes.SamlConfiguration{ RoleValues: roleValues, } if v, ok := d.GetOk("allowed_organizations"); ok { - samlConfiguration.AllowedOrganizations = flex.ExpandStringList(v.([]interface{})) + samlConfiguration.AllowedOrganizations = flex.ExpandStringValueList(v.([]interface{})) } if v, ok := d.GetOk("login_validity_duration"); ok { - samlConfiguration.LoginValidityDuration = aws.Int64(int64(v.(int))) + samlConfiguration.LoginValidityDuration = int32(v.(int)) } - var assertionAttributes *managedgrafana.AssertionAttributes + var assertionAttributes *awstypes.AssertionAttributes if v, ok := d.GetOk("email_assertion"); ok { - 
assertionAttributes = &managedgrafana.AssertionAttributes{ + assertionAttributes = &awstypes.AssertionAttributes{ Email: aws.String(v.(string)), } } if v, ok := d.GetOk("groups_assertion"); ok { if assertionAttributes == nil { - assertionAttributes = &managedgrafana.AssertionAttributes{} + assertionAttributes = &awstypes.AssertionAttributes{} } assertionAttributes.Groups = aws.String(v.(string)) @@ -155,7 +159,7 @@ func resourceWorkspaceSAMLConfigurationUpsert(ctx context.Context, d *schema.Res if v, ok := d.GetOk("login_assertion"); ok { if assertionAttributes == nil { - assertionAttributes = &managedgrafana.AssertionAttributes{} + assertionAttributes = &awstypes.AssertionAttributes{} } assertionAttributes.Login = aws.String(v.(string)) @@ -163,7 +167,7 @@ func resourceWorkspaceSAMLConfigurationUpsert(ctx context.Context, d *schema.Res if v, ok := d.GetOk("name_assertion"); ok { if assertionAttributes == nil { - assertionAttributes = &managedgrafana.AssertionAttributes{} + assertionAttributes = &awstypes.AssertionAttributes{} } assertionAttributes.Name = aws.String(v.(string)) @@ -171,7 +175,7 @@ func resourceWorkspaceSAMLConfigurationUpsert(ctx context.Context, d *schema.Res if v, ok := d.GetOk("org_assertion"); ok { if assertionAttributes == nil { - assertionAttributes = &managedgrafana.AssertionAttributes{} + assertionAttributes = &awstypes.AssertionAttributes{} } assertionAttributes.Org = aws.String(v.(string)) @@ -179,7 +183,7 @@ func resourceWorkspaceSAMLConfigurationUpsert(ctx context.Context, d *schema.Res if v, ok := d.GetOk("role_assertion"); ok { if assertionAttributes == nil { - assertionAttributes = &managedgrafana.AssertionAttributes{} + assertionAttributes = &awstypes.AssertionAttributes{} } assertionAttributes.Role = aws.String(v.(string)) @@ -189,36 +193,40 @@ func resourceWorkspaceSAMLConfigurationUpsert(ctx context.Context, d *schema.Res samlConfiguration.AssertionAttributes = assertionAttributes } - var idpMetadata *managedgrafana.IdpMetadata + 
var idpMetadata awstypes.IdpMetadata if v, ok := d.GetOk("idp_metadata_url"); ok { - idpMetadata = &managedgrafana.IdpMetadata{ - Url: aws.String(v.(string)), + idpMetadata = &awstypes.IdpMetadataMemberUrl{ + Value: v.(string), } } if v, ok := d.GetOk("idp_metadata_xml"); ok { - idpMetadata = &managedgrafana.IdpMetadata{ - Xml: aws.String(v.(string)), + idpMetadata = &awstypes.IdpMetadataMemberXml{ + Value: v.(string), } } samlConfiguration.IdpMetadata = idpMetadata - input := &managedgrafana.UpdateWorkspaceAuthenticationInput{ + input := &grafana.UpdateWorkspaceAuthenticationInput{ AuthenticationProviders: authenticationProviders, SamlConfiguration: samlConfiguration, - WorkspaceId: aws.String(d.Id()), + WorkspaceId: aws.String(workspaceID), } - _, err = conn.UpdateWorkspaceAuthenticationWithContext(ctx, input) + _, err = conn.UpdateWorkspaceAuthentication(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating Grafana Saml Configuration: %s", err) + return sdkdiag.AppendErrorf(diags, "creating Grafana Workspace SAML Configuration (%s): %s", workspaceID, err) + } + + if d.IsNewResource() { + d.SetId(workspaceID) } if _, err := waitWorkspaceSAMLConfigurationCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for Grafana Workspace Saml Configuration (%s) create: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for Grafana Workspace SAML Configuration (%s) create: %s", d.Id(), err) } return append(diags, resourceWorkspaceSAMLConfigurationRead(ctx, d, meta)...) 
@@ -226,18 +234,18 @@ func resourceWorkspaceSAMLConfigurationUpsert(ctx context.Context, d *schema.Res func resourceWorkspaceSAMLConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).GrafanaConn(ctx) + conn := meta.(*conns.AWSClient).GrafanaClient(ctx) - saml, err := FindSamlConfigurationByID(ctx, conn, d.Id()) + saml, err := findSAMLConfigurationByID(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { - log.Printf("[WARN] Grafana Workspace Saml Configuration (%s) not found, removing from state", d.Id()) + log.Printf("[WARN] Grafana Workspace SAML Configuration (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Grafana Workspace Saml Configuration (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading Grafana Workspace SAML Configuration (%s): %s", d.Id(), err) } d.Set("admin_role_values", saml.Configuration.RoleValues.Admin) @@ -245,8 +253,14 @@ func resourceWorkspaceSAMLConfigurationRead(ctx context.Context, d *schema.Resou d.Set("editor_role_values", saml.Configuration.RoleValues.Editor) d.Set("email_assertion", saml.Configuration.AssertionAttributes.Email) d.Set("groups_assertion", saml.Configuration.AssertionAttributes.Groups) - d.Set("idp_metadata_url", saml.Configuration.IdpMetadata.Url) - d.Set("idp_metadata_xml", saml.Configuration.IdpMetadata.Xml) + d.Set("idp_metadata_url", "") + d.Set("idp_metadata_xml", "") + switch v := saml.Configuration.IdpMetadata.(type) { + case *awstypes.IdpMetadataMemberUrl: + d.Set("idp_metadata_url", v.Value) + case *awstypes.IdpMetadataMemberXml: + d.Set("idp_metadata_xml", v.Value) + } d.Set("login_assertion", saml.Configuration.AssertionAttributes.Login) d.Set("login_validity_duration", saml.Configuration.LoginValidityDuration) d.Set("name_assertion", saml.Configuration.AssertionAttributes.Name) @@ 
-256,3 +270,68 @@ func resourceWorkspaceSAMLConfigurationRead(ctx context.Context, d *schema.Resou return diags } + +func findSAMLConfigurationByID(ctx context.Context, conn *grafana.Client, id string) (*awstypes.SamlAuthentication, error) { + input := &grafana.DescribeWorkspaceAuthenticationInput{ + WorkspaceId: aws.String(id), + } + + output, err := conn.DescribeWorkspaceAuthentication(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Authentication == nil || output.Authentication.Saml == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + if status := output.Authentication.Saml.Status; status == awstypes.SamlConfigurationStatusNotConfigured { + return nil, &retry.NotFoundError{ + Message: string(status), + LastRequest: input, + } + } + + return output.Authentication.Saml, nil +} + +func statusWorkspaceSAMLConfiguration(ctx context.Context, conn *grafana.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findSAMLConfigurationByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.Status), nil + } +} + +func waitWorkspaceSAMLConfigurationCreated(ctx context.Context, conn *grafana.Client, id string, timeout time.Duration) (*awstypes.SamlAuthentication, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{}, + Target: enum.Slice(awstypes.SamlConfigurationStatusConfigured), + Refresh: statusWorkspaceSAMLConfiguration(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*awstypes.SamlAuthentication); ok { + return output, err + } + + return nil, err +} diff --git a/internal/service/grafana/workspace_saml_configuration_test.go 
b/internal/service/grafana/workspace_saml_configuration_test.go index 8bc1f859482..ae059bd40c9 100644 --- a/internal/service/grafana/workspace_saml_configuration_test.go +++ b/internal/service/grafana/workspace_saml_configuration_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/managedgrafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -25,9 +25,9 @@ func testAccWorkspaceSAMLConfiguration_basic(t *testing.T) { workspaceResourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), - CheckDestroy: nil, + CheckDestroy: acctest.CheckDestroyNoop, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -39,7 +39,7 @@ func testAccWorkspaceSAMLConfiguration_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "editor_role_values.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "editor_role_values.0", "editor"), resource.TestCheckResourceAttrSet(resourceName, "idp_metadata_xml"), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, managedgrafana.SamlConfigurationStatusConfigured), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.SamlConfigurationStatusConfigured)), resource.TestCheckResourceAttrPair(resourceName, "workspace_id", workspaceResourceName, names.AttrID), ), }, @@ -54,9 +54,9 @@ func testAccWorkspaceSAMLConfiguration_loginValidity(t *testing.T) { workspaceResourceName := "aws_grafana_workspace.test" 
resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), - CheckDestroy: nil, + CheckDestroy: acctest.CheckDestroyNoop, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -68,7 +68,7 @@ func testAccWorkspaceSAMLConfiguration_loginValidity(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "editor_role_values.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "editor_role_values.0", "editor"), resource.TestCheckResourceAttrSet(resourceName, "idp_metadata_xml"), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, managedgrafana.SamlConfigurationStatusConfigured), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.SamlConfigurationStatusConfigured)), resource.TestCheckResourceAttr(resourceName, "login_validity_duration", "1440"), resource.TestCheckResourceAttrPair(resourceName, "workspace_id", workspaceResourceName, names.AttrID), ), @@ -84,9 +84,9 @@ func testAccWorkspaceSAMLConfiguration_assertions(t *testing.T) { workspaceResourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), - CheckDestroy: nil, + CheckDestroy: acctest.CheckDestroyNoop, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -98,7 +98,7 @@ func testAccWorkspaceSAMLConfiguration_assertions(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "editor_role_values.#", 
acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "editor_role_values.0", "editor"), resource.TestCheckResourceAttrSet(resourceName, "idp_metadata_xml"), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, managedgrafana.SamlConfigurationStatusConfigured), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.SamlConfigurationStatusConfigured)), resource.TestCheckResourceAttr(resourceName, "email_assertion", "mail"), resource.TestCheckResourceAttrSet(resourceName, "idp_metadata_xml"), resource.TestCheckResourceAttr(resourceName, "groups_assertion", "groups"), @@ -160,13 +160,9 @@ func testAccCheckWorkspaceSAMLConfigurationExists(ctx context.Context, name stri return fmt.Errorf("Not found: %s", name) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Grafana Workspace ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) - _, err := tfgrafana.FindSamlConfigurationByID(ctx, conn, rs.Primary.ID) + _, err := tfgrafana.FindSAMLConfigurationByID(ctx, conn, rs.Primary.ID) return err } diff --git a/internal/service/grafana/workspace_service_account.go b/internal/service/grafana/workspace_service_account.go new file mode 100644 index 00000000000..56941d5153e --- /dev/null +++ b/internal/service/grafana/workspace_service_account.go @@ -0,0 +1,259 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package grafana + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/grafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_grafana_workspace_service_account", name="Workspace Service Account") +func newWorkspaceServiceAccountResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &workspaceServiceAccountResource{}, nil +} + +type workspaceServiceAccountResource struct { + framework.ResourceWithConfigure + framework.WithNoUpdate + framework.WithImportByID +} + +func (*workspaceServiceAccountResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = 
"aws_grafana_workspace_service_account" +} + +func (r *workspaceServiceAccountResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "grafana_role": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.Role](), + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrID: framework.IDAttribute(), + names.AttrName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(128), + }, + }, + "service_account_id": framework.IDAttribute(), + "workspace_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *workspaceServiceAccountResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data workspaceServiceAccountResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().GrafanaClient(ctx) + + name := data.Name.ValueString() + input := &grafana.CreateWorkspaceServiceAccountInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + if response.Diagnostics.HasError() { + return + } + + output, err := conn.CreateWorkspaceServiceAccount(ctx, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Grafana Workspace Service Account (%s)", name), err.Error()) + + return + } + + // Set values for unknowns. + data.ServiceAccountID = fwflex.StringToFramework(ctx, output.Id) + data.setID() + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +func (r *workspaceServiceAccountResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data workspaceServiceAccountResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) + + return + } + + conn := r.Meta().GrafanaClient(ctx) + + output, err := findWorkspaceServiceAccountByTwoPartKey(ctx, conn, data.WorkspaceID.ValueString(), data.ServiceAccountID.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Grafana Workspace Service Account (%s)", data.ID.ValueString()), err.Error()) + + return + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + // Restore resource ID. + // It has been overwritten by the 'Id' field from the API response. + data.setID() + + // Role is returned from the API in lowercase. + data.GrafanaRole = fwtypes.StringEnumValueToUpper(output.GrafanaRole) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *workspaceServiceAccountResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data workspaceServiceAccountResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().GrafanaClient(ctx) + + input := &grafana.DeleteWorkspaceServiceAccountInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) 
+ if response.Diagnostics.HasError() { + return + } + _, err := conn.DeleteWorkspaceServiceAccount(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting Grafana Workspace Service Account (%s)", data.ID.ValueString()), err.Error()) + + return + } +} + +func findWorkspaceServiceAccount(ctx context.Context, conn *grafana.Client, input *grafana.ListWorkspaceServiceAccountsInput, filter tfslices.Predicate[*awstypes.ServiceAccountSummary]) (*awstypes.ServiceAccountSummary, error) { + output, err := findWorkspaceServiceAccounts(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findWorkspaceServiceAccounts(ctx context.Context, conn *grafana.Client, input *grafana.ListWorkspaceServiceAccountsInput, filter tfslices.Predicate[*awstypes.ServiceAccountSummary]) ([]awstypes.ServiceAccountSummary, error) { + var output []awstypes.ServiceAccountSummary + + pages := grafana.NewListWorkspaceServiceAccountsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.ServiceAccounts { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func findWorkspaceServiceAccountByTwoPartKey(ctx context.Context, conn *grafana.Client, workspaceID, serviceAccountID string) (*awstypes.ServiceAccountSummary, error) { + input := &grafana.ListWorkspaceServiceAccountsInput{ + WorkspaceId: aws.String(workspaceID), + } + + return findWorkspaceServiceAccount(ctx, conn, input, func(v *awstypes.ServiceAccountSummary) bool { + return aws.ToString(v.Id) == serviceAccountID + }) +} + +type workspaceServiceAccountResourceModel struct { + GrafanaRole 
fwtypes.StringEnum[awstypes.Role] `tfsdk:"grafana_role"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + ServiceAccountID types.String `tfsdk:"service_account_id"` + WorkspaceID types.String `tfsdk:"workspace_id"` +} + +const ( + workspaceServiceAccountResourceIDPartCount = 2 +) + +func (data *workspaceServiceAccountResourceModel) InitFromID() error { + id := data.ID.ValueString() + parts, err := flex.ExpandResourceId(id, workspaceServiceAccountResourceIDPartCount, false) + + if err != nil { + return err + } + + data.WorkspaceID = types.StringValue(parts[0]) + data.ServiceAccountID = types.StringValue(parts[1]) + + return nil +} + +func (data *workspaceServiceAccountResourceModel) setID() { + data.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{data.WorkspaceID.ValueString(), data.ServiceAccountID.ValueString()}, workspaceServiceAccountResourceIDPartCount, false))) +} diff --git a/internal/service/grafana/workspace_service_account_test.go b/internal/service/grafana/workspace_service_account_test.go new file mode 100644 index 00000000000..2f1a9512927 --- /dev/null +++ b/internal/service/grafana/workspace_service_account_test.go @@ -0,0 +1,136 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package grafana_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/grafana" + "github.com/aws/aws-sdk-go-v2/service/grafana/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfgrafana "github.com/hashicorp/terraform-provider-aws/internal/service/grafana" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGrafanaWorkspaceServiceAccount_basic(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_grafana_workspace_service_account.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + var v types.ServiceAccountSummary + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) + acctest.PreCheckSSOAdminInstances(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, grafana.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWorkspaceServiceAccountDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWorkspaceServiceAccountConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkspaceServiceAccountExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrSet(resourceName, "service_account_id"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGrafanaWorkspaceServiceAccount_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v types.ServiceAccountSummary + resourceName := "aws_grafana_workspace_service_account.test" + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) + acctest.PreCheckSSOAdminInstances(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWorkspaceServiceAccountDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWorkspaceServiceAccountConfig_basic(resourceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkspaceServiceAccountExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfgrafana.ResourceWorkspaceServiceAccount, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckWorkspaceServiceAccountExists(ctx context.Context, n string, v *types.ServiceAccountSummary) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) + + output, err := tfgrafana.FindWorkspaceServiceAccountByTwoPartKey(ctx, conn, rs.Primary.Attributes["workspace_id"], rs.Primary.Attributes["service_account_id"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccCheckWorkspaceServiceAccountDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_grafana_workspace_service_account" { + continue + } + + _, err := tfgrafana.FindWorkspaceServiceAccountByTwoPartKey(ctx, conn, rs.Primary.Attributes["workspace_id"], rs.Primary.Attributes["service_account_id"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("Grafana Workspace Service 
Account %s still exists", rs.Primary.ID) + } + return nil + } +} + +func testAccWorkspaceServiceAccountConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccWorkspaceConfig_authenticationProvider(rName, "AWS_SSO"), fmt.Sprintf(` +resource "aws_grafana_workspace_service_account" "test" { + name = %[1]q + grafana_role = "ADMIN" + workspace_id = aws_grafana_workspace.test.id +} +`, rName)) +} diff --git a/internal/service/grafana/workspace_service_account_token.go b/internal/service/grafana/workspace_service_account_token.go new file mode 100644 index 00000000000..0673cf313e1 --- /dev/null +++ b/internal/service/grafana/workspace_service_account_token.go @@ -0,0 +1,305 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grafana + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/grafana" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" + "github.com/hashicorp/terraform-plugin-framework-timetypes/timetypes" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + 
"github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource("aws_grafana_workspace_service_account_token", name="Workspace Service Account Token") +func newWorkspaceServiceAccountTokenResource(_ context.Context) (resource.ResourceWithConfigure, error) { + return &workspaceServiceAccountTokenResource{}, nil +} + +type workspaceServiceAccountTokenResource struct { + framework.ResourceWithConfigure + framework.WithNoUpdate +} + +func (r *workspaceServiceAccountTokenResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_grafana_workspace_service_account_token" +} + +func (r *workspaceServiceAccountTokenResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrCreatedAt: schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthAtLeast(1), + stringvalidator.LengthAtMost(128), + }, + }, + "expires_at": schema.StringAttribute{ + CustomType: timetypes.RFC3339Type{}, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrID: framework.IDAttribute(), + names.AttrKey: schema.StringAttribute{ + Computed: true, + Sensitive: true, + PlanModifiers: []planmodifier.String{ + 
stringplanmodifier.UseStateForUnknown(), + }, + }, + "seconds_to_live": schema.Int64Attribute{ + Required: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.RequiresReplace(), + }, + Validators: []validator.Int64{ + int64validator.Between(1, 2592000), + }, + }, + "service_account_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + "service_account_token_id": framework.IDAttribute(), + "workspace_id": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + }, + } +} + +func (r *workspaceServiceAccountTokenResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data workspaceServiceAccountTokenResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().GrafanaClient(ctx) + + name := data.Name.ValueString() + input := &grafana.CreateWorkspaceServiceAccountTokenInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + if response.Diagnostics.HasError() { + return + } + + output, err := conn.CreateWorkspaceServiceAccountToken(ctx, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating Grafana Workspace Service Account Token (%s)", name), err.Error()) + + return + } + + // Set values for unknowns. 
+ data.Key = fwflex.StringToFramework(ctx, output.ServiceAccountToken.Key) + data.TokenID = fwflex.StringToFramework(ctx, output.ServiceAccountToken.Id) + data.setID() + + serviceAccountToken, err := findWorkspaceServiceAccountTokenByThreePartKey(ctx, conn, data.WorkspaceID.ValueString(), data.ServiceAccountID.ValueString(), data.TokenID.ValueString()) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Grafana Workspace Service Account Token (%s)", data.ID.ValueString()), err.Error()) + + return + } + + data.CreatedAt = fwflex.TimeToFramework(ctx, serviceAccountToken.CreatedAt) + data.ExpiresAt = fwflex.TimeToFramework(ctx, serviceAccountToken.ExpiresAt) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *workspaceServiceAccountTokenResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data workspaceServiceAccountTokenResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) + + return + } + + conn := r.Meta().GrafanaClient(ctx) + + output, err := findWorkspaceServiceAccountTokenByThreePartKey(ctx, conn, data.WorkspaceID.ValueString(), data.ServiceAccountID.ValueString(), data.TokenID.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading Grafana Workspace Service Account Token (%s)", data.ID.ValueString()), err.Error()) + + return + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + // Restore resource ID. + // It has been overwritten by the 'Id' field from the API response. 
+ data.setID() + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *workspaceServiceAccountTokenResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data workspaceServiceAccountTokenResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().GrafanaClient(ctx) + + input := &grafana.DeleteWorkspaceServiceAccountTokenInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.DeleteWorkspaceServiceAccountToken(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting Grafana Workspace Service Account Token (%s)", data.ID.ValueString()), err.Error()) + + return + } +} + +func findWorkspaceServiceAccountToken(ctx context.Context, conn *grafana.Client, input *grafana.ListWorkspaceServiceAccountTokensInput, filter tfslices.Predicate[*awstypes.ServiceAccountTokenSummary]) (*awstypes.ServiceAccountTokenSummary, error) { + output, err := findWorkspaceServiceAccountTokens(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findWorkspaceServiceAccountTokens(ctx context.Context, conn *grafana.Client, input *grafana.ListWorkspaceServiceAccountTokensInput, filter tfslices.Predicate[*awstypes.ServiceAccountTokenSummary]) ([]awstypes.ServiceAccountTokenSummary, error) { + var output []awstypes.ServiceAccountTokenSummary + + pages := grafana.NewListWorkspaceServiceAccountTokensPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + 
} + + for _, v := range page.ServiceAccountTokens { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func findWorkspaceServiceAccountTokenByThreePartKey(ctx context.Context, conn *grafana.Client, workspaceID, serviceAccountID, tokenID string) (*awstypes.ServiceAccountTokenSummary, error) { + input := &grafana.ListWorkspaceServiceAccountTokensInput{ + ServiceAccountId: aws.String(serviceAccountID), + WorkspaceId: aws.String(workspaceID), + } + + return findWorkspaceServiceAccountToken(ctx, conn, input, func(v *awstypes.ServiceAccountTokenSummary) bool { + return aws.ToString(v.Id) == tokenID + }) +} + +type workspaceServiceAccountTokenResourceModel struct { + CreatedAt timetypes.RFC3339 `tfsdk:"created_at"` + ExpiresAt timetypes.RFC3339 `tfsdk:"expires_at"` + ID types.String `tfsdk:"id"` + Key types.String `tfsdk:"key"` + Name types.String `tfsdk:"name"` + SecondsToLive types.Int64 `tfsdk:"seconds_to_live"` + ServiceAccountID types.String `tfsdk:"service_account_id"` + TokenID types.String `tfsdk:"service_account_token_id"` + WorkspaceID types.String `tfsdk:"workspace_id"` +} + +const ( + workspaceServiceAccountTokenResourceIDPartCount = 3 +) + +func (data *workspaceServiceAccountTokenResourceModel) InitFromID() error { + id := data.ID.ValueString() + parts, err := flex.ExpandResourceId(id, workspaceServiceAccountTokenResourceIDPartCount, false) + + if err != nil { + return err + } + + data.WorkspaceID = types.StringValue(parts[0]) + data.ServiceAccountID = types.StringValue(parts[1]) + data.TokenID = types.StringValue(parts[2]) + + return nil +} + +func (data *workspaceServiceAccountTokenResourceModel) setID() { + data.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{data.WorkspaceID.ValueString(), data.ServiceAccountID.ValueString(), data.TokenID.ValueString()}, workspaceServiceAccountTokenResourceIDPartCount, false))) +} diff --git a/internal/service/grafana/workspace_service_account_token_test.go 
b/internal/service/grafana/workspace_service_account_token_test.go new file mode 100644 index 00000000000..131386ad764 --- /dev/null +++ b/internal/service/grafana/workspace_service_account_token_test.go @@ -0,0 +1,134 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grafana_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/grafana" + "github.com/aws/aws-sdk-go-v2/service/grafana/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfgrafana "github.com/hashicorp/terraform-provider-aws/internal/service/grafana" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccGrafanaWorkspaceServiceAccountToken_basic(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_grafana_workspace_service_account_token.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + var v types.ServiceAccountTokenSummary + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) + acctest.PreCheckSSOAdminInstances(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, grafana.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWorkspaceServiceAccountTokenDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWorkspaceServiceAccountTokenConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkspaceServiceAccountTokenExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrSet(resourceName, names.AttrCreatedAt), + 
resource.TestCheckResourceAttrSet(resourceName, "expires_at"), + resource.TestCheckResourceAttrSet(resourceName, "service_account_token_id"), + ), + }, + }, + }) +} + +func TestAccGrafanaWorkspaceServiceAccountToken_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v types.ServiceAccountTokenSummary + resourceName := "aws_grafana_workspace_service_account_token.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) + acctest.PreCheckSSOAdminInstances(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWorkspaceServiceAccountTokenDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWorkspaceServiceAccountTokenConfig_basic(resourceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkspaceServiceAccountTokenExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfgrafana.ResourceWorkspaceServiceAccountToken, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckWorkspaceServiceAccountTokenExists(ctx context.Context, n string, v *types.ServiceAccountTokenSummary) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) + + output, err := tfgrafana.FindWorkspaceServiceAccountTokenByThreePartKey(ctx, conn, rs.Primary.Attributes["workspace_id"], rs.Primary.Attributes["service_account_id"], rs.Primary.Attributes["service_account_token_id"]) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccCheckWorkspaceServiceAccountTokenDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := 
acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_grafana_workspace_service_account_token" { + continue + } + + _, err := tfgrafana.FindWorkspaceServiceAccountTokenByThreePartKey(ctx, conn, rs.Primary.Attributes["workspace_id"], rs.Primary.Attributes["service_account_id"], rs.Primary.Attributes["service_account_token_id"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("Grafana Workspace Service Account Token %s still exists", rs.Primary.ID) + } + return nil + } +} + +func testAccWorkspaceServiceAccountTokenConfig_basic(rName string) string { + return acctest.ConfigCompose(testAccWorkspaceServiceAccountConfig_basic(rName), fmt.Sprintf(` +resource "aws_grafana_workspace_service_account_token" "test" { + name = %[1]q + service_account_id = aws_grafana_workspace_service_account.test.service_account_id + seconds_to_live = 3600 + workspace_id = aws_grafana_workspace.test.id +} +`, rName)) +} diff --git a/internal/service/grafana/workspace_test.go b/internal/service/grafana/workspace_test.go index 20e54dc7f5b..2937ed878c0 100644 --- a/internal/service/grafana/workspace_test.go +++ b/internal/service/grafana/workspace_test.go @@ -10,8 +10,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/managedgrafana" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/grafana/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -24,13 +24,13 @@ import ( func testAccWorkspace_saml(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_grafana_workspace.test" iamRoleResourceName := "aws_iam_role.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -40,9 +40,9 @@ func testAccWorkspace_saml(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckWorkspaceExists(ctx, resourceName, &v), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "grafana", regexache.MustCompile(`/workspaces/.+`)), - resource.TestCheckResourceAttr(resourceName, "account_access_type", managedgrafana.AccountAccessTypeCurrentAccount), + resource.TestCheckResourceAttr(resourceName, "account_access_type", string(awstypes.AccountAccessTypeCurrentAccount)), resource.TestCheckResourceAttr(resourceName, "authentication_providers.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "authentication_providers.0", managedgrafana.AuthenticationProviderTypesSaml), + resource.TestCheckResourceAttr(resourceName, "authentication_providers.0", string(awstypes.AuthenticationProviderTypesSaml)), resource.TestCheckResourceAttr(resourceName, "data_sources.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), resource.TestCheckResourceAttrSet(resourceName, names.AttrEndpoint), @@ -51,9 +51,9 @@ func testAccWorkspace_saml(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "notification_destinations.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "organization_role_name", ""), resource.TestCheckResourceAttr(resourceName, "organizational_units.#", acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "permission_type", 
managedgrafana.PermissionTypeServiceManaged), + resource.TestCheckResourceAttr(resourceName, "permission_type", string(awstypes.PermissionTypeServiceManaged)), resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, iamRoleResourceName, names.AttrARN), - resource.TestCheckResourceAttr(resourceName, "saml_configuration_status", managedgrafana.SamlConfigurationStatusNotConfigured), + resource.TestCheckResourceAttr(resourceName, "saml_configuration_status", string(awstypes.SamlConfigurationStatusNotConfigured)), resource.TestCheckResourceAttr(resourceName, "stack_set_name", ""), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "vpc_configuration.#", acctest.Ct0), @@ -71,12 +71,12 @@ func testAccWorkspace_saml(t *testing.T) { func testAccWorkspace_vpc(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -117,7 +117,7 @@ func testAccWorkspace_vpc(t *testing.T) { func testAccWorkspace_sso(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" iamRoleResourceName := "aws_iam_role.test" @@ -125,7 +125,7 @@ func testAccWorkspace_sso(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { 
acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) acctest.PreCheckSSOAdminInstances(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), @@ -137,9 +137,9 @@ func testAccWorkspace_sso(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckWorkspaceExists(ctx, resourceName, &v), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "grafana", regexache.MustCompile(`/workspaces/.+`)), - resource.TestCheckResourceAttr(resourceName, "account_access_type", managedgrafana.AccountAccessTypeCurrentAccount), + resource.TestCheckResourceAttr(resourceName, "account_access_type", string(awstypes.AccountAccessTypeCurrentAccount)), resource.TestCheckResourceAttr(resourceName, "authentication_providers.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "authentication_providers.0", managedgrafana.AuthenticationProviderTypesAwsSso), + resource.TestCheckResourceAttr(resourceName, "authentication_providers.0", string(awstypes.AuthenticationProviderTypesAwsSso)), resource.TestCheckResourceAttr(resourceName, "data_sources.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, names.AttrDescription, ""), resource.TestCheckResourceAttrSet(resourceName, names.AttrEndpoint), @@ -148,7 +148,7 @@ func testAccWorkspace_sso(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "notification_destinations.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "organization_role_name", ""), resource.TestCheckResourceAttr(resourceName, "organizational_units.#", acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "permission_type", managedgrafana.PermissionTypeServiceManaged), + resource.TestCheckResourceAttr(resourceName, "permission_type", string(awstypes.PermissionTypeServiceManaged)), resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, iamRoleResourceName, names.AttrARN), 
resource.TestCheckResourceAttr(resourceName, "saml_configuration_status", ""), resource.TestCheckResourceAttr(resourceName, "stack_set_name", ""), @@ -166,12 +166,12 @@ func testAccWorkspace_sso(t *testing.T) { func testAccWorkspace_disappears(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -190,14 +190,14 @@ func testAccWorkspace_disappears(t *testing.T) { func testAccWorkspace_organization(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) + acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) acctest.PreCheckOrganizationsEnabled(ctx, t) acctest.PreCheckOrganizationManagementAccount(ctx, t) }, @@ -209,9 +209,9 @@ func testAccWorkspace_organization(t *testing.T) { Config: testAccWorkspaceConfig_organization(rName), Check: resource.ComposeTestCheckFunc( testAccCheckWorkspaceExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "account_access_type", managedgrafana.AccountAccessTypeOrganization), + resource.TestCheckResourceAttr(resourceName, "account_access_type", 
string(awstypes.AccountAccessTypeOrganization)), resource.TestCheckResourceAttr(resourceName, "authentication_providers.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "authentication_providers.0", managedgrafana.AuthenticationProviderTypesSaml), + resource.TestCheckResourceAttr(resourceName, "authentication_providers.0", string(awstypes.AuthenticationProviderTypesSaml)), resource.TestCheckResourceAttr(resourceName, "organization_role_name", ""), resource.TestCheckResourceAttr(resourceName, "organizational_units.#", acctest.Ct1), ), @@ -227,12 +227,12 @@ func testAccWorkspace_organization(t *testing.T) { func testAccWorkspace_tags(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -273,13 +273,13 @@ func testAccWorkspace_tags(t *testing.T) { func testAccWorkspace_dataSources(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" iamRoleResourceName := "aws_iam_role.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, 
names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -289,9 +289,9 @@ func testAccWorkspace_dataSources(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckWorkspaceExists(ctx, resourceName, &v), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "grafana", regexache.MustCompile(`/workspaces/.+`)), - resource.TestCheckResourceAttr(resourceName, "account_access_type", managedgrafana.AccountAccessTypeCurrentAccount), + resource.TestCheckResourceAttr(resourceName, "account_access_type", string(awstypes.AccountAccessTypeCurrentAccount)), resource.TestCheckResourceAttr(resourceName, "authentication_providers.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "authentication_providers.0", managedgrafana.AuthenticationProviderTypesSaml), + resource.TestCheckResourceAttr(resourceName, "authentication_providers.0", string(awstypes.AuthenticationProviderTypesSaml)), resource.TestCheckResourceAttr(resourceName, "data_sources.#", acctest.Ct3), resource.TestCheckResourceAttr(resourceName, "data_sources.0", "CLOUDWATCH"), resource.TestCheckResourceAttr(resourceName, "data_sources.1", "PROMETHEUS"), @@ -303,9 +303,9 @@ func testAccWorkspace_dataSources(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "notification_destinations.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "organization_role_name", ""), resource.TestCheckResourceAttr(resourceName, "organizational_units.#", acctest.Ct0), - resource.TestCheckResourceAttr(resourceName, "permission_type", managedgrafana.PermissionTypeServiceManaged), + resource.TestCheckResourceAttr(resourceName, "permission_type", string(awstypes.PermissionTypeServiceManaged)), resource.TestCheckResourceAttrPair(resourceName, names.AttrRoleARN, iamRoleResourceName, names.AttrARN), - resource.TestCheckResourceAttr(resourceName, "saml_configuration_status", 
managedgrafana.SamlConfigurationStatusNotConfigured), + resource.TestCheckResourceAttr(resourceName, "saml_configuration_status", string(awstypes.SamlConfigurationStatusNotConfigured)), resource.TestCheckResourceAttr(resourceName, "stack_set_name", ""), ), }, @@ -320,12 +320,12 @@ func testAccWorkspace_dataSources(t *testing.T) { func testAccWorkspace_permissionType(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -334,7 +334,7 @@ func testAccWorkspace_permissionType(t *testing.T) { Config: testAccWorkspaceConfig_permissionType(rName, "CUSTOMER_MANAGED"), Check: resource.ComposeTestCheckFunc( testAccCheckWorkspaceExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "permission_type", managedgrafana.PermissionTypeCustomerManaged), + resource.TestCheckResourceAttr(resourceName, "permission_type", string(awstypes.PermissionTypeCustomerManaged)), ), }, { @@ -346,7 +346,7 @@ func testAccWorkspace_permissionType(t *testing.T) { Config: testAccWorkspaceConfig_permissionType(rName, "SERVICE_MANAGED"), Check: resource.ComposeTestCheckFunc( testAccCheckWorkspaceExists(ctx, resourceName, &v), - resource.TestCheckResourceAttr(resourceName, "permission_type", managedgrafana.PermissionTypeServiceManaged), + resource.TestCheckResourceAttr(resourceName, "permission_type", string(awstypes.PermissionTypeServiceManaged)), ), }, }, @@ -355,12 +355,12 @@ 
func testAccWorkspace_permissionType(t *testing.T) { func testAccWorkspace_notificationDestinations(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -395,12 +395,12 @@ func testAccWorkspace_notificationDestinations(t *testing.T) { func testAccWorkspace_configuration(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -430,12 +430,12 @@ func testAccWorkspace_configuration(t *testing.T) { func testAccWorkspace_networkAccess(t *testing.T) { ctx := acctest.Context(t) - var v managedgrafana.WorkspaceDescription + var v awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); 
acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -476,12 +476,12 @@ func testAccWorkspace_networkAccess(t *testing.T) { func testAccWorkspace_version(t *testing.T) { ctx := acctest.Context(t) - var v1, v2 managedgrafana.WorkspaceDescription + var v1, v2, v3 awstypes.WorkspaceDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_grafana_workspace.test" resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, managedgrafana.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.GrafanaEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.GrafanaServiceID), CheckDestroy: testAccCheckWorkspaceDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -506,22 +506,26 @@ func testAccWorkspace_version(t *testing.T) { testAccCheckWorkspaceNotRecreated(&v2, &v1), ), }, + { + Config: testAccWorkspaceConfig_version(rName, "10.4"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkspaceExists(ctx, resourceName, &v3), + resource.TestCheckResourceAttr(resourceName, "grafana_version", "10.4"), + testAccCheckWorkspaceNotRecreated(&v3, &v2), + ), + }, }, }) } -func testAccCheckWorkspaceExists(ctx context.Context, n string, v *managedgrafana.WorkspaceDescription) resource.TestCheckFunc { +func testAccCheckWorkspaceExists(ctx context.Context, n string, v *awstypes.WorkspaceDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Grafana 
Workspace ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) output, err := tfgrafana.FindWorkspaceByID(ctx, conn, rs.Primary.ID) @@ -537,7 +541,7 @@ func testAccCheckWorkspaceExists(ctx context.Context, n string, v *managedgrafan func testAccCheckWorkspaceDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).GrafanaClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_grafana_workspace" { @@ -560,9 +564,9 @@ func testAccCheckWorkspaceDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckWorkspaceNotRecreated(i, j *managedgrafana.WorkspaceDescription) resource.TestCheckFunc { +func testAccCheckWorkspaceNotRecreated(i, j *awstypes.WorkspaceDescription) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.Id) != aws.StringValue(j.Id) { + if aws.ToString(i.Id) != aws.ToString(j.Id) { return errors.New("Grafana Workspace was recreated") } diff --git a/internal/service/greengrass/generate.go b/internal/service/greengrass/generate.go index dc1f07e6d4e..62f853c1770 100644 --- a/internal/service/greengrass/generate.go +++ b/internal/service/greengrass/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags -AWSSDKVersion=2 -KVTValues -SkipTypesImp //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/greengrass/service_endpoint_resolver_gen.go b/internal/service/greengrass/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..c1563d389c3 --- /dev/null +++ b/internal/service/greengrass/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package greengrass + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + greengrass_sdkv2 "github.com/aws/aws-sdk-go-v2/service/greengrass" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ greengrass_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver greengrass_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: greengrass_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params greengrass_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, 
ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up greengrass endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*greengrass_sdkv2.Options) { + return func(o *greengrass_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/greengrass/service_endpoints_gen_test.go b/internal/service/greengrass/service_endpoints_gen_test.go index bca8626fb03..7a6a2c1699c 100644 --- a/internal/service/greengrass/service_endpoints_gen_test.go +++ b/internal/service/greengrass/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package greengrass_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - greengrass_sdkv1 "github.com/aws/aws-sdk-go/service/greengrass" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + greengrass_sdkv2 "github.com/aws/aws-sdk-go-v2/service/greengrass" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ 
-217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := greengrass_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(greengrass_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), greengrass_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := greengrass_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(greengrass_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), greengrass_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.GreengrassConn(ctx) - - req, _ := 
client.ListGroupsRequest(&greengrass_sdkv1.ListGroupsInput{}) + client := meta.GreengrassClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListGroups(ctx, &greengrass_sdkv2.ListGroupsInput{}, + func(opts *greengrass_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: 
expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that 
intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/greengrass/service_package_gen.go b/internal/service/greengrass/service_package_gen.go index 8347378ae0f..171fd82de01 100644 --- a/internal/service/greengrass/service_package_gen.go +++ b/internal/service/greengrass/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package greengrass import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - greengrass_sdkv1 "github.com/aws/aws-sdk-go/service/greengrass" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + greengrass_sdkv2 "github.com/aws/aws-sdk-go-v2/service/greengrass" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -37,25 +34,14 @@ func (p *servicePackage) ServicePackageName() string { return names.Greengrass } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*greengrass_sdkv1.Greengrass, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*greengrass_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return greengrass_sdkv1.New(sess.Copy(&cfg)), nil + return greengrass_sdkv2.NewFromConfig(cfg, + greengrass_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/greengrass/tags_gen.go b/internal/service/greengrass/tags_gen.go index 76b1d773c02..e1332362973 100644 --- a/internal/service/greengrass/tags_gen.go +++ b/internal/service/greengrass/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/greengrass" - "github.com/aws/aws-sdk-go/service/greengrass/greengrassiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/greengrass" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +18,12 @@ import ( // listTags lists greengrass service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn greengrassiface.GreengrassAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *greengrass.Client, identifier string, optFns ...func(*greengrass.Options)) (tftags.KeyValueTags, error) { input := &greengrass.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +35,7 @@ func listTags(ctx context.Context, conn greengrassiface.GreengrassAPI, identifie // ListTags lists greengrass service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).GreengrassConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).GreengrassClient(ctx), identifier) if err != nil { return err @@ -49,21 +48,21 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri return nil } -// map[string]*string handling +// map[string]string handling // Tags returns greengrass service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from greengrass service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns greengrass service tags from Context. // nil is returned if there are no input tags. 
-func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -74,7 +73,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets greengrass service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -83,7 +82,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates greengrass service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn greengrassiface.GreengrassAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *greengrass.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*greengrass.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -94,10 +93,10 @@ func updateTags(ctx context.Context, conn greengrassiface.GreengrassAPI, identif if len(removedTags) > 0 { input := &greengrass.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -112,7 +111,7 @@ func updateTags(ctx context.Context, conn greengrassiface.GreengrassAPI, identif Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) 
if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -125,5 +124,5 @@ func updateTags(ctx context.Context, conn greengrassiface.GreengrassAPI, identif // UpdateTags updates greengrass service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).GreengrassConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).GreengrassClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/groundstation/service_endpoint_resolver_gen.go b/internal/service/groundstation/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..e449cafde9f --- /dev/null +++ b/internal/service/groundstation/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package groundstation + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + groundstation_sdkv2 "github.com/aws/aws-sdk-go-v2/service/groundstation" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ groundstation_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver groundstation_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: groundstation_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params groundstation_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + 
"tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up groundstation endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*groundstation_sdkv2.Options) { + return func(o *groundstation_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/groundstation/service_endpoints_gen_test.go b/internal/service/groundstation/service_endpoints_gen_test.go index f78516cf4a0..07bff147631 100644 --- a/internal/service/groundstation/service_endpoints_gen_test.go +++ b/internal/service/groundstation/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name 
endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := groundstation_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), groundstation_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := groundstation_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), groundstation_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: 
expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving groundstation FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up groundstation endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/groundstation/service_package_gen.go b/internal/service/groundstation/service_package_gen.go index 4ce181199a4..8f6b5a5ea31 100644 --- a/internal/service/groundstation/service_package_gen.go +++ b/internal/service/groundstation/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT.
package groundstation @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" groundstation_sdkv2 "github.com/aws/aws-sdk-go-v2/service/groundstation" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*groundstation_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return groundstation_sdkv2.NewFromConfig(cfg, func(o *groundstation_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return groundstation_sdkv2.NewFromConfig(cfg, + groundstation_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/guardduty/service_endpoint_resolver_gen.go b/internal/service/guardduty/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..450e4f42eb5 --- /dev/null +++ b/internal/service/guardduty/service_endpoint_resolver_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package guardduty + +import ( + "context" + "fmt" + "net" + "net/url" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + guardduty_sdkv2 "github.com/aws/aws-sdk-go-v2/service/guardduty" + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up guardduty endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} + +var _ guardduty_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver guardduty_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: guardduty_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params guardduty_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved",
map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up guardduty endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*guardduty_sdkv2.Options) { + return func(o *guardduty_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/guardduty/service_endpoints_gen_test.go b/internal/service/guardduty/service_endpoints_gen_test.go index 7c50f087414..5e96c6bccd7 100644 --- a/internal/service/guardduty/service_endpoints_gen_test.go +++ b/internal/service/guardduty/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -255,24 +257,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }) } -func 
defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := guardduty_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), guardduty_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := guardduty_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), guardduty_sdkv2.EndpointParameters{ @@ -280,14 +282,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -364,16 +366,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return 
expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/guardduty/service_package_gen.go b/internal/service/guardduty/service_package_gen.go index c8f12f08eb1..ebdc4535ff1 100644 --- a/internal/service/guardduty/service_package_gen.go +++ b/internal/service/guardduty/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package guardduty @@ -8,7 +8,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" guardduty_sdkv2 "github.com/aws/aws-sdk-go-v2/service/guardduty" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" guardduty_sdkv1 "github.com/aws/aws-sdk-go/service/guardduty" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -132,11 +131,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*g "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return guardduty_sdkv1.New(sess.Copy(&cfg)), nil @@ -146,19 +142,10 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*g func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*guardduty_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return guardduty_sdkv2.NewFromConfig(cfg, func(o *guardduty_sdkv2.Options) { - if 
endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return guardduty_sdkv2.NewFromConfig(cfg, + guardduty_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/healthlake/service_endpoint_resolver_gen.go b/internal/service/healthlake/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..d106de30043 --- /dev/null +++ b/internal/service/healthlake/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package healthlake + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + healthlake_sdkv2 "github.com/aws/aws-sdk-go-v2/service/healthlake" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ healthlake_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver healthlake_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: healthlake_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params healthlake_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up healthlake endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + }
+ + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*healthlake_sdkv2.Options) { + return func(o *healthlake_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/healthlake/service_endpoints_gen_test.go b/internal/service/healthlake/service_endpoints_gen_test.go index 4042125d703..1379fbffcc0 100644 --- a/internal/service/healthlake/service_endpoints_gen_test.go +++ b/internal/service/healthlake/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := healthlake_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), healthlake_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
healthlake_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), healthlake_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/healthlake/service_package_gen.go b/internal/service/healthlake/service_package_gen.go index c9da7b7221b..46da7a8c1a7 100644 --- a/internal/service/healthlake/service_package_gen.go +++ b/internal/service/healthlake/service_package_gen.go @@ -1,4 +1,4 
@@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package healthlake @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" healthlake_sdkv2 "github.com/aws/aws-sdk-go-v2/service/healthlake" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*healthlake_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return healthlake_sdkv2.NewFromConfig(cfg, func(o *healthlake_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return healthlake_sdkv2.NewFromConfig(cfg, + healthlake_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/iam/server_certificate.go b/internal/service/iam/server_certificate.go index afaceb0aaf4..a9859e406e6 100644 --- a/internal/service/iam/server_certificate.go +++ b/internal/service/iam/server_certificate.go @@ -43,6 +43,10 @@ func resourceServerCertificate() *schema.Resource { StateContext: resourceServerCertificateImport, }, + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(15 * 
time.Minute), + }, + Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -210,15 +214,12 @@ func resourceServerCertificateUpdate(ctx context.Context, d *schema.ResourceData return append(diags, resourceServerCertificateRead(ctx, d, meta)...) } -const deleteTimeout = 15 * time.Minute - func resourceServerCertificateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).IAMClient(ctx) log.Printf("[DEBUG] Deleting IAM Server Certificate: %s", d.Id()) - - _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.DeleteConflictException](ctx, deleteTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.DeleteConflictException](ctx, d.Timeout(schema.TimeoutDelete), func() (interface{}, error) { return conn.DeleteServerCertificate(ctx, &iam.DeleteServerCertificateInput{ ServerCertificateName: aws.String(d.Get(names.AttrName).(string)), }) diff --git a/internal/service/iam/service_endpoint_resolver_gen.go b/internal/service/iam/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..288dd84b150 --- /dev/null +++ b/internal/service/iam/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package iam + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + iam_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iam" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ iam_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver iam_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: iam_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params iam_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up iam endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*iam_sdkv2.Options) { + return func(o *iam_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/iam/service_endpoints_gen_test.go b/internal/service/iam/service_endpoints_gen_test.go index 92581c8d922..9ce89ce14f8 100644 --- a/internal/service/iam/service_endpoints_gen_test.go +++ b/internal/service/iam/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -90,7 +92,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -330,7 +332,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -351,24 +353,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := iam_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), iam_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := iam_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
iam_sdkv2.EndpointParameters{ @@ -376,14 +378,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -453,16 +455,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/iam/service_package_gen.go b/internal/service/iam/service_package_gen.go index 38903cbb814..f199832ce96 100644 --- a/internal/service/iam/service_package_gen.go +++ b/internal/service/iam/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package iam @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" iam_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iam" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -301,19 +300,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*iam_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return iam_sdkv2.NewFromConfig(cfg, func(o *iam_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return iam_sdkv2.NewFromConfig(cfg, + iam_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/identitystore/service_endpoint_resolver_gen.go b/internal/service/identitystore/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f0ecaf695ce --- /dev/null +++ b/internal/service/identitystore/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package identitystore + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + identitystore_sdkv2 "github.com/aws/aws-sdk-go-v2/service/identitystore" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ identitystore_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver identitystore_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: identitystore_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params identitystore_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up identitystore endpoint %q: %s", hostname, err) + return + } + } else { + 
return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*identitystore_sdkv2.Options) { + return func(o *identitystore_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/identitystore/service_endpoints_gen_test.go b/internal/service/identitystore/service_endpoints_gen_test.go index dba9961ba82..5c4cb36e617 100644 --- a/internal/service/identitystore/service_endpoints_gen_test.go +++ b/internal/service/identitystore/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := identitystore_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), identitystore_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := identitystore_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), identitystore_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/identitystore/service_package_gen.go b/internal/service/identitystore/service_package_gen.go index d11401004bf..885caf6f0f8 100644 --- 
a/internal/service/identitystore/service_package_gen.go +++ b/internal/service/identitystore/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package identitystore @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" identitystore_sdkv2 "github.com/aws/aws-sdk-go-v2/service/identitystore" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -66,19 +65,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*identitystore_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return identitystore_sdkv2.NewFromConfig(cfg, func(o *identitystore_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return identitystore_sdkv2.NewFromConfig(cfg, + identitystore_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/imagebuilder/image_pipeline.go b/internal/service/imagebuilder/image_pipeline.go index e7b3adab89c..174bca68b69 100644 --- a/internal/service/imagebuilder/image_pipeline.go +++ b/internal/service/imagebuilder/image_pipeline.go @@ -78,6 +78,11 @@ func 
ResourceImagePipeline() *schema.Resource { Optional: true, Default: true, }, + "execution_role": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, "image_recipe_arn": { Type: schema.TypeString, Optional: true, @@ -193,6 +198,47 @@ func ResourceImagePipeline() *schema.Resource { }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), + "workflow": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "on_failure": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(imagebuilder.OnWorkflowFailure_Values(), false), + }, + "parallel_group": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 100), + }, + names.AttrParameter: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "workflow_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, }, CustomizeDiff: verify.SetTagsDiff, @@ -221,6 +267,10 @@ func resourceImagePipelineCreate(ctx context.Context, d *schema.ResourceData, me input.DistributionConfigurationArn = aws.String(v.(string)) } + if v, ok := d.GetOk("execution_role"); ok { + input.ExecutionRole = aws.String(v.(string)) + } + if v, ok := d.GetOk("image_recipe_arn"); ok { input.ImageRecipeArn = aws.String(v.(string)) } @@ -249,6 +299,10 @@ func resourceImagePipelineCreate(ctx context.Context, d *schema.ResourceData, me input.Status = aws.String(v.(string)) } + if v, ok := d.GetOk("workflow"); ok && len(v.([]interface{})) > 0 { + input.Workflows = expandWorkflowConfigurations(v.([]interface{})) + } + output, err := 
conn.CreateImagePipelineWithContext(ctx, input) if err != nil { @@ -299,6 +353,7 @@ func resourceImagePipelineRead(ctx context.Context, d *schema.ResourceData, meta d.Set(names.AttrDescription, imagePipeline.Description) d.Set("distribution_configuration_arn", imagePipeline.DistributionConfigurationArn) d.Set("enhanced_image_metadata_enabled", imagePipeline.EnhancedImageMetadataEnabled) + d.Set("execution_role", imagePipeline.ExecutionRole) d.Set("image_recipe_arn", imagePipeline.ImageRecipeArn) if imagePipeline.ImageScanningConfiguration != nil { d.Set("image_scanning_configuration", []interface{}{flattenImageScanningConfiguration(imagePipeline.ImageScanningConfiguration)}) @@ -319,6 +374,7 @@ func resourceImagePipelineRead(ctx context.Context, d *schema.ResourceData, meta d.Set(names.AttrSchedule, nil) } d.Set(names.AttrStatus, imagePipeline.Status) + d.Set("workflow", flattenWorkflowConfigurations(imagePipeline.Workflows)) setTagsOut(ctx, imagePipeline.Tags) @@ -333,11 +389,13 @@ func resourceImagePipelineUpdate(ctx context.Context, d *schema.ResourceData, me names.AttrDescription, "distribution_configuration_arn", "enhanced_image_metadata_enabled", + "execution_role", "image_scanning_configuration", "image_tests_configuration", "infrastructure_configuration_arn", names.AttrSchedule, names.AttrStatus, + "workflow", ) { input := &imagebuilder.UpdateImagePipelineInput{ ClientToken: aws.String(id.UniqueId()), @@ -357,6 +415,10 @@ func resourceImagePipelineUpdate(ctx context.Context, d *schema.ResourceData, me input.DistributionConfigurationArn = aws.String(v.(string)) } + if v, ok := d.GetOk("execution_role"); ok { + input.ExecutionRole = aws.String(v.(string)) + } + if v, ok := d.GetOk("image_recipe_arn"); ok { input.ImageRecipeArn = aws.String(v.(string)) } @@ -381,6 +443,10 @@ func resourceImagePipelineUpdate(ctx context.Context, d *schema.ResourceData, me input.Status = aws.String(v.(string)) } + if v, ok := d.GetOk("workflow"); ok && len(v.([]interface{})) > 0 
{ + input.Workflows = expandWorkflowConfigurations(v.([]interface{})) + } + _, err := conn.UpdateImagePipelineWithContext(ctx, input) if err != nil { diff --git a/internal/service/imagebuilder/image_pipeline_test.go b/internal/service/imagebuilder/image_pipeline_test.go index 3b7311cbeb8..61fd37dea1b 100644 --- a/internal/service/imagebuilder/image_pipeline_test.go +++ b/internal/service/imagebuilder/image_pipeline_test.go @@ -644,6 +644,80 @@ func TestAccImageBuilderImagePipeline_tags(t *testing.T) { }) } +func TestAccImageBuilderImagePipeline_workflow(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_imagebuilder_image_pipeline.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckImagePipelineDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccImagePipelineConfig_workflow(rName, imagebuilder.OnWorkflowFailureAbort, "test1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckImagePipelineExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "workflow.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "workflow.0.on_failure", imagebuilder.OnWorkflowFailureAbort), + resource.TestCheckResourceAttr(resourceName, "workflow.0.parallel_group", "test1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccImagePipelineConfig_workflow(rName, imagebuilder.OnWorkflowFailureContinue, "test2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckImagePipelineExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "workflow.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "workflow.0.on_failure", imagebuilder.OnWorkflowFailureContinue), + 
resource.TestCheckResourceAttr(resourceName, "workflow.0.parallel_group", "test2"), + ), + }, + }, + }) +} + +func TestAccImageBuilderImagePipeline_workflowParameter(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_imagebuilder_image_pipeline.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.ImageBuilderServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckImagePipelineDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccImagePipelineConfig_workflowParameter(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckImagePipelineExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "workflow.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "workflow.0.parameter.#", acctest.Ct1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccImagePipelineConfig_workflowParameter(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckImagePipelineExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "workflow.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "workflow.0.parameter.#", acctest.Ct1), + ), + }, + }, + }) +} + func testAccCheckImagePipelineDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ImageBuilderConn(ctx) @@ -705,6 +779,8 @@ data "aws_region" "current" {} data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} + resource "aws_iam_instance_profile" "test" { name = aws_iam_role.role.name role = aws_iam_role.role.name @@ -1105,3 +1181,104 @@ resource "aws_imagebuilder_image_pipeline" "test" { } `, rName, tagKey1, tagValue1, tagKey2, tagValue2)) } + +func 
testAccImagePipelineConfig_workflow(rName, onFailure, parallelGroup string) string { + return acctest.ConfigCompose(testAccImagePipelineConfig_base(rName), fmt.Sprintf(` +resource "aws_imagebuilder_workflow" "test" { + name = %[1]q + version = "1.0.0" + type = "TEST" + + data = <<-EOT + name: test-image + description: Workflow to test an image + schemaVersion: 1.0 + + steps: + - name: LaunchTestInstance + action: LaunchInstance + onFailure: Abort + inputs: + waitFor: "ssmAgent" + + - name: TerminateTestInstance + action: TerminateInstance + onFailure: Continue + inputs: + instanceId.$: "$.stepOutputs.LaunchTestInstance.instanceId" + EOT +} + +resource "aws_imagebuilder_image_pipeline" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + name = %[1]q + + execution_role = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/imagebuilder.amazonaws.com/AWSServiceRoleForImageBuilder" + + workflow { + on_failure = %[2]q + parallel_group = %[3]q + workflow_arn = aws_imagebuilder_workflow.test.arn + } +} +`, rName, onFailure, parallelGroup)) +} + +func testAccImagePipelineConfig_workflowParameter(rName string) string { + return acctest.ConfigCompose(testAccImagePipelineConfig_base(rName), fmt.Sprintf(` +resource "aws_imagebuilder_workflow" "test" { + name = %[1]q + version = "1.0.0" + type = "TEST" + + data = <<-EOT + name: test-image + description: Workflow to test an image + schemaVersion: 1.0 + + parameters: + - name: waitForActionAtEnd + type: boolean + + steps: + - name: LaunchTestInstance + action: LaunchInstance + onFailure: Abort + inputs: + waitFor: "ssmAgent" + + - name: TerminateTestInstance + action: TerminateInstance + onFailure: Continue + inputs: + instanceId.$: "$.stepOutputs.LaunchTestInstance.instanceId" + + - name: WaitForActionAtEnd + action: WaitForAction + if: + booleanEquals: 
true + value: "$.parameters.waitForActionAtEnd" + EOT +} + +resource "aws_imagebuilder_image_pipeline" "test" { + image_recipe_arn = aws_imagebuilder_image_recipe.test.arn + infrastructure_configuration_arn = aws_imagebuilder_infrastructure_configuration.test.arn + name = %[1]q + + execution_role = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/imagebuilder.amazonaws.com/AWSServiceRoleForImageBuilder" + + workflow { + on_failure = "ABORT" + parallel_group = "test" + workflow_arn = aws_imagebuilder_workflow.test.arn + + parameter { + name = "waitForActionAtEnd" + value = "true" + } + } +} +`, rName)) +} diff --git a/internal/service/imagebuilder/service_endpoint_resolver_gen.go b/internal/service/imagebuilder/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..993463ab654 --- /dev/null +++ b/internal/service/imagebuilder/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package imagebuilder + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) 
+ + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/imagebuilder/service_endpoints_gen_test.go b/internal/service/imagebuilder/service_endpoints_gen_test.go index 703d471eeb7..92339031efa 100644 --- a/internal/service/imagebuilder/service_endpoints_gen_test.go +++ b/internal/service/imagebuilder/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,14 +239,14 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(imagebuilder_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.ResolveUnknownService = true }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -254,10 +255,10 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(imagebuilder_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { @@ -265,7 +266,7 @@ func defaultFIPSEndpoint(region string) string { opt.UseFIPSEndpoint = 
endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -274,7 +275,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -326,16 +327,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/imagebuilder/service_package_gen.go b/internal/service/imagebuilder/service_package_gen.go index 3802c9e9348..fde42af5d3c 100644 --- a/internal/service/imagebuilder/service_package_gen.go +++ b/internal/service/imagebuilder/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package imagebuilder @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" imagebuilder_sdkv1 "github.com/aws/aws-sdk-go/service/imagebuilder" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -169,11 +168,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*i "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return imagebuilder_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/inspector/service_endpoint_resolver_gen.go b/internal/service/inspector/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ec8bc211f5b --- /dev/null +++ b/internal/service/inspector/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package inspector + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) 
+ + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up inspector endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/inspector/service_endpoints_gen_test.go b/internal/service/inspector/service_endpoints_gen_test.go index ed15776d1ac..4c24b3db9a6 100644 --- a/internal/service/inspector/service_endpoints_gen_test.go +++ b/internal/service/inspector/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(inspector_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(inspector_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func 
defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/inspector/service_package_gen.go b/internal/service/inspector/service_package_gen.go index 59c6d61a624..060b15894db 100644 --- a/internal/service/inspector/service_package_gen.go +++ b/internal/service/inspector/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package inspector @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" inspector_sdkv1 "github.com/aws/aws-sdk-go/service/inspector" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -70,11 +69,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*i "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return inspector_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/inspector2/service_endpoint_resolver_gen.go b/internal/service/inspector2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f01799d5882 --- /dev/null +++ b/internal/service/inspector2/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package inspector2 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + inspector2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/inspector2" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ inspector2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver inspector2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: inspector2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params inspector2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up inspector2 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } 
+ + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*inspector2_sdkv2.Options) { + return func(o *inspector2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/inspector2/service_endpoints_gen_test.go b/internal/service/inspector2/service_endpoints_gen_test.go index 6cc4911c2ed..69c9102c477 100644 --- a/internal/service/inspector2/service_endpoints_gen_test.go +++ b/internal/service/inspector2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := inspector2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), inspector2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
inspector2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), inspector2_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/inspector2/service_package_gen.go b/internal/service/inspector2/service_package_gen.go index f2a7d4ce7ac..70f40d55f18 100644 --- a/internal/service/inspector2/service_package_gen.go +++ b/internal/service/inspector2/service_package_gen.go @@ -1,4 +1,4 
@@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package inspector2 @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" inspector2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/inspector2" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -56,19 +55,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*inspector2_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return inspector2_sdkv2.NewFromConfig(cfg, func(o *inspector2_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return inspector2_sdkv2.NewFromConfig(cfg, + inspector2_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/internetmonitor/service_endpoint_resolver_gen.go b/internal/service/internetmonitor/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..2585620b6e9 --- /dev/null +++ b/internal/service/internetmonitor/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package internetmonitor + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + internetmonitor_sdkv2 "github.com/aws/aws-sdk-go-v2/service/internetmonitor" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ internetmonitor_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver internetmonitor_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: internetmonitor_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params internetmonitor_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up internetmonitor endpoint %q: %s", hostname, err) + return + } + 
} else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*internetmonitor_sdkv2.Options) { + return func(o *internetmonitor_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/internetmonitor/service_endpoints_gen_test.go b/internal/service/internetmonitor/service_endpoints_gen_test.go index bccb4c3a2c7..136c0a13823 100644 --- a/internal/service/internetmonitor/service_endpoints_gen_test.go +++ b/internal/service/internetmonitor/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := internetmonitor_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), internetmonitor_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) 
string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := internetmonitor_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), internetmonitor_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/internetmonitor/service_package_gen.go b/internal/service/internetmonitor/service_package_gen.go index 26967a5fd53..28fe340ad32 100644 --- 
a/internal/service/internetmonitor/service_package_gen.go +++ b/internal/service/internetmonitor/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package internetmonitor @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" internetmonitor_sdkv2 "github.com/aws/aws-sdk-go-v2/service/internetmonitor" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*internetmonitor_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return internetmonitor_sdkv2.NewFromConfig(cfg, func(o *internetmonitor_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return internetmonitor_sdkv2.NewFromConfig(cfg, + internetmonitor_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/iot/authorizer.go b/internal/service/iot/authorizer.go index e14e2542e76..94c1053ffc1 100644 --- a/internal/service/iot/authorizer.go +++ b/internal/service/iot/authorizer.go @@ -9,22 +9,28 @@ import ( "log" 
"github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_iot_authorizer") -func ResourceAuthorizer() *schema.Resource { +// @SDKResource("aws_iot_authorizer", name="Authorizer") +// @Tags(identifierAttribute="arn") +func resourceAuthorizer() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAuthorizerCreate, ReadWithoutTimeout: resourceAuthorizerRead, @@ -35,7 +41,10 @@ func ResourceAuthorizer() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, - CustomizeDiff: resourceAuthorizerCustomizeDiff, + CustomizeDiff: customdiff.Sequence( + verify.SetTagsDiff, + resourceAuthorizerCustomizeDiff, + ), Schema: map[string]*schema.Schema{ names.AttrARN: { @@ -66,11 +75,13 @@ func ResourceAuthorizer() *schema.Resource { Default: false, }, names.AttrStatus: { - Type: schema.TypeString, - Optional: 
true, - Default: iot.AuthorizerStatusActive, - ValidateFunc: validation.StringInSlice(iot.AuthorizerStatus_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.AuthorizerStatusActive, + ValidateDiagFunc: enum.Validate[awstypes.AuthorizerStatus](), }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), "token_key_name": { Type: schema.TypeString, Optional: true, @@ -91,7 +102,7 @@ func ResourceAuthorizer() *schema.Resource { func resourceAuthorizerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) name := d.Get(names.AttrName).(string) input := &iot.CreateAuthorizerInput{ @@ -99,7 +110,8 @@ func resourceAuthorizerCreate(ctx context.Context, d *schema.ResourceData, meta AuthorizerName: aws.String(name), EnableCachingForHttp: aws.Bool(d.Get("enable_caching_for_http").(bool)), SigningDisabled: aws.Bool(d.Get("signing_disabled").(bool)), - Status: aws.String(d.Get(names.AttrStatus).(string)), + Status: awstypes.AuthorizerStatus((d.Get(names.AttrStatus).(string))), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("token_key_name"); ok { @@ -107,26 +119,25 @@ func resourceAuthorizerCreate(ctx context.Context, d *schema.ResourceData, meta } if v, ok := d.GetOk("token_signing_public_keys"); ok { - input.TokenSigningPublicKeys = flex.ExpandStringMap(v.(map[string]interface{})) + input.TokenSigningPublicKeys = flex.ExpandStringValueMap(v.(map[string]interface{})) } - log.Printf("[INFO] Creating IoT Authorizer: %s", input) - output, err := conn.CreateAuthorizerWithContext(ctx, input) + output, err := conn.CreateAuthorizer(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Authorizer (%s): %s", name, err) } - d.SetId(aws.StringValue(output.AuthorizerName)) + d.SetId(aws.ToString(output.AuthorizerName)) return append(diags, 
resourceAuthorizerRead(ctx, d, meta)...) } func resourceAuthorizerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - authorizer, err := FindAuthorizerByName(ctx, conn, d.Id()) + authorizer, err := findAuthorizerByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Authorizer (%s) not found, removing from state", d.Id()) @@ -145,14 +156,14 @@ func resourceAuthorizerRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("signing_disabled", authorizer.SigningDisabled) d.Set(names.AttrStatus, authorizer.Status) d.Set("token_key_name", authorizer.TokenKeyName) - d.Set("token_signing_public_keys", aws.StringValueMap(authorizer.TokenSigningPublicKeys)) + d.Set("token_signing_public_keys", aws.StringMap(authorizer.TokenSigningPublicKeys)) return diags } func resourceAuthorizerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) input := iot.UpdateAuthorizerInput{ AuthorizerName: aws.String(d.Id()), @@ -167,7 +178,7 @@ func resourceAuthorizerUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange(names.AttrStatus) { - input.Status = aws.String(d.Get(names.AttrStatus).(string)) + input.Status = awstypes.AuthorizerStatus(d.Get(names.AttrStatus).(string)) } if d.HasChange("token_key_name") { @@ -175,11 +186,10 @@ func resourceAuthorizerUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange("token_signing_public_keys") { - input.TokenSigningPublicKeys = flex.ExpandStringMap(d.Get("token_signing_public_keys").(map[string]interface{})) + input.TokenSigningPublicKeys = flex.ExpandStringValueMap(d.Get("token_signing_public_keys").(map[string]interface{})) } - 
log.Printf("[INFO] Updating IoT Authorizer: %s", input) - _, err := conn.UpdateAuthorizerWithContext(ctx, &input) + _, err := conn.UpdateAuthorizer(ctx, &input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Authorizer (%s): %s", d.Id(), err) @@ -190,27 +200,30 @@ func resourceAuthorizerUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceAuthorizerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) // In order to delete an IoT Authorizer, you must set it inactive first. - if d.Get(names.AttrStatus).(string) == iot.AuthorizerStatusActive { - log.Printf("[INFO] Deactivating IoT Authorizer: %s", d.Id()) - _, err := conn.UpdateAuthorizerWithContext(ctx, &iot.UpdateAuthorizerInput{ + if d.Get(names.AttrStatus).(string) == string(awstypes.AuthorizerStatusActive) { + _, err := conn.UpdateAuthorizer(ctx, &iot.UpdateAuthorizerInput{ AuthorizerName: aws.String(d.Id()), - Status: aws.String(iot.AuthorizerStatusInactive), + Status: awstypes.AuthorizerStatusInactive, }) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "deactivating IoT Authorizer (%s): %s", d.Id(), err) } } log.Printf("[INFO] Deleting IoT Authorizer: %s", d.Id()) - _, err := conn.DeleteAuthorizerWithContext(ctx, &iot.DeleteAuthorizerInput{ + _, err := conn.DeleteAuthorizer(ctx, &iot.DeleteAuthorizerInput{ AuthorizerName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -233,3 +246,28 @@ func resourceAuthorizerCustomizeDiff(_ context.Context, diff *schema.ResourceDif return nil } + +func findAuthorizerByName(ctx context.Context, conn *iot.Client, name string) (*awstypes.AuthorizerDescription, error) { + input := 
&iot.DescribeAuthorizerInput{ + AuthorizerName: aws.String(name), + } + + output, err := conn.DescribeAuthorizer(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.AuthorizerDescription == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.AuthorizerDescription, nil +} diff --git a/internal/service/iot/authorizer_test.go b/internal/service/iot/authorizer_test.go index 64b6ac45f84..19bb2231940 100644 --- a/internal/service/iot/authorizer_test.go +++ b/internal/service/iot/authorizer_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccIoTAuthorizer_basic(t *testing.T) { ctx := acctest.Context(t) - var conf iot.AuthorizerDescription + var conf awstypes.AuthorizerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_authorizer.test" @@ -56,7 +56,7 @@ func TestAccIoTAuthorizer_basic(t *testing.T) { func TestAccIoTAuthorizer_disappears(t *testing.T) { ctx := acctest.Context(t) - var conf iot.AuthorizerDescription + var conf awstypes.AuthorizerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_authorizer.test" @@ -80,7 +80,7 @@ func TestAccIoTAuthorizer_disappears(t *testing.T) { func TestAccIoTAuthorizer_signingDisabled(t *testing.T) { ctx := acctest.Context(t) - var conf iot.AuthorizerDescription + var conf awstypes.AuthorizerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_authorizer.test" @@ -113,7 
+113,7 @@ func TestAccIoTAuthorizer_signingDisabled(t *testing.T) { func TestAccIoTAuthorizer_update(t *testing.T) { ctx := acctest.Context(t) - var conf iot.AuthorizerDescription + var conf awstypes.AuthorizerDescription rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_authorizer.test" @@ -156,7 +156,53 @@ func TestAccIoTAuthorizer_update(t *testing.T) { }) } -func testAccCheckAuthorizerExists(ctx context.Context, n string, v *iot.AuthorizerDescription) resource.TestCheckFunc { +func TestAccIoTAuthorizer_tags(t *testing.T) { + ctx := acctest.Context(t) + var conf awstypes.AuthorizerDescription + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_iot_authorizer.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAuthorizerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAuthorizerConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckAuthorizerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAuthorizerConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAuthorizerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + 
Config: testAccAuthorizerConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckAuthorizerExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + +func testAccCheckAuthorizerExists(ctx context.Context, n string, v *awstypes.AuthorizerDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -167,7 +213,7 @@ func testAccCheckAuthorizerExists(ctx context.Context, n string, v *iot.Authoriz return fmt.Errorf("No IoT Authorizer ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) output, err := tfiot.FindAuthorizerByName(ctx, conn, rs.Primary.ID) @@ -183,7 +229,7 @@ func testAccCheckAuthorizerExists(ctx context.Context, n string, v *iot.Authoriz func testAccCheckAuthorizerDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_authorizer" { @@ -235,7 +281,7 @@ resource "aws_lambda_function" "test" { function_name = %[1]q role = aws_iam_role.test.arn handler = "exports.example" - runtime = "nodejs16.x" + runtime = "nodejs20.x" } `, rName) } @@ -282,3 +328,40 @@ resource "aws_iot_authorizer" "test" { } `, rName)) } + +func testAccAuthorizerConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccAuthorizerConfig_base(rName), fmt.Sprintf(` +resource "aws_iot_authorizer" "test" { + name = %[1]q + authorizer_function_arn = aws_lambda_function.test.arn + token_key_name = "Token-Header-1" + + token_signing_public_keys = 
{ + Key1 = file("test-fixtures/iot-authorizer-signing-key.pem") + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccAuthorizerConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccAuthorizerConfig_base(rName), fmt.Sprintf(` +resource "aws_iot_authorizer" "test" { + name = %[1]q + authorizer_function_arn = aws_lambda_function.test.arn + token_key_name = "Token-Header-1" + + token_signing_public_keys = { + Key1 = file("test-fixtures/iot-authorizer-signing-key.pem") + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/iot/billing_group.go b/internal/service/iot/billing_group.go index 5c6c6303f41..beaaa34cfcf 100644 --- a/internal/service/iot/billing_group.go +++ b/internal/service/iot/billing_group.go @@ -8,14 +8,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -25,7 +26,7 @@ import ( // @SDKResource("aws_iot_billing_group", name="Billing Group") // @Tags(identifierAttribute="arn") -func ResourceBillingGroup() *schema.Resource { +func resourceBillingGroup() *schema.Resource 
{ return &schema.Resource{ CreateWithoutTimeout: resourceBillingGroupCreate, ReadWithoutTimeout: resourceBillingGroupRead, @@ -86,7 +87,7 @@ func ResourceBillingGroup() *schema.Resource { func resourceBillingGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) name := d.Get(names.AttrName).(string) input := &iot.CreateBillingGroupInput{ @@ -98,22 +99,22 @@ func resourceBillingGroupCreate(ctx context.Context, d *schema.ResourceData, met input.BillingGroupProperties = expandBillingGroupProperties(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.CreateBillingGroupWithContext(ctx, input) + output, err := conn.CreateBillingGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Billing Group (%s): %s", name, err) } - d.SetId(aws.StringValue(output.BillingGroupName)) + d.SetId(aws.ToString(output.BillingGroupName)) return append(diags, resourceBillingGroupRead(ctx, d, meta)...) 
} func resourceBillingGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindBillingGroupByName(ctx, conn, d.Id()) + output, err := findBillingGroupByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Billing Group (%s) not found, removing from state", d.Id()) @@ -149,7 +150,7 @@ func resourceBillingGroupRead(ctx context.Context, d *schema.ResourceData, meta func resourceBillingGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &iot.UpdateBillingGroupInput{ @@ -160,10 +161,10 @@ func resourceBillingGroupUpdate(ctx context.Context, d *schema.ResourceData, met if v, ok := d.GetOk(names.AttrProperties); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.BillingGroupProperties = expandBillingGroupProperties(v.([]interface{})[0].(map[string]interface{})) } else { - input.BillingGroupProperties = &iot.BillingGroupProperties{} + input.BillingGroupProperties = &awstypes.BillingGroupProperties{} } - _, err := conn.UpdateBillingGroupWithContext(ctx, input) + _, err := conn.UpdateBillingGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Billing Group (%s): %s", d.Id(), err) @@ -175,14 +176,14 @@ func resourceBillingGroupUpdate(ctx context.Context, d *schema.ResourceData, met func resourceBillingGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) log.Printf("[DEBUG] Deleting IoT Billing Group: %s", d.Id()) - 
_, err := conn.DeleteBillingGroupWithContext(ctx, &iot.DeleteBillingGroupInput{ + _, err := conn.DeleteBillingGroup(ctx, &iot.DeleteBillingGroupInput{ BillingGroupName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -193,14 +194,14 @@ func resourceBillingGroupDelete(ctx context.Context, d *schema.ResourceData, met return diags } -func FindBillingGroupByName(ctx context.Context, conn *iot.IoT, name string) (*iot.DescribeBillingGroupOutput, error) { +func findBillingGroupByName(ctx context.Context, conn *iot.Client, name string) (*iot.DescribeBillingGroupOutput, error) { input := &iot.DescribeBillingGroupInput{ BillingGroupName: aws.String(name), } - output, err := conn.DescribeBillingGroupWithContext(ctx, input) + output, err := conn.DescribeBillingGroup(ctx, input) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -218,12 +219,12 @@ func FindBillingGroupByName(ctx context.Context, conn *iot.IoT, name string) (*i return output, nil } -func expandBillingGroupProperties(tfMap map[string]interface{}) *iot.BillingGroupProperties { +func expandBillingGroupProperties(tfMap map[string]interface{}) *awstypes.BillingGroupProperties { if tfMap == nil { return nil } - apiObject := &iot.BillingGroupProperties{} + apiObject := &awstypes.BillingGroupProperties{} if v, ok := tfMap[names.AttrDescription].(string); ok && v != "" { apiObject.BillingGroupDescription = aws.String(v) @@ -232,7 +233,7 @@ func expandBillingGroupProperties(tfMap map[string]interface{}) *iot.BillingGrou return apiObject } -func flattenBillingGroupMetadata(apiObject *iot.BillingGroupMetadata) map[string]interface{} { +func flattenBillingGroupMetadata(apiObject *awstypes.BillingGroupMetadata) map[string]interface{} { if apiObject == 
nil { return nil } @@ -240,13 +241,13 @@ func flattenBillingGroupMetadata(apiObject *iot.BillingGroupMetadata) map[string tfMap := map[string]interface{}{} if v := apiObject.CreationDate; v != nil { - tfMap[names.AttrCreationDate] = aws.TimeValue(v).Format(time.RFC3339) + tfMap[names.AttrCreationDate] = aws.ToTime(v).Format(time.RFC3339) } return tfMap } -func flattenBillingGroupProperties(apiObject *iot.BillingGroupProperties) map[string]interface{} { +func flattenBillingGroupProperties(apiObject *awstypes.BillingGroupProperties) map[string]interface{} { if apiObject == nil { return nil } @@ -254,7 +255,7 @@ func flattenBillingGroupProperties(apiObject *iot.BillingGroupProperties) map[st tfMap := map[string]interface{}{} if v := apiObject.BillingGroupDescription; v != nil { - tfMap[names.AttrDescription] = aws.StringValue(v) + tfMap[names.AttrDescription] = aws.ToString(v) } return tfMap diff --git a/internal/service/iot/billing_group_test.go b/internal/service/iot/billing_group_test.go index 27ed985b773..50f38d7c628 100644 --- a/internal/service/iot/billing_group_test.go +++ b/internal/service/iot/billing_group_test.go @@ -9,7 +9,6 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/iot" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +21,6 @@ import ( func TestAccIoTBillingGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var v iot.DescribeBillingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_billing_group.test" @@ -35,7 +33,7 @@ func TestAccIoTBillingGroup_basic(t *testing.T) { { Config: testAccBillingGroupConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckBillingGroupExists(ctx, resourceName, &v), + testAccCheckBillingGroupExists(ctx, resourceName), 
acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "iot", regexache.MustCompile(fmt.Sprintf("billinggroup/%s$", rName))), resource.TestCheckResourceAttr(resourceName, "metadata.#", acctest.Ct1), resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), @@ -56,7 +54,6 @@ func TestAccIoTBillingGroup_basic(t *testing.T) { func TestAccIoTBillingGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var v iot.DescribeBillingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_billing_group.test" @@ -69,7 +66,7 @@ func TestAccIoTBillingGroup_disappears(t *testing.T) { { Config: testAccBillingGroupConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBillingGroupExists(ctx, resourceName, &v), + testAccCheckBillingGroupExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfiot.ResourceBillingGroup(), resourceName), ), ExpectNonEmptyPlan: true, @@ -80,7 +77,6 @@ func TestAccIoTBillingGroup_disappears(t *testing.T) { func TestAccIoTBillingGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var v iot.DescribeBillingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_billing_group.test" @@ -93,7 +89,7 @@ func TestAccIoTBillingGroup_tags(t *testing.T) { { Config: testAccBillingGroupConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckBillingGroupExists(ctx, resourceName, &v), + testAccCheckBillingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -106,7 +102,7 @@ func TestAccIoTBillingGroup_tags(t *testing.T) { { Config: testAccBillingGroupConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - 
testAccCheckBillingGroupExists(ctx, resourceName, &v), + testAccCheckBillingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -115,7 +111,7 @@ func TestAccIoTBillingGroup_tags(t *testing.T) { { Config: testAccBillingGroupConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckBillingGroupExists(ctx, resourceName, &v), + testAccCheckBillingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), ), @@ -126,7 +122,6 @@ func TestAccIoTBillingGroup_tags(t *testing.T) { func TestAccIoTBillingGroup_properties(t *testing.T) { ctx := acctest.Context(t) - var v iot.DescribeBillingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_billing_group.test" @@ -139,7 +134,7 @@ func TestAccIoTBillingGroup_properties(t *testing.T) { { Config: testAccBillingGroupConfig_properties(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBillingGroupExists(ctx, resourceName, &v), + testAccCheckBillingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "properties.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "properties.0.description", "test description 1"), resource.TestCheckResourceAttr(resourceName, names.AttrVersion, acctest.Ct1), @@ -153,7 +148,7 @@ func TestAccIoTBillingGroup_properties(t *testing.T) { { Config: testAccBillingGroupConfig_propertiesUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckBillingGroupExists(ctx, resourceName, &v), + testAccCheckBillingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, 
"properties.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "properties.0.description", "test description 2"), resource.TestCheckResourceAttr(resourceName, names.AttrVersion, acctest.Ct2), @@ -163,30 +158,24 @@ func TestAccIoTBillingGroup_properties(t *testing.T) { }) } -func testAccCheckBillingGroupExists(ctx context.Context, n string, v *iot.DescribeBillingGroupOutput) resource.TestCheckFunc { +func testAccCheckBillingGroupExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) - output, err := tfiot.FindBillingGroupByName(ctx, conn, rs.Primary.ID) + _, err := tfiot.FindBillingGroupByName(ctx, conn, rs.Primary.ID) - if err != nil { - return err - } - - *v = *output - - return nil + return err } } func testAccCheckBillingGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_billing_group" { diff --git a/internal/service/iot/ca_certificate.go b/internal/service/iot/ca_certificate.go index 43bb4bd6227..cc8ff2c2d74 100644 --- a/internal/service/iot/ca_certificate.go +++ b/internal/service/iot/ca_certificate.go @@ -10,15 +10,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -28,7 +30,7 @@ import ( // @SDKResource("aws_iot_ca_certificate", name="CA Certificate") // @Tags(identifierAttribute="arn") -func ResourceCACertificate() *schema.Resource { +func resourceCACertificate() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCACertificateCreate, ReadWithoutTimeout: resourceCACertificateRead, @@ -55,11 +57,11 @@ func ResourceCACertificate() *schema.Resource { Sensitive: true, }, "certificate_mode": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: iot.CertificateModeDefault, - ValidateFunc: validation.StringInSlice(iot.CertificateMode_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.CertificateModeDefault, + ValidateDiagFunc: enum.Validate[awstypes.CertificateMode](), }, "customer_version": { Type: schema.TypeInt, @@ -124,7 +126,7 @@ func ResourceCACertificate() *schema.Resource { CustomizeDiff: customdiff.All( func(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { - if mode := diff.Get("certificate_mode").(string); mode == iot.CertificateModeDefault { + if mode := diff.Get("certificate_mode").(string); mode == string(awstypes.CertificateModeDefault) { if v := diff.GetRawConfig().GetAttr("verification_certificate_pem"); v.IsKnown() { if v.IsNull() || v.AsString() == "" { return 
fmt.Errorf(`"verification_certificate_pem" is required when certificate_mode is %q`, mode) @@ -141,13 +143,13 @@ func ResourceCACertificate() *schema.Resource { func resourceCACertificateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) input := &iot.RegisterCACertificateInput{ - AllowAutoRegistration: aws.Bool(d.Get("allow_auto_registration").(bool)), + AllowAutoRegistration: d.Get("allow_auto_registration").(bool), CaCertificate: aws.String(d.Get("ca_certificate_pem").(string)), - CertificateMode: aws.String(d.Get("certificate_mode").(string)), - SetAsActive: aws.Bool(d.Get("active").(bool)), + CertificateMode: awstypes.CertificateMode(d.Get("certificate_mode").(string)), + SetAsActive: d.Get("active").(bool), Tags: getTagsIn(ctx), } @@ -159,26 +161,24 @@ func resourceCACertificateCreate(ctx context.Context, d *schema.ResourceData, me input.VerificationCertificate = aws.String(v.(string)) } - outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (interface{}, error) { - return conn.RegisterCACertificateWithContext(ctx, input) - }, - iot.ErrCodeInvalidRequestException, "included in the RegistrationConfig does not exist or cannot be assumed by AWS IoT") + outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (interface{}, error) { + return conn.RegisterCACertificate(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "registering IoT CA Certificate: %s", err) } - d.SetId(aws.StringValue(outputRaw.(*iot.RegisterCACertificateOutput).CertificateId)) + d.SetId(aws.ToString(outputRaw.(*iot.RegisterCACertificateOutput).CertificateId)) return append(diags, resourceCACertificateRead(ctx, d, meta)...) 
} func resourceCACertificateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindCACertificateByID(ctx, conn, d.Id()) + output, err := findCACertificateByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT CA Certificate (%s) not found, removing from state", d.Id()) @@ -191,8 +191,8 @@ func resourceCACertificateRead(ctx context.Context, d *schema.ResourceData, meta } certificateDescription := output.CertificateDescription - d.Set("active", aws.StringValue(certificateDescription.Status) == iot.CACertificateStatusActive) - d.Set("allow_auto_registration", aws.StringValue(certificateDescription.AutoRegistrationStatus) == iot.AutoRegistrationStatusEnable) + d.Set("active", string(certificateDescription.Status) == string(awstypes.CACertificateStatusActive)) + d.Set("allow_auto_registration", string(certificateDescription.AutoRegistrationStatus) == string(awstypes.AutoRegistrationStatusEnable)) d.Set(names.AttrARN, certificateDescription.CertificateArn) d.Set("ca_certificate_pem", certificateDescription.CertificatePem) d.Set("certificate_mode", certificateDescription.CertificateMode) @@ -218,7 +218,7 @@ func resourceCACertificateRead(ctx context.Context, d *schema.ResourceData, meta func resourceCACertificateUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &iot.UpdateCACertificateInput{ @@ -226,15 +226,15 @@ func resourceCACertificateUpdate(ctx context.Context, d *schema.ResourceData, me } if d.Get("active").(bool) { - input.NewStatus = aws.String(iot.CACertificateStatusActive) + input.NewStatus = 
awstypes.CACertificateStatusActive } else { - input.NewStatus = aws.String(iot.CACertificateStatusInactive) + input.NewStatus = awstypes.CACertificateStatusInactive } if d.Get("allow_auto_registration").(bool) { - input.NewAutoRegistrationStatus = aws.String(iot.AutoRegistrationStatusEnable) + input.NewAutoRegistrationStatus = awstypes.AutoRegistrationStatusEnable } else { - input.NewAutoRegistrationStatus = aws.String(iot.AutoRegistrationStatusDisable) + input.NewAutoRegistrationStatus = awstypes.AutoRegistrationStatusDisable } if d.HasChange("registration_config") { @@ -243,11 +243,9 @@ func resourceCACertificateUpdate(ctx context.Context, d *schema.ResourceData, me } } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (interface{}, error) { - return conn.UpdateCACertificateWithContext(ctx, input) - }, - iot.ErrCodeInvalidRequestException, "included in the RegistrationConfig does not exist or cannot be assumed by AWS IoT") + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (interface{}, error) { + return conn.UpdateCACertificate(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT CA Certificate (%s): %s", d.Id(), err) @@ -259,16 +257,15 @@ func resourceCACertificateUpdate(ctx context.Context, d *schema.ResourceData, me func resourceCACertificateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.Get("active").(bool) { - log.Printf("[DEBUG] Disabling IoT CA Certificate: %s", d.Id()) - _, err := conn.UpdateCACertificateWithContext(ctx, &iot.UpdateCACertificateInput{ + _, err := conn.UpdateCACertificate(ctx, &iot.UpdateCACertificateInput{ CertificateId: aws.String(d.Id()), - NewStatus: aws.String(iot.CACertificateStatusInactive), + NewStatus: awstypes.CACertificateStatusInactive, }) - 
if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -278,11 +275,11 @@ func resourceCACertificateDelete(ctx context.Context, d *schema.ResourceData, me } log.Printf("[DEBUG] Deleting IoT CA Certificate: %s", d.Id()) - _, err := conn.DeleteCACertificateWithContext(ctx, &iot.DeleteCACertificateInput{ + _, err := conn.DeleteCACertificate(ctx, &iot.DeleteCACertificateInput{ CertificateId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -293,14 +290,14 @@ func resourceCACertificateDelete(ctx context.Context, d *schema.ResourceData, me return diags } -func FindCACertificateByID(ctx context.Context, conn *iot.IoT, id string) (*iot.DescribeCACertificateOutput, error) { +func findCACertificateByID(ctx context.Context, conn *iot.Client, id string) (*iot.DescribeCACertificateOutput, error) { input := &iot.DescribeCACertificateInput{ CertificateId: aws.String(id), } - output, err := conn.DescribeCACertificateWithContext(ctx, input) + output, err := conn.DescribeCACertificate(ctx, input) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -318,12 +315,12 @@ func FindCACertificateByID(ctx context.Context, conn *iot.IoT, id string) (*iot. 
return output, nil } -func expandRegistrationConfig(tfMap map[string]interface{}) *iot.RegistrationConfig { +func expandRegistrationConfig(tfMap map[string]interface{}) *awstypes.RegistrationConfig { if tfMap == nil { return nil } - apiObject := &iot.RegistrationConfig{} + apiObject := &awstypes.RegistrationConfig{} if v, ok := tfMap[names.AttrRoleARN].(string); ok && v != "" { apiObject.RoleArn = aws.String(v) @@ -340,7 +337,7 @@ func expandRegistrationConfig(tfMap map[string]interface{}) *iot.RegistrationCon return apiObject } -func flattenRegistrationConfig(apiObject *iot.RegistrationConfig) map[string]interface{} { +func flattenRegistrationConfig(apiObject *awstypes.RegistrationConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -348,21 +345,21 @@ func flattenRegistrationConfig(apiObject *iot.RegistrationConfig) map[string]int tfMap := map[string]interface{}{} if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.TemplateBody; v != nil { - tfMap["template_body"] = aws.StringValue(v) + tfMap["template_body"] = aws.ToString(v) } if v := apiObject.TemplateName; v != nil { - tfMap["template_name"] = aws.StringValue(v) + tfMap["template_name"] = aws.ToString(v) } return tfMap } -func flattenCertificateValidity(apiObject *iot.CertificateValidity) map[string]interface{} { +func flattenCertificateValidity(apiObject *awstypes.CertificateValidity) map[string]interface{} { if apiObject == nil { return nil } @@ -370,11 +367,11 @@ func flattenCertificateValidity(apiObject *iot.CertificateValidity) map[string]i tfMap := map[string]interface{}{} if v := apiObject.NotAfter; v != nil { - tfMap["not_after"] = aws.TimeValue(v).Format(time.RFC3339) + tfMap["not_after"] = aws.ToTime(v).Format(time.RFC3339) } if v := apiObject.NotBefore; v != nil { - tfMap["not_before"] = aws.TimeValue(v).Format(time.RFC3339) + tfMap["not_before"] = aws.ToTime(v).Format(time.RFC3339) } 
return tfMap diff --git a/internal/service/iot/ca_certificate_test.go b/internal/service/iot/ca_certificate_test.go index 819571e57cc..d1f9e9b60cc 100644 --- a/internal/service/iot/ca_certificate_test.go +++ b/internal/service/iot/ca_certificate_test.go @@ -200,7 +200,7 @@ func testAccCheckCACertificateExists(ctx context.Context, n string) resource.Tes return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) _, err := tfiot.FindCACertificateByID(ctx, conn, rs.Primary.ID) @@ -210,7 +210,7 @@ func testAccCheckCACertificateExists(ctx context.Context, n string) resource.Tes func testAccCheckCACertificateDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_ca_certificate" { diff --git a/internal/service/iot/certificate.go b/internal/service/iot/certificate.go index 825f67422d3..23ae37e39ad 100644 --- a/internal/service/iot/certificate.go +++ b/internal/service/iot/certificate.go @@ -7,20 +7,21 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_iot_certificate", name="Certificate") -func ResourceCertificate() *schema.Resource { +func resourceCertificate() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCertificateCreate, ReadWithoutTimeout: resourceCertificateRead, @@ -74,12 +75,12 @@ func ResourceCertificate() *schema.Resource { func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) active := d.Get("active").(bool) - status := iot.CertificateStatusInactive + status := awstypes.CertificateStatusInactive if active { - status = iot.CertificateStatusActive + status = awstypes.CertificateStatusActive } vCert, okCert := d.GetOk("certificate_pem") vCA, okCA := d.GetOk("ca_pem") @@ -87,55 +88,55 @@ func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta if vCSR, okCSR := d.GetOk("csr"); okCSR { input := &iot.CreateCertificateFromCsrInput{ CertificateSigningRequest: aws.String(vCSR.(string)), - SetAsActive: aws.Bool(active), + SetAsActive: active, } - output, err := conn.CreateCertificateFromCsrWithContext(ctx, input) + output, err := conn.CreateCertificateFromCsr(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Certificate from CSR: %s", err) } - d.SetId(aws.StringValue(output.CertificateId)) + d.SetId(aws.ToString(output.CertificateId)) } else if okCert && okCA { input := &iot.RegisterCertificateInput{ CaCertificatePem: aws.String(vCA.(string)), CertificatePem: aws.String(vCert.(string)), - Status: aws.String(status), + Status: status, } - output, err := conn.RegisterCertificateWithContext(ctx, input) + output, err := conn.RegisterCertificate(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "registering
IoT Certificate with CA: %s", err) } - d.SetId(aws.StringValue(output.CertificateId)) + d.SetId(aws.ToString(output.CertificateId)) } else if okCert { input := &iot.RegisterCertificateWithoutCAInput{ CertificatePem: aws.String(vCert.(string)), - Status: aws.String(status), + Status: status, } - output, err := conn.RegisterCertificateWithoutCAWithContext(ctx, input) + output, err := conn.RegisterCertificateWithoutCA(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "registering IoT Certificate without CA: %s", err) } - d.SetId(aws.StringValue(output.CertificateId)) + d.SetId(aws.ToString(output.CertificateId)) } else { input := &iot.CreateKeysAndCertificateInput{ - SetAsActive: aws.Bool(active), + SetAsActive: active, } - output, err := conn.CreateKeysAndCertificateWithContext(ctx, input) + output, err := conn.CreateKeysAndCertificate(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Certificate: %s", err) } - d.SetId(aws.StringValue(output.CertificateId)) + d.SetId(aws.ToString(output.CertificateId)) d.Set(names.AttrPrivateKey, output.KeyPair.PrivateKey) d.Set(names.AttrPublicKey, output.KeyPair.PublicKey) } @@ -145,9 +146,9 @@ func resourceCertificateCreate(ctx context.Context, d *schema.ResourceData, meta func resourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindCertificateByID(ctx, conn, d.Id()) + output, err := findCertificateByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Certificate (%s) not found, removing from state", d.Id()) @@ -160,7 +161,7 @@ func resourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta i } certificateDescription := output.CertificateDescription - d.Set("active", aws.StringValue(certificateDescription.Status) == 
iot.CertificateStatusActive) + d.Set("active", certificateDescription.Status == awstypes.CertificateStatusActive) d.Set(names.AttrARN, certificateDescription.CertificateArn) d.Set("ca_certificate_id", certificateDescription.CaCertificateId) d.Set("certificate_pem", certificateDescription.CertificatePem) @@ -170,18 +171,18 @@ func resourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta i func resourceCertificateUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - status := iot.CertificateStatusInactive + status := awstypes.CertificateStatusInactive if d.Get("active").(bool) { - status = iot.CertificateStatusActive + status = awstypes.CertificateStatusActive } input := &iot.UpdateCertificateInput{ CertificateId: aws.String(d.Id()), - NewStatus: aws.String(status), + NewStatus: status, } - _, err := conn.UpdateCertificateWithContext(ctx, input) + _, err := conn.UpdateCertificate(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Certificate (%s): %s", d.Id(), err) @@ -192,16 +193,15 @@ func resourceCertificateUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceCertificateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.Get("active").(bool) { - log.Printf("[DEBUG] Disabling IoT Certificate: %s", d.Id()) - _, err := conn.UpdateCertificateWithContext(ctx, &iot.UpdateCertificateInput{ + _, err := conn.UpdateCertificate(ctx, &iot.UpdateCertificateInput{ CertificateId: aws.String(d.Id()), - NewStatus: aws.String(iot.CertificateStatusInactive), + NewStatus: awstypes.CertificateStatusInactive, }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if 
errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -211,11 +211,11 @@ func resourceCertificateDelete(ctx context.Context, d *schema.ResourceData, meta } log.Printf("[DEBUG] Deleting IoT Certificate: %s", d.Id()) - _, err := conn.DeleteCertificateWithContext(ctx, &iot.DeleteCertificateInput{ + _, err := conn.DeleteCertificate(ctx, &iot.DeleteCertificateInput{ CertificateId: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -226,14 +226,14 @@ func resourceCertificateDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func FindCertificateByID(ctx context.Context, conn *iot.IoT, id string) (*iot.DescribeCertificateOutput, error) { +func findCertificateByID(ctx context.Context, conn *iot.Client, id string) (*iot.DescribeCertificateOutput, error) { input := &iot.DescribeCertificateInput{ CertificateId: aws.String(id), } - output, err := conn.DescribeCertificateWithContext(ctx, input) + output, err := conn.DescribeCertificate(ctx, input) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/iot/certificate_test.go b/internal/service/iot/certificate_test.go index c009c569d42..b58383bb8d9 100644 --- a/internal/service/iot/certificate_test.go +++ b/internal/service/iot/certificate_test.go @@ -111,7 +111,7 @@ func testAccCheckCertificateExists(ctx context.Context, n string) resource.TestC return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) _, err := tfiot.FindCertificateByID(ctx, conn, rs.Primary.ID) @@ -121,7 +121,7 @@ func testAccCheckCertificateExists(ctx context.Context, n string) resource.TestC func 
testAccCheckCertificateDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_certificate" { diff --git a/internal/service/iot/consts.go b/internal/service/iot/consts.go index 57bbf5672b7..267822dbfd6 100644 --- a/internal/service/iot/consts.go +++ b/internal/service/iot/consts.go @@ -8,5 +8,6 @@ import ( ) const ( - propagationTimeout = 2 * time.Minute + propagationTimeout = 2 * time.Minute + deprecatePropagationTimeout = 6 * time.Minute ) diff --git a/internal/service/iot/domain_configuration.go b/internal/service/iot/domain_configuration.go index 57119844a48..db1c60f4ed8 100644 --- a/internal/service/iot/domain_configuration.go +++ b/internal/service/iot/domain_configuration.go @@ -7,14 +7,15 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" @@ -26,7 +27,7 @@ import ( // @SDKResource("aws_iot_domain_configuration", name="Domain Configuration") // 
@Tags(identifierAttribute="arn") -func ResourceDomainConfiguration() *schema.Resource { +func resourceDomainConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceDomainConfigurationCreate, ReadWithoutTimeout: resourceDomainConfigurationRead, @@ -85,17 +86,17 @@ func ResourceDomainConfiguration() *schema.Resource { }, }, "service_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: iot.ServiceTypeData, - ValidateFunc: validation.StringInSlice(iot.ServiceType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.ServiceTypeData, + ValidateDiagFunc: enum.Validate[awstypes.ServiceType](), }, names.AttrStatus: { - Type: schema.TypeString, - Optional: true, - Default: iot.DomainConfigurationStatusEnabled, - ValidateFunc: validation.StringInSlice(iot.DomainConfigurationStatus_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DomainConfigurationStatusEnabled, + ValidateDiagFunc: enum.Validate[awstypes.DomainConfigurationStatus](), }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), @@ -126,7 +127,7 @@ func ResourceDomainConfiguration() *schema.Resource { func resourceDomainConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) name := d.Get(names.AttrName).(string) input := &iot.CreateDomainConfigurationInput{ @@ -143,11 +144,11 @@ func resourceDomainConfigurationCreate(ctx context.Context, d *schema.ResourceDa } if v, ok := d.GetOk("server_certificate_arns"); ok && v.(*schema.Set).Len() > 0 { - input.ServerCertificateArns = flex.ExpandStringSet(v.(*schema.Set)) + input.ServerCertificateArns = flex.ExpandStringValueSet(v.(*schema.Set)) } if v, ok := d.GetOk("service_type"); ok { - input.ServiceType = aws.String(v.(string)) + 
input.ServiceType = awstypes.ServiceType(v.(string)) } if v, ok := d.GetOk("tls_config"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -158,22 +159,22 @@ func resourceDomainConfigurationCreate(ctx context.Context, d *schema.ResourceDa input.ValidationCertificateArn = aws.String(v.(string)) } - output, err := conn.CreateDomainConfigurationWithContext(ctx, input) + output, err := conn.CreateDomainConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Domain Configuration (%s): %s", name, err) } - d.SetId(aws.StringValue(output.DomainConfigurationName)) + d.SetId(aws.ToString(output.DomainConfigurationName)) return append(diags, resourceDomainConfigurationRead(ctx, d, meta)...) } func resourceDomainConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindDomainConfigurationByName(ctx, conn, d.Id()) + output, err := findDomainConfigurationByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Domain Configuration (%s) not found, removing from state", d.Id()) @@ -196,8 +197,8 @@ func resourceDomainConfigurationRead(ctx context.Context, d *schema.ResourceData d.Set(names.AttrDomainName, output.DomainName) d.Set("domain_type", output.DomainType) d.Set(names.AttrName, output.DomainConfigurationName) - d.Set("server_certificate_arns", tfslices.ApplyToAll(output.ServerCertificates, func(v *iot.ServerCertificateSummary) string { - return aws.StringValue(v.ServerCertificateArn) + d.Set("server_certificate_arns", tfslices.ApplyToAll(output.ServerCertificates, func(v awstypes.ServerCertificateSummary) string { + return aws.ToString(v.ServerCertificateArn) })) d.Set("service_type", output.ServiceType) d.Set(names.AttrStatus, output.DomainConfigurationStatus) @@ -215,7 +216,7 @@ func 
resourceDomainConfigurationRead(ctx context.Context, d *schema.ResourceData func resourceDomainConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &iot.UpdateDomainConfigurationInput{ @@ -226,12 +227,12 @@ func resourceDomainConfigurationUpdate(ctx context.Context, d *schema.ResourceDa if v, ok := d.GetOk("authorizer_config"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.AuthorizerConfig = expandAuthorizerConfig(v.([]interface{})[0].(map[string]interface{})) } else { - input.RemoveAuthorizerConfig = aws.Bool(true) + input.RemoveAuthorizerConfig = true } } if d.HasChange(names.AttrStatus) { - input.DomainConfigurationStatus = aws.String(d.Get(names.AttrStatus).(string)) + input.DomainConfigurationStatus = awstypes.DomainConfigurationStatus(d.Get(names.AttrStatus).(string)) } if d.HasChange("tls_config") { @@ -240,7 +241,7 @@ func resourceDomainConfigurationUpdate(ctx context.Context, d *schema.ResourceDa } } - _, err := conn.UpdateDomainConfigurationWithContext(ctx, input) + _, err := conn.UpdateDomainConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Domain Configuration (%s): %s", d.Id(), err) @@ -252,16 +253,15 @@ func resourceDomainConfigurationUpdate(ctx context.Context, d *schema.ResourceDa func resourceDomainConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - if d.Get(names.AttrStatus).(string) == iot.DomainConfigurationStatusEnabled { - log.Printf("[DEBUG] Disabling IoT Domain Configuration: %s", d.Id()) - _, err := conn.UpdateDomainConfigurationWithContext(ctx, 
&iot.UpdateDomainConfigurationInput{ + if d.Get(names.AttrStatus).(string) == string(awstypes.DomainConfigurationStatusEnabled) { + _, err := conn.UpdateDomainConfiguration(ctx, &iot.UpdateDomainConfigurationInput{ DomainConfigurationName: aws.String(d.Id()), - DomainConfigurationStatus: aws.String(iot.DomainConfigurationStatusDisabled), + DomainConfigurationStatus: awstypes.DomainConfigurationStatusDisabled, }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -271,11 +271,11 @@ func resourceDomainConfigurationDelete(ctx context.Context, d *schema.ResourceDa } log.Printf("[DEBUG] Deleting IoT Domain Configuration: %s", d.Id()) - _, err := conn.DeleteDomainConfigurationWithContext(ctx, &iot.DeleteDomainConfigurationInput{ + _, err := conn.DeleteDomainConfiguration(ctx, &iot.DeleteDomainConfigurationInput{ DomainConfigurationName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -286,14 +286,14 @@ func resourceDomainConfigurationDelete(ctx context.Context, d *schema.ResourceDa return diags } -func FindDomainConfigurationByName(ctx context.Context, conn *iot.IoT, name string) (*iot.DescribeDomainConfigurationOutput, error) { +func findDomainConfigurationByName(ctx context.Context, conn *iot.Client, name string) (*iot.DescribeDomainConfigurationOutput, error) { input := &iot.DescribeDomainConfigurationInput{ DomainConfigurationName: aws.String(name), } - output, err := conn.DescribeDomainConfigurationWithContext(ctx, input) + output, err := conn.DescribeDomainConfiguration(ctx, input) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -311,12 +311,12 @@ func FindDomainConfigurationByName(ctx 
context.Context, conn *iot.IoT, name stri return output, nil } -func expandAuthorizerConfig(tfMap map[string]interface{}) *iot.AuthorizerConfig { +func expandAuthorizerConfig(tfMap map[string]interface{}) *awstypes.AuthorizerConfig { if tfMap == nil { return nil } - apiObject := &iot.AuthorizerConfig{} + apiObject := &awstypes.AuthorizerConfig{} if v, ok := tfMap["allow_authorizer_override"].(bool); ok { apiObject.AllowAuthorizerOverride = aws.Bool(v) @@ -329,12 +329,12 @@ func expandAuthorizerConfig(tfMap map[string]interface{}) *iot.AuthorizerConfig return apiObject } -func expandTlsConfig(tfMap map[string]interface{}) *iot.TlsConfig { // nosemgrep:ci.caps5-in-func-name +func expandTlsConfig(tfMap map[string]interface{}) *awstypes.TlsConfig { // nosemgrep:ci.caps5-in-func-name if tfMap == nil { return nil } - apiObject := &iot.TlsConfig{} + apiObject := &awstypes.TlsConfig{} if v, ok := tfMap["security_policy"].(string); ok && v != "" { apiObject.SecurityPolicy = aws.String(v) @@ -343,7 +343,7 @@ func expandTlsConfig(tfMap map[string]interface{}) *iot.TlsConfig { // nosemgrep return apiObject } -func flattenAuthorizerConfig(apiObject *iot.AuthorizerConfig) map[string]interface{} { +func flattenAuthorizerConfig(apiObject *awstypes.AuthorizerConfig) map[string]interface{} { if apiObject == nil { return nil } @@ -351,17 +351,17 @@ func flattenAuthorizerConfig(apiObject *iot.AuthorizerConfig) map[string]interfa tfMap := map[string]interface{}{} if v := apiObject.AllowAuthorizerOverride; v != nil { - tfMap["allow_authorizer_override"] = aws.BoolValue(v) + tfMap["allow_authorizer_override"] = aws.ToBool(v) } if v := apiObject.DefaultAuthorizerName; v != nil { - tfMap["default_authorizer_name"] = aws.StringValue(v) + tfMap["default_authorizer_name"] = aws.ToString(v) } return tfMap } -func flattenTlsConfig(apiObject *iot.TlsConfig) map[string]interface{} { // nosemgrep:ci.caps5-in-func-name +func flattenTlsConfig(apiObject *awstypes.TlsConfig) map[string]interface{} { 
// nosemgrep:ci.caps5-in-func-name if apiObject == nil { return nil } @@ -369,7 +369,7 @@ func flattenTlsConfig(apiObject *iot.TlsConfig) map[string]interface{} { // nose tfMap := map[string]interface{}{} if v := apiObject.SecurityPolicy; v != nil { - tfMap["security_policy"] = aws.StringValue(v) + tfMap["security_policy"] = aws.ToString(v) } return tfMap diff --git a/internal/service/iot/domain_configuration_test.go b/internal/service/iot/domain_configuration_test.go index c56f0cf8757..bd1e0204894 100644 --- a/internal/service/iot/domain_configuration_test.go +++ b/internal/service/iot/domain_configuration_test.go @@ -215,7 +215,7 @@ func testAccCheckDomainConfigurationExists(ctx context.Context, n string) resour return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) _, err := tfiot.FindDomainConfigurationByName(ctx, conn, rs.Primary.ID) @@ -225,7 +225,7 @@ func testAccCheckDomainConfigurationExists(ctx context.Context, n string) resour func testAccCheckDomainConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_domain_configuration" { diff --git a/internal/service/iot/endpoint_data_source.go b/internal/service/iot/endpoint_data_source.go index 5d110fdbf63..0a8e4aa0eb4 100644 --- a/internal/service/iot/endpoint_data_source.go +++ b/internal/service/iot/endpoint_data_source.go @@ -6,8 +6,8 @@ package iot import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -16,8 +16,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_iot_endpoint") -func DataSourceEndpoint() *schema.Resource { +// @SDKDataSource("aws_iot_endpoint", name="Endpoint") +func dataSourceEndpoint() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceEndpointRead, Schema: map[string]*schema.Schema{ @@ -41,21 +41,25 @@ func DataSourceEndpoint() *schema.Resource { func dataSourceEndpointRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) + input := &iot.DescribeEndpointInput{} if v, ok := d.GetOk(names.AttrEndpointType); ok { input.EndpointType = aws.String(v.(string)) } - output, err := conn.DescribeEndpointWithContext(ctx, input) + output, err := conn.DescribeEndpoint(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "while describing iot endpoint: %s", err) + return sdkdiag.AppendErrorf(diags, "reading IoT Endpoint: %s", err) } - endpointAddress := aws.StringValue(output.EndpointAddress) + + endpointAddress := aws.ToString(output.EndpointAddress) d.SetId(endpointAddress) if err := d.Set("endpoint_address", endpointAddress); err != nil { return sdkdiag.AppendErrorf(diags, "setting endpoint_address: %s", err) } + return diags } diff --git a/internal/service/iot/event_configurations.go b/internal/service/iot/event_configurations.go index 1a8477be123..48cb55eebaf 100644 --- a/internal/service/iot/event_configurations.go +++ b/internal/service/iot/event_configurations.go @@ -7,12 +7,14 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -20,7 +22,7 @@ import ( ) // @SDKResource("aws_iot_event_configurations", name="Event Configurations") -func ResourceEventConfigurations() *schema.Resource { +func resourceEventConfigurations() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceEventConfigurationsPut, ReadWithoutTimeout: resourceEventConfigurationsRead, @@ -36,7 +38,7 @@ func ResourceEventConfigurations() *schema.Resource { Type: schema.TypeMap, Required: true, Elem: &schema.Schema{Type: schema.TypeBool}, - ValidateDiagFunc: verify.MapKeysAre(validation.ToDiagFunc(validation.StringInSlice(iot.EventType_Values(), false))), + ValidateDiagFunc: verify.MapKeysAre(enum.Validate[awstypes.EventType]()), }, }, } @@ -44,22 +46,22 @@ func ResourceEventConfigurations() *schema.Resource { func resourceEventConfigurationsPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) input := &iot.UpdateEventConfigurationsInput{} if v, ok := d.GetOk("event_configurations"); ok && len(v.(map[string]interface{})) > 0 { - input.EventConfigurations = tfmaps.ApplyToAllValues(v.(map[string]interface{}), func(v interface{}) *iot.Configuration { - return &iot.Configuration{ - Enabled: aws.Bool(v.(bool)), + input.EventConfigurations = tfmaps.ApplyToAllValues(v.(map[string]interface{}), 
func(v interface{}) awstypes.Configuration { + return awstypes.Configuration{ + Enabled: v.(bool), } }) } - _, err := conn.UpdateEventConfigurationsWithContext(ctx, input) + _, err := conn.UpdateEventConfigurations(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating IoT Event Configurations (%s): %s", meta.(*conns.AWSClient).Region, err) + return sdkdiag.AppendErrorf(diags, "updating IoT Event Configurations: %s", err) } if d.IsNewResource() { @@ -71,7 +73,7 @@ func resourceEventConfigurationsPut(ctx context.Context, d *schema.ResourceData, func resourceEventConfigurationsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) output, err := findEventConfigurations(ctx, conn) @@ -85,16 +87,23 @@ func resourceEventConfigurationsRead(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "reading IoT Event Configurations (%s): %s", d.Id(), err) } - d.Set("event_configurations", tfmaps.ApplyToAllValues(output, func(v *iot.Configuration) bool { - return aws.BoolValue(v.Enabled) + d.Set("event_configurations", tfmaps.ApplyToAllValues(output, func(v awstypes.Configuration) bool { + return v.Enabled })) return diags } -func findEventConfigurations(ctx context.Context, conn *iot.IoT) (map[string]*iot.Configuration, error) { +func findEventConfigurations(ctx context.Context, conn *iot.Client) (map[string]awstypes.Configuration, error) { input := &iot.DescribeEventConfigurationsInput{} - output, err := conn.DescribeEventConfigurationsWithContext(ctx, input) + output, err := conn.DescribeEventConfigurations(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } if err != nil { return nil, err diff --git a/internal/service/iot/exports_test.go b/internal/service/iot/exports_test.go 
new file mode 100644 index 00000000000..b87987edfa1 --- /dev/null +++ b/internal/service/iot/exports_test.go @@ -0,0 +1,44 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package iot + +// Exports for use in tests only. +var ( + ResourceAuthorizer = resourceAuthorizer + ResourceBillingGroup = resourceBillingGroup + ResourceCACertificate = resourceCACertificate + ResourceCertificate = resourceCertificate + ResourceDomainConfiguration = resourceDomainConfiguration + ResourceEventConfigurations = resourceEventConfigurations + ResourceIndexingConfiguration = resourceIndexingConfiguration + ResourceLoggingOptions = resourceLoggingOptions + ResourcePolicy = resourcePolicy + ResourcePolicyAttachment = resourcePolicyAttachment + ResourceProvisioningTemplate = resourceProvisioningTemplate + ResourceThing = resourceThing + ResourceThingGroup = resourceThingGroup + ResourceThingGroupMembership = resourceThingGroupMembership + ResourceThingPrincipalAttachment = resourceThingPrincipalAttachment + ResourceThingType = resourceThingType + ResourceTopicRule = resourceTopicRule + ResourceTopicRuleDestination = resourceTopicRuleDestination + + FindAttachedPolicyByTwoPartKey = findAttachedPolicyByTwoPartKey + FindAuthorizerByName = findAuthorizerByName + FindBillingGroupByName = findBillingGroupByName + FindCACertificateByID = findCACertificateByID + FindCertificateByID = findCertificateByID + FindDomainConfigurationByName = findDomainConfigurationByName + FindPolicyByName = findPolicyByName + FindPolicyVersionsByName = findPolicyVersionsByName + FindProvisioningTemplateByName = findProvisioningTemplateByName + FindRoleAliasByID = findRoleAliasByID + FindThingByName = findThingByName + FindThingGroupByName = findThingGroupByName + FindThingGroupMembershipByTwoPartKey = findThingGroupMembershipByTwoPartKey + FindThingPrincipalAttachmentByTwoPartKey = findThingPrincipalAttachmentByTwoPartKey + FindThingTypeByName = findThingTypeByName + 
FindTopicRuleDestinationByARN = findTopicRuleDestinationByARN + FindTopicRuleByName = findTopicRuleByName +) diff --git a/internal/service/iot/find.go b/internal/service/iot/find.go deleted file mode 100644 index ecfdbea6eec..00000000000 --- a/internal/service/iot/find.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package iot - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindAuthorizerByName(ctx context.Context, conn *iot.IoT, name string) (*iot.AuthorizerDescription, error) { - input := &iot.DescribeAuthorizerInput{ - AuthorizerName: aws.String(name), - } - - output, err := conn.DescribeAuthorizerWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.AuthorizerDescription == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.AuthorizerDescription, nil -} - -func FindThingByName(ctx context.Context, conn *iot.IoT, name string) (*iot.DescribeThingOutput, error) { - input := &iot.DescribeThingInput{ - ThingName: aws.String(name), - } - - output, err := conn.DescribeThingWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - -func FindThingGroupByName(ctx context.Context, conn *iot.IoT, name string) (*iot.DescribeThingGroupOutput, error) { - input := 
&iot.DescribeThingGroupInput{ - ThingGroupName: aws.String(name), - } - - output, err := conn.DescribeThingGroupWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - -func FindThingGroupMembership(ctx context.Context, conn *iot.IoT, thingGroupName, thingName string) error { - input := &iot.ListThingGroupsForThingInput{ - ThingName: aws.String(thingName), - } - - var v *iot.GroupNameAndArn - - err := conn.ListThingGroupsForThingPagesWithContext(ctx, input, func(page *iot.ListThingGroupsForThingOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, group := range page.ThingGroups { - if aws.StringValue(group.GroupName) == thingGroupName { - v = group - - return false - } - } - - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - return &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if v == nil { - return tfresource.NewEmptyResultError(input) - } - - return nil -} - -func FindTopicRuleByName(ctx context.Context, conn *iot.IoT, name string) (*iot.GetTopicRuleOutput, error) { - // GetTopicRule returns unhelpful errors such as - // "An error occurred (UnauthorizedException) when calling the GetTopicRule operation: Access to topic rule 'xxxxxxxx' was denied" - // when querying for a rule that doesn't exist. 
- var rule *iot.TopicRuleListItem - - err := conn.ListTopicRulesPagesWithContext(ctx, &iot.ListTopicRulesInput{}, func(page *iot.ListTopicRulesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Rules { - if v == nil { - continue - } - - if aws.StringValue(v.RuleName) == name { - rule = v - - return false - } - } - - return !lastPage - }) - - if err != nil { - return nil, err - } - - if rule == nil { - return nil, tfresource.NewEmptyResultError(name) - } - - input := &iot.GetTopicRuleInput{ - RuleName: aws.String(name), - } - - output, err := conn.GetTopicRuleWithContext(ctx, input) - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - -func FindTopicRuleDestinationByARN(ctx context.Context, conn *iot.IoT, arn string) (*iot.TopicRuleDestination, error) { - // GetTopicRuleDestination returns unhelpful errors such as - // "UnauthorizedException: Access to TopicRuleDestination 'arn:aws:iot:us-west-2:123456789012:ruledestination/vpc/f267138a-7383-4670-9e44-a7fe2f48af5e' was denied" - // when querying for a rule destination that doesn't exist. 
- var destination *iot.TopicRuleDestinationSummary - - err := conn.ListTopicRuleDestinationsPagesWithContext(ctx, &iot.ListTopicRuleDestinationsInput{}, func(page *iot.ListTopicRuleDestinationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.DestinationSummaries { - if v == nil { - continue - } - - if aws.StringValue(v.Arn) == arn { - destination = v - - return false - } - } - - return !lastPage - }) - - if err != nil { - return nil, err - } - - if destination == nil { - return nil, tfresource.NewEmptyResultError(destination) - } - - input := &iot.GetTopicRuleDestinationInput{ - Arn: aws.String(arn), - } - - output, err := conn.GetTopicRuleDestinationWithContext(ctx, input) - - if err != nil { - return nil, err - } - - if output == nil || output.TopicRuleDestination == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.TopicRuleDestination, nil -} diff --git a/internal/service/iot/flex.go b/internal/service/iot/flex.go deleted file mode 100644 index 50b98f4739b..00000000000 --- a/internal/service/iot/flex.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package iot - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-aws/internal/flex" - "github.com/hashicorp/terraform-provider-aws/names" -) - -func expandThingTypeProperties(config map[string]interface{}) *iot.ThingTypeProperties { - properties := &iot.ThingTypeProperties{ - SearchableAttributes: flex.ExpandStringSet(config["searchable_attributes"].(*schema.Set)), - } - - if v, ok := config[names.AttrDescription]; ok && v.(string) != "" { - properties.ThingTypeDescription = aws.String(v.(string)) - } - - return properties -} - -func flattenThingTypeProperties(s *iot.ThingTypeProperties) []map[string]interface{} { - m := map[string]interface{}{ - names.AttrDescription: "", - "searchable_attributes": flex.FlattenStringSet(nil), - } - - if s == nil { - return []map[string]interface{}{m} - } - - m[names.AttrDescription] = aws.StringValue(s.ThingTypeDescription) - m["searchable_attributes"] = flex.FlattenStringSet(s.SearchableAttributes) - - return []map[string]interface{}{m} -} diff --git a/internal/service/iot/generate.go b/internal/service/iot/generate.go index d93df36bc39..6585ee81cb7 100644 --- a/internal/service/iot/generate.go +++ b/internal/service/iot/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags +//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags -AWSSDKVersion=2 //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/iot/indexing_configuration.go b/internal/service/iot/indexing_configuration.go index 0328ed24c47..349ae0b3eea 100644 --- a/internal/service/iot/indexing_configuration.go +++ b/internal/service/iot/indexing_configuration.go @@ -7,19 +7,21 @@ import ( "context" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_iot_indexing_configuration") -func ResourceIndexingConfiguration() *schema.Resource { +// @SDKResource("aws_iot_indexing_configuration", name="Indexing Configuration") +func resourceIndexingConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceIndexingConfigurationPut, ReadWithoutTimeout: resourceIndexingConfigurationRead, @@ -48,9 +50,9 @@ func ResourceIndexingConfiguration() *schema.Resource { Optional: true, }, names.AttrType: { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(iot.FieldType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.FieldType](), }, }, }, @@ -66,17 +68,17 @@ func ResourceIndexingConfiguration() *schema.Resource { Optional: true, }, names.AttrType: { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(iot.FieldType_Values(), false), + Type: 
schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.FieldType](), }, }, }, }, "thing_group_indexing_mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(iot.ThingGroupIndexingMode_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.ThingGroupIndexingMode](), }, }, }, @@ -99,18 +101,18 @@ func ResourceIndexingConfiguration() *schema.Resource { Optional: true, }, names.AttrType: { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(iot.FieldType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.FieldType](), }, }, }, }, "device_defender_indexing_mode": { - Type: schema.TypeString, - Optional: true, - Default: iot.DeviceDefenderIndexingModeOff, - ValidateFunc: validation.StringInSlice(iot.DeviceDefenderIndexingMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.DeviceDefenderIndexingModeOff, + ValidateDiagFunc: enum.Validate[awstypes.DeviceDefenderIndexingMode](), }, names.AttrFilter: { Type: schema.TypeList, @@ -145,29 +147,29 @@ func ResourceIndexingConfiguration() *schema.Resource { Optional: true, }, names.AttrType: { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(iot.FieldType_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.FieldType](), }, }, }, }, "named_shadow_indexing_mode": { - Type: schema.TypeString, - Optional: true, - Default: iot.NamedShadowIndexingModeOff, - ValidateFunc: validation.StringInSlice(iot.NamedShadowIndexingMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.NamedShadowIndexingModeOff, + ValidateDiagFunc: enum.Validate[awstypes.NamedShadowIndexingMode](), }, "thing_connectivity_indexing_mode": { - Type: schema.TypeString, - Optional: true, - Default: 
iot.ThingConnectivityIndexingModeOff, - ValidateFunc: validation.StringInSlice(iot.ThingConnectivityIndexingMode_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: awstypes.ThingConnectivityIndexingModeOff, + ValidateDiagFunc: enum.Validate[awstypes.ThingConnectivityIndexingMode](), }, "thing_indexing_mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(iot.ThingIndexingMode_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.ThingIndexingMode](), }, }, }, @@ -180,7 +182,7 @@ func ResourceIndexingConfiguration() *schema.Resource { func resourceIndexingConfigurationPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) input := &iot.UpdateIndexingConfigurationInput{} @@ -192,13 +194,15 @@ func resourceIndexingConfigurationPut(ctx context.Context, d *schema.ResourceDat input.ThingIndexingConfiguration = expandThingIndexingConfiguration(v.([]interface{})[0].(map[string]interface{})) } - _, err := conn.UpdateIndexingConfigurationWithContext(ctx, input) + _, err := conn.UpdateIndexingConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Indexing Configuration: %s", err) } - d.SetId(meta.(*conns.AWSClient).Region) + if d.IsNewResource() { + d.SetId(meta.(*conns.AWSClient).Region) + } return append(diags, resourceIndexingConfigurationRead(ctx, d, meta)...) 
} @@ -206,12 +210,12 @@ func resourceIndexingConfigurationPut(ctx context.Context, d *schema.ResourceDat func resourceIndexingConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := conn.GetIndexingConfigurationWithContext(ctx, &iot.GetIndexingConfigurationInput{}) + output, err := conn.GetIndexingConfiguration(ctx, &iot.GetIndexingConfigurationInput{}) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading IoT Indexing Configuration: %s", err) + return sdkdiag.AppendErrorf(diags, "reading IoT Indexing Configuration (%s): %s", d.Id(), err) } if output.ThingGroupIndexingConfiguration != nil { @@ -232,12 +236,14 @@ func resourceIndexingConfigurationRead(ctx context.Context, d *schema.ResourceDa return diags } -func flattenThingGroupIndexingConfiguration(apiObject *iot.ThingGroupIndexingConfiguration) map[string]interface{} { +func flattenThingGroupIndexingConfiguration(apiObject *awstypes.ThingGroupIndexingConfiguration) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} + tfMap := map[string]interface{}{ + "thing_group_indexing_mode": apiObject.ThingGroupIndexingMode, + } if v := apiObject.CustomFields; v != nil { tfMap["custom_field"] = flattenFields(v) @@ -247,28 +253,25 @@ func flattenThingGroupIndexingConfiguration(apiObject *iot.ThingGroupIndexingCon tfMap["managed_field"] = flattenFields(v) } - if v := apiObject.ThingGroupIndexingMode; v != nil { - tfMap["thing_group_indexing_mode"] = aws.StringValue(v) - } - return tfMap } -func flattenThingIndexingConfiguration(apiObject *iot.ThingIndexingConfiguration) map[string]interface{} { +func flattenThingIndexingConfiguration(apiObject *awstypes.ThingIndexingConfiguration) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} + tfMap := 
map[string]interface{}{ + "device_defender_indexing_mode": apiObject.DeviceDefenderIndexingMode, + "named_shadow_indexing_mode": apiObject.NamedShadowIndexingMode, + "thing_connectivity_indexing_mode": apiObject.ThingConnectivityIndexingMode, + "thing_indexing_mode": apiObject.ThingIndexingMode, + } if v := apiObject.CustomFields; v != nil { tfMap["custom_field"] = flattenFields(v) } - if v := apiObject.DeviceDefenderIndexingMode; v != nil { - tfMap["device_defender_indexing_mode"] = aws.StringValue(v) - } - if v := apiObject.Filter; v != nil { tfMap[names.AttrFilter] = []interface{}{flattenIndexingFilter(v)} } @@ -277,22 +280,10 @@ func flattenThingIndexingConfiguration(apiObject *iot.ThingIndexingConfiguration tfMap["managed_field"] = flattenFields(v) } - if v := apiObject.NamedShadowIndexingMode; v != nil { - tfMap["named_shadow_indexing_mode"] = aws.StringValue(v) - } - - if v := apiObject.ThingConnectivityIndexingMode; v != nil { - tfMap["thing_connectivity_indexing_mode"] = aws.StringValue(v) - } - - if v := apiObject.ThingIndexingMode; v != nil { - tfMap["thing_indexing_mode"] = aws.StringValue(v) - } - return tfMap } -func flattenIndexingFilter(apiObject *iot.IndexingFilter) map[string]interface{} { +func flattenIndexingFilter(apiObject *awstypes.IndexingFilter) map[string]interface{} { if apiObject == nil { return nil } @@ -300,31 +291,25 @@ func flattenIndexingFilter(apiObject *iot.IndexingFilter) map[string]interface{} tfMap := map[string]interface{}{} if v := apiObject.NamedShadowNames; v != nil { - tfMap["named_shadow_names"] = aws.StringValueSlice(v) + tfMap["named_shadow_names"] = aws.StringSlice(v) } return tfMap } -func flattenField(apiObject *iot.Field) map[string]interface{} { - if apiObject == nil { - return nil +func flattenField(apiObject awstypes.Field) map[string]interface{} { + tfMap := map[string]interface{}{ + names.AttrType: apiObject.Type, } - tfMap := map[string]interface{}{} - if v := apiObject.Name; v != nil { - tfMap[names.AttrName] 
= aws.StringValue(v) - } - - if v := apiObject.Type; v != nil { - tfMap[names.AttrType] = aws.StringValue(v) + tfMap[names.AttrName] = aws.ToString(v) } return tfMap } -func flattenFields(apiObjects []*iot.Field) []interface{} { +func flattenFields(apiObjects []awstypes.Field) []interface{} { if len(apiObjects) == 0 { return nil } @@ -332,22 +317,18 @@ func flattenFields(apiObjects []*iot.Field) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenField(apiObject)) } return tfList } -func expandThingGroupIndexingConfiguration(tfMap map[string]interface{}) *iot.ThingGroupIndexingConfiguration { +func expandThingGroupIndexingConfiguration(tfMap map[string]interface{}) *awstypes.ThingGroupIndexingConfiguration { if tfMap == nil { return nil } - apiObject := &iot.ThingGroupIndexingConfiguration{} + apiObject := &awstypes.ThingGroupIndexingConfiguration{} if v, ok := tfMap["custom_field"].(*schema.Set); ok && v.Len() > 0 { apiObject.CustomFields = expandFields(v.List()) @@ -358,25 +339,25 @@ func expandThingGroupIndexingConfiguration(tfMap map[string]interface{}) *iot.Th } if v, ok := tfMap["thing_group_indexing_mode"].(string); ok && v != "" { - apiObject.ThingGroupIndexingMode = aws.String(v) + apiObject.ThingGroupIndexingMode = awstypes.ThingGroupIndexingMode(v) } return apiObject } -func expandThingIndexingConfiguration(tfMap map[string]interface{}) *iot.ThingIndexingConfiguration { +func expandThingIndexingConfiguration(tfMap map[string]interface{}) *awstypes.ThingIndexingConfiguration { if tfMap == nil { return nil } - apiObject := &iot.ThingIndexingConfiguration{} + apiObject := &awstypes.ThingIndexingConfiguration{} if v, ok := tfMap["custom_field"].(*schema.Set); ok && v.Len() > 0 { apiObject.CustomFields = expandFields(v.List()) } if v, ok := tfMap["device_defender_indexing_mode"].(string); ok && v != "" { - apiObject.DeviceDefenderIndexingMode = aws.String(v) + 
apiObject.DeviceDefenderIndexingMode = awstypes.DeviceDefenderIndexingMode(v) } if v, ok := tfMap[names.AttrFilter]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -388,58 +369,58 @@ func expandThingIndexingConfiguration(tfMap map[string]interface{}) *iot.ThingIn } if v, ok := tfMap["named_shadow_indexing_mode"].(string); ok && v != "" { - apiObject.NamedShadowIndexingMode = aws.String(v) + apiObject.NamedShadowIndexingMode = awstypes.NamedShadowIndexingMode(v) } if v, ok := tfMap["thing_connectivity_indexing_mode"].(string); ok && v != "" { - apiObject.ThingConnectivityIndexingMode = aws.String(v) + apiObject.ThingConnectivityIndexingMode = awstypes.ThingConnectivityIndexingMode(v) } if v, ok := tfMap["thing_indexing_mode"].(string); ok && v != "" { - apiObject.ThingIndexingMode = aws.String(v) + apiObject.ThingIndexingMode = awstypes.ThingIndexingMode(v) } return apiObject } -func expandIndexingFilter(tfMap map[string]interface{}) *iot.IndexingFilter { +func expandIndexingFilter(tfMap map[string]interface{}) *awstypes.IndexingFilter { if tfMap == nil { return nil } - apiObject := &iot.IndexingFilter{} + apiObject := &awstypes.IndexingFilter{} if v, ok := tfMap["named_shadow_names"].(*schema.Set); ok && v.Len() > 0 { - apiObject.NamedShadowNames = flex.ExpandStringSet(v) + apiObject.NamedShadowNames = flex.ExpandStringValueSet(v) } return apiObject } -func expandField(tfMap map[string]interface{}) *iot.Field { +func expandField(tfMap map[string]interface{}) *awstypes.Field { if tfMap == nil { return nil } - apiObject := &iot.Field{} + apiObject := &awstypes.Field{} if v, ok := tfMap[names.AttrName].(string); ok && v != "" { apiObject.Name = aws.String(v) } if v, ok := tfMap[names.AttrType].(string); ok && v != "" { - apiObject.Type = aws.String(v) + apiObject.Type = awstypes.FieldType(v) } return apiObject } -func expandFields(tfList []interface{}) []*iot.Field { +func expandFields(tfList []interface{}) []awstypes.Field { if len(tfList) == 0 
{ return nil } - var apiObjects []*iot.Field + var apiObjects []awstypes.Field for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -454,7 +435,7 @@ func expandFields(tfList []interface{}) []*iot.Field { continue } - apiObjects = append(apiObjects, apiObject) + apiObjects = append(apiObjects, *apiObject) } return apiObjects diff --git a/internal/service/iot/logging_options.go b/internal/service/iot/logging_options.go index edd184c7c02..6995e5e0edd 100644 --- a/internal/service/iot/logging_options.go +++ b/internal/service/iot/logging_options.go @@ -6,20 +6,21 @@ package iot import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_iot_logging_options") -func ResourceLoggingOptions() *schema.Resource { +// @SDKResource("aws_iot_logging_options", name="Logging Options") +func resourceLoggingOptions() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLoggingOptionsPut, ReadWithoutTimeout: resourceLoggingOptionsRead, @@ -28,9 +29,9 @@ func ResourceLoggingOptions() *schema.Resource { Schema: map[string]*schema.Schema{ "default_log_level": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(iot.LogLevel_Values(), false), + Type: 
schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogLevel](), }, "disable_all_logs": { Type: schema.TypeBool, @@ -48,34 +49,33 @@ func ResourceLoggingOptions() *schema.Resource { func resourceLoggingOptionsPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) input := &iot.SetV2LoggingOptionsInput{} if v, ok := d.GetOk("default_log_level"); ok { - input.DefaultLogLevel = aws.String(v.(string)) + input.DefaultLogLevel = awstypes.LogLevel(v.(string)) } if v, ok := d.GetOk("disable_all_logs"); ok { - input.DisableAllLogs = aws.Bool(v.(bool)) + input.DisableAllLogs = v.(bool) } if v, ok := d.GetOk(names.AttrRoleARN); ok { input.RoleArn = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (interface{}, error) { - return conn.SetV2LoggingOptionsWithContext(ctx, input) - }, - iot.ErrCodeInvalidRequestException, "If the role was just created or updated, please try again in a few seconds.", - ) + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (interface{}, error) { + return conn.SetV2LoggingOptions(ctx, input) + }) if err != nil { - return sdkdiag.AppendErrorf(diags, "setting IoT logging options: %s", err) + return sdkdiag.AppendErrorf(diags, "setting IoT Logging Options: %s", err) } - d.SetId(meta.(*conns.AWSClient).Region) + if d.IsNewResource() { + d.SetId(meta.(*conns.AWSClient).Region) + } return append(diags, resourceLoggingOptionsRead(ctx, d, meta)...) 
} @@ -83,12 +83,12 @@ func resourceLoggingOptionsPut(ctx context.Context, d *schema.ResourceData, meta func resourceLoggingOptionsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := conn.GetV2LoggingOptionsWithContext(ctx, &iot.GetV2LoggingOptionsInput{}) + output, err := conn.GetV2LoggingOptions(ctx, &iot.GetV2LoggingOptionsInput{}) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading IoT logging options: %s", err) + return sdkdiag.AppendErrorf(diags, "reading IoT Logging Options (%s): %s", d.Id(), err) } d.Set("default_log_level", output.DefaultLogLevel) diff --git a/internal/service/iot/policy.go b/internal/service/iot/policy.go index 7604bb6cb8e..0a0bff650d0 100644 --- a/internal/service/iot/policy.go +++ b/internal/service/iot/policy.go @@ -11,15 +11,16 @@ import ( "strconv" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -27,9 +28,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// 
@SDKResource("aws_iot_policy") +// @SDKResource("aws_iot_policy", name="Policy") // @Tags(identifierAttribute="arn") -func ResourcePolicy() *schema.Resource { +func resourcePolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePolicyCreate, ReadWithoutTimeout: resourcePolicyRead, @@ -80,11 +81,11 @@ func ResourcePolicy() *schema.Resource { func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) policy, err := structure.NormalizeJsonString(d.Get(names.AttrPolicy).(string)) if err != nil { - return sdkdiag.AppendErrorf(diags, "policy (%s) is invalid JSON: %s", policy, err) + return sdkdiag.AppendFromErr(diags, err) } name := d.Get(names.AttrName).(string) @@ -94,22 +95,22 @@ func resourcePolicyCreate(ctx context.Context, d *schema.ResourceData, meta inte Tags: getTagsIn(ctx), } - output, err := conn.CreatePolicyWithContext(ctx, input) + output, err := conn.CreatePolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Policy (%s): %s", name, err) } - d.SetId(aws.StringValue(output.PolicyName)) + d.SetId(aws.ToString(output.PolicyName)) return append(diags, resourcePolicyRead(ctx, d, meta)...) 
} func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindPolicyByName(ctx, conn, d.Id()) + output, err := findPolicyByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Policy (%s) not found, removing from state", d.Id()) @@ -125,7 +126,7 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interf d.Set("default_version_id", output.DefaultVersionId) d.Set(names.AttrName, output.PolicyName) - policyToSet, err := verify.PolicyToSet(d.Get(names.AttrPolicy).(string), aws.StringValue(output.PolicyDocument)) + policyToSet, err := verify.PolicyToSet(d.Get(names.AttrPolicy).(string), aws.ToString(output.PolicyDocument)) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -137,26 +138,26 @@ func resourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interf func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { policy, err := structure.NormalizeJsonString(d.Get(names.AttrPolicy).(string)) if err != nil { - return sdkdiag.AppendErrorf(diags, "policy (%s) is invalid JSON: %s", policy, err) + return sdkdiag.AppendFromErr(diags, err) } input := &iot.CreatePolicyVersionInput{ PolicyDocument: aws.String(policy), PolicyName: aws.String(d.Id()), - SetAsDefault: aws.Bool(true), + SetAsDefault: true, } - _, errCreate := conn.CreatePolicyVersionWithContext(ctx, input) + _, errCreate := conn.CreatePolicyVersion(ctx, input) // "VersionsLimitExceededException: The policy ... 
already has the maximum number of versions (5)" - if tfawserr.ErrCodeEquals(errCreate, iot.ErrCodeVersionsLimitExceededException) { + if errs.IsA[*awstypes.VersionsLimitExceededException](errCreate) { // Prune the lowest version and retry. - policyVersions, err := FindPolicyVersionsByName(ctx, conn, d.Id()) + policyVersions, err := findPolicyVersionsByName(ctx, conn, d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "reading IoT Policy (%s) versions: %s", d.Id(), err) @@ -165,11 +166,11 @@ func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta inte var versionIDs []int for _, v := range policyVersions { - if aws.BoolValue(v.IsDefaultVersion) { + if v.IsDefaultVersion { continue } - v, err := strconv.Atoi(aws.StringValue(v.VersionId)) + v, err := strconv.Atoi(aws.ToString(v.VersionId)) if err != nil { continue @@ -183,15 +184,11 @@ func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta inte slices.Sort(versionIDs) versionID := strconv.Itoa(versionIDs[0]) - if err := deletePolicyVersion(ctx, conn, d.Id(), versionID, d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := deletePolicyVersion(ctx, conn, d.Id(), versionID); err != nil { return sdkdiag.AppendFromErr(diags, err) } - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for IoT Policy (%s) version (%s) delete: %s", d.Id(), versionID, err) - } - - _, errCreate = conn.CreatePolicyVersionWithContext(ctx, input) + _, errCreate = conn.CreatePolicyVersion(ctx, input) } } @@ -199,14 +196,15 @@ func resourcePolicyUpdate(ctx context.Context, d *schema.ResourceData, meta inte return sdkdiag.AppendErrorf(diags, "updating IoT Policy (%s): %s", d.Id(), errCreate) } } + return append(diags, resourcePolicyRead(ctx, d, meta)...) 
} func resourcePolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - policyVersions, err := FindPolicyVersionsByName(ctx, conn, d.Id()) + policyVersions, err := findPolicyVersionsByName(ctx, conn, d.Id()) if tfresource.NotFound(err) { return diags @@ -218,31 +216,31 @@ func resourcePolicyDelete(ctx context.Context, d *schema.ResourceData, meta inte // Delete all non-default versions of the policy. for _, v := range policyVersions { - if aws.BoolValue(v.IsDefaultVersion) { + if v.IsDefaultVersion { continue } - if err := deletePolicyVersion(ctx, conn, d.Id(), aws.StringValue(v.VersionId), d.Timeout(schema.TimeoutDelete)); err != nil { + if err := deletePolicyVersion(ctx, conn, d.Id(), aws.ToString(v.VersionId)); err != nil { return sdkdiag.AppendFromErr(diags, err) } } // Delete default policy version. - if err := deletePolicy(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if err := deletePolicy(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } return diags } -func FindPolicyByName(ctx context.Context, conn *iot.IoT, name string) (*iot.GetPolicyOutput, error) { +func findPolicyByName(ctx context.Context, conn *iot.Client, name string) (*iot.GetPolicyOutput, error) { input := &iot.GetPolicyInput{ PolicyName: aws.String(name), } - output, err := conn.GetPolicyWithContext(ctx, input) + output, err := conn.GetPolicy(ctx, input) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -260,14 +258,14 @@ func FindPolicyByName(ctx context.Context, conn *iot.IoT, name string) (*iot.Get return output, nil } -func FindPolicyVersionsByName(ctx context.Context, conn *iot.IoT, name string) ([]*iot.PolicyVersion, error) { 
+func findPolicyVersionsByName(ctx context.Context, conn *iot.Client, name string) ([]awstypes.PolicyVersion, error) { input := &iot.ListPolicyVersionsInput{ PolicyName: aws.String(name), } - output, err := conn.ListPolicyVersionsWithContext(ctx, input) + output, err := conn.ListPolicyVersions(ctx, input) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -285,16 +283,17 @@ func FindPolicyVersionsByName(ctx context.Context, conn *iot.IoT, name string) ( return output.PolicyVersions, nil } -func deletePolicy(ctx context.Context, conn *iot.IoT, name string, timeout time.Duration) error { +func deletePolicy(ctx context.Context, conn *iot.Client, name string) error { input := &iot.DeletePolicyInput{ PolicyName: aws.String(name), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { - return conn.DeletePolicyWithContext(ctx, input) - }, iot.ErrCodeDeleteConflictException) + _, err := tfresource.RetryWhenIsA[*awstypes.DeleteConflictException](ctx, propagationTimeout, + func() (interface{}, error) { + return conn.DeletePolicy(ctx, input) + }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil } @@ -305,17 +304,18 @@ func deletePolicy(ctx context.Context, conn *iot.IoT, name string, timeout time. 
return nil } -func deletePolicyVersion(ctx context.Context, conn *iot.IoT, name, versionID string, timeout time.Duration) error { +func deletePolicyVersion(ctx context.Context, conn *iot.Client, name, versionID string) error { input := &iot.DeletePolicyVersionInput{ PolicyName: aws.String(name), PolicyVersionId: aws.String(versionID), } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { - return conn.DeletePolicyVersionWithContext(ctx, input) - }, iot.ErrCodeDeleteConflictException) + _, err := tfresource.RetryWhenIsA[*awstypes.DeleteConflictException](ctx, propagationTimeout, + func() (interface{}, error) { + return conn.DeletePolicyVersion(ctx, input) + }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil } diff --git a/internal/service/iot/policy_attachment.go b/internal/service/iot/policy_attachment.go index c57923c5595..2b294b5fea4 100644 --- a/internal/service/iot/policy_attachment.go +++ b/internal/service/iot/policy_attachment.go @@ -9,21 +9,21 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// 
@SDKResource("aws_iot_policy_attachment") -func ResourcePolicyAttachment() *schema.Resource { +// @SDKResource("aws_iot_policy_attachment", nmw="Policy Attachment") +func resourcePolicyAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourcePolicyAttachmentCreate, ReadWithoutTimeout: resourcePolicyAttachmentRead, @@ -46,7 +46,7 @@ func ResourcePolicyAttachment() *schema.Resource { func resourcePolicyAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) policyName := d.Get(names.AttrPolicy).(string) target := d.Get(names.AttrTarget).(string) @@ -56,7 +56,7 @@ func resourcePolicyAttachmentCreate(ctx context.Context, d *schema.ResourceData, Target: aws.String(target), } - _, err := conn.AttachPolicyWithContext(ctx, input) + _, err := conn.AttachPolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Policy Attachment (%s): %s", id, err) @@ -69,14 +69,14 @@ func resourcePolicyAttachmentCreate(ctx context.Context, d *schema.ResourceData, func resourcePolicyAttachmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) policyName, target, err := policyAttachmentParseResourceID(d.Id()) if err != nil { return sdkdiag.AppendFromErr(diags, err) } - _, err = FindAttachedPolicyByTwoPartKey(ctx, conn, policyName, target) + _, err = findAttachedPolicyByTwoPartKey(ctx, conn, policyName, target) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Policy Attachment (%s) not found, removing from state", d.Id()) @@ -93,7 +93,7 @@ func resourcePolicyAttachmentRead(ctx context.Context, d *schema.ResourceData, m func resourcePolicyAttachmentDelete(ctx context.Context, d 
*schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) policyName, target, err := policyAttachmentParseResourceID(d.Id()) if err != nil { @@ -101,14 +101,12 @@ func resourcePolicyAttachmentDelete(ctx context.Context, d *schema.ResourceData, } log.Printf("[DEBUG] Deleting IoT Policy Attachment: %s", d.Id()) - _, err = conn.DetachPolicyWithContext(ctx, &iot.DetachPolicyInput{ + _, err = conn.DetachPolicy(ctx, &iot.DetachPolicyInput{ PolicyName: aws.String(policyName), Target: aws.String(target), }) - // DetachPolicy doesn't return an error if the policy doesn't exist, - // but it returns an error if the Target is not found. - if tfawserr.ErrMessageContains(err, iot.ErrCodeInvalidRequestException, "Invalid Target") { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -119,54 +117,45 @@ func resourcePolicyAttachmentDelete(ctx context.Context, d *schema.ResourceData, return diags } -func FindAttachedPolicyByTwoPartKey(ctx context.Context, conn *iot.IoT, policyName, target string) (*iot.Policy, error) { +func findAttachedPolicyByTwoPartKey(ctx context.Context, conn *iot.Client, policyName, target string) (*awstypes.Policy, error) { input := &iot.ListAttachedPoliciesInput{ - PageSize: aws.Int64(250), - Recursive: aws.Bool(false), + PageSize: aws.Int32(250), + Recursive: false, Target: aws.String(target), } - return findAttachedPolicy(ctx, conn, input, func(v *iot.Policy) bool { - return aws.StringValue(v.PolicyName) == policyName - }) + return findAttachedPolicy(ctx, conn, input) } -func findAttachedPolicy(ctx context.Context, conn *iot.IoT, input *iot.ListAttachedPoliciesInput, filter tfslices.Predicate[*iot.Policy]) (*iot.Policy, error) { - output, err := findAttachedPolicies(ctx, conn, input, filter) +func findAttachedPolicy(ctx context.Context, conn *iot.Client, input *iot.ListAttachedPoliciesInput) (*awstypes.Policy, 
error) { + output, err := findAttachedPolicies(ctx, conn, input) if err != nil { return nil, err } - return tfresource.AssertSinglePtrResult(output) + return tfresource.AssertFirstValueResult(output) } -func findAttachedPolicies(ctx context.Context, conn *iot.IoT, input *iot.ListAttachedPoliciesInput, filter tfslices.Predicate[*iot.Policy]) ([]*iot.Policy, error) { - var output []*iot.Policy +func findAttachedPolicies(ctx context.Context, conn *iot.Client, input *iot.ListAttachedPoliciesInput) ([]awstypes.Policy, error) { + var output []awstypes.Policy - err := conn.ListAttachedPoliciesPagesWithContext(ctx, input, func(page *iot.ListAttachedPoliciesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + pages := iot.NewListAttachedPoliciesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - for _, v := range page.Policies { - if v != nil && filter(v) { - output = append(output, v) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.Policies...) 
} return output, nil diff --git a/internal/service/iot/policy_attachment_test.go b/internal/service/iot/policy_attachment_test.go index 96731013e61..6ae5dd40657 100644 --- a/internal/service/iot/policy_attachment_test.go +++ b/internal/service/iot/policy_attachment_test.go @@ -64,7 +64,7 @@ func TestAccIoTPolicyAttachment_basic(t *testing.T) { func testAccCheckPolicyAttchmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_policy_attachment" { continue @@ -94,7 +94,7 @@ func testAccCheckPolicyAttachmentExists(ctx context.Context, n string) resource. return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) _, err := tfiot.FindAttachedPolicyByTwoPartKey(ctx, conn, rs.Primary.Attributes[names.AttrPolicy], rs.Primary.Attributes[names.AttrTarget]) diff --git a/internal/service/iot/policy_test.go b/internal/service/iot/policy_test.go index 3ba87a59794..7e2faab3cf9 100644 --- a/internal/service/iot/policy_test.go +++ b/internal/service/iot/policy_test.go @@ -8,8 +8,9 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -244,7 +245,7 @@ func TestAccIoTPolicy_prune(t *testing.T) { func testAccCheckPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_policy" { @@ -275,7 +276,7 @@ func testAccCheckPolicyExists(ctx context.Context, n string, v *iot.GetPolicyOut return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) output, err := tfiot.FindPolicyByName(ctx, conn, rs.Primary.ID) @@ -296,7 +297,7 @@ func testAccCheckPolicyVersionIDs(ctx context.Context, n string, want []string) return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) output, err := tfiot.FindPolicyVersionsByName(ctx, conn, rs.Primary.ID) @@ -304,8 +305,8 @@ func testAccCheckPolicyVersionIDs(ctx context.Context, n string, want []string) return err } - got := tfslices.ApplyToAll(output, func(v *iot.PolicyVersion) string { - return aws.StringValue(v.VersionId) + got := tfslices.ApplyToAll(output, func(v awstypes.PolicyVersion) string { + return aws.ToString(v.VersionId) }) if !cmp.Equal(got, want, cmpopts.SortSlices(func(i, j string) bool { diff --git a/internal/service/iot/provisioning_template.go b/internal/service/iot/provisioning_template.go index d9dccea32fc..dcaa20c0f3f 100644 --- a/internal/service/iot/provisioning_template.go +++ b/internal/service/iot/provisioning_template.go @@ -8,14 +8,16 @@ import ( "log" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -23,19 +25,21 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) +type provisioningHookPayloadVersion string + const ( - provisioningHookPayloadVersion2020_04_01 = "2020-04-01" + provisioningHookPayloadVersion2020_04_01 provisioningHookPayloadVersion = "2020-04-01" ) -func provisioningHookPayloadVersion_Values() []string { - return []string{ +func (provisioningHookPayloadVersion) Values() []provisioningHookPayloadVersion { + return []provisioningHookPayloadVersion{ provisioningHookPayloadVersion2020_04_01, } } // @SDKResource("aws_iot_provisioning_template", name="Provisioning Template") // @Tags(identifierAttribute="arn") -func ResourceProvisioningTemplate() *schema.Resource { +func resourceProvisioningTemplate() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceProvisioningTemplateCreate, ReadWithoutTimeout: resourceProvisioningTemplateRead, @@ -81,10 +85,10 @@ func ResourceProvisioningTemplate() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "payload_version": { - Type: schema.TypeString, - Optional: true, - Default: provisioningHookPayloadVersion2020_04_01, - ValidateFunc: validation.StringInSlice(provisioningHookPayloadVersion_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: provisioningHookPayloadVersion2020_04_01, + ValidateDiagFunc: enum.Validate[provisioningHookPayloadVersion](), }, names.AttrTargetARN: { Type: schema.TypeString, @@ -110,11 +114,11 @@ func 
ResourceProvisioningTemplate() *schema.Resource { ), }, names.AttrType: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(iot.TemplateType_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.TemplateType](), }, }, @@ -124,8 +128,7 @@ func ResourceProvisioningTemplate() *schema.Resource { func resourceProvisioningTemplateCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) name := d.Get(names.AttrName).(string) input := &iot.CreateProvisioningTemplateInput{ @@ -150,31 +153,29 @@ func resourceProvisioningTemplateCreate(ctx context.Context, d *schema.ResourceD input.TemplateBody = aws.String(v.(string)) } - if v, ok := d.Get(names.AttrType).(string); ok && v != "" { - input.Type = aws.String(v) + if v, ok := d.Get(names.AttrType).(awstypes.TemplateType); ok && v != "" { + input.Type = v } - outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, + outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateProvisioningTemplateWithContext(ctx, input) - }, - iot.ErrCodeInvalidRequestException, "The provisioning role cannot be assumed by AWS IoT") + return conn.CreateProvisioningTemplate(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Provisioning Template (%s): %s", name, err) } - d.SetId(aws.StringValue(outputRaw.(*iot.CreateProvisioningTemplateOutput).TemplateName)) + d.SetId(aws.ToString(outputRaw.(*iot.CreateProvisioningTemplateOutput).TemplateName)) return append(diags, resourceProvisioningTemplateRead(ctx, d, meta)...) 
} func resourceProvisioningTemplateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).IoTClient(ctx) - conn := meta.(*conns.AWSClient).IoTConn(ctx) - - output, err := FindProvisioningTemplateByName(ctx, conn, d.Id()) + output, err := findProvisioningTemplateByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Provisioning Template %s not found, removing from state", d.Id()) @@ -207,25 +208,23 @@ func resourceProvisioningTemplateRead(ctx context.Context, d *schema.ResourceDat func resourceProvisioningTemplateUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChange("template_body") { input := &iot.CreateProvisioningTemplateVersionInput{ - SetAsDefault: aws.Bool(true), + SetAsDefault: true, TemplateBody: aws.String(d.Get("template_body").(string)), TemplateName: aws.String(d.Id()), } - log.Printf("[DEBUG] Creating IoT Provisioning Template version: %s", input) - _, err := conn.CreateProvisioningTemplateVersionWithContext(ctx, input) + _, err := conn.CreateProvisioningTemplateVersion(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Provisioning Template (%s) version: %s", d.Id(), err) } } - if d.HasChanges(names.AttrDescription, names.AttrEnabled, "provisioning_role_arn") { + if d.HasChanges(names.AttrDescription, names.AttrEnabled, "provisioning_role_arn", "pre_provisioning_hook") { input := &iot.UpdateProvisioningTemplateInput{ Description: aws.String(d.Get(names.AttrDescription).(string)), Enabled: aws.Bool(d.Get(names.AttrEnabled).(bool)), @@ -233,12 +232,14 @@ func resourceProvisioningTemplateUpdate(ctx context.Context, d *schema.ResourceD TemplateName: aws.String(d.Id()), } - log.Printf("[DEBUG] Updating IoT Provisioning 
Template: %s", input) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, + if v, ok := d.GetOk("pre_provisioning_hook"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + input.PreProvisioningHook = expandProvisioningHook(v.([]interface{})[0].(map[string]interface{})) + } + + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (interface{}, error) { - return conn.UpdateProvisioningTemplateWithContext(ctx, input) - }, - iot.ErrCodeInvalidRequestException, "The provisioning role cannot be assumed by AWS IoT") + return conn.UpdateProvisioningTemplate(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Provisioning Template (%s): %s", d.Id(), err) @@ -251,14 +252,14 @@ func resourceProvisioningTemplateUpdate(ctx context.Context, d *schema.ResourceD func resourceProvisioningTemplateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) log.Printf("[INFO] Deleting IoT Provisioning Template: %s", d.Id()) - _, err := conn.DeleteProvisioningTemplateWithContext(ctx, &iot.DeleteProvisioningTemplateInput{ + _, err := conn.DeleteProvisioningTemplate(ctx, &iot.DeleteProvisioningTemplateInput{ TemplateName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -269,7 +270,7 @@ func resourceProvisioningTemplateDelete(ctx context.Context, d *schema.ResourceD return diags } -func flattenProvisioningHook(apiObject *iot.ProvisioningHook) map[string]interface{} { +func flattenProvisioningHook(apiObject *awstypes.ProvisioningHook) map[string]interface{} { if apiObject == nil { return nil } @@ -277,22 +278,22 @@ func flattenProvisioningHook(apiObject *iot.ProvisioningHook) map[string]interfa 
tfMap := map[string]interface{}{} if v := apiObject.PayloadVersion; v != nil { - tfMap["payload_version"] = aws.StringValue(v) + tfMap["payload_version"] = aws.ToString(v) } if v := apiObject.TargetArn; v != nil { - tfMap[names.AttrTargetARN] = aws.StringValue(v) + tfMap[names.AttrTargetARN] = aws.ToString(v) } return tfMap } -func expandProvisioningHook(tfMap map[string]interface{}) *iot.ProvisioningHook { +func expandProvisioningHook(tfMap map[string]interface{}) *awstypes.ProvisioningHook { if tfMap == nil { return nil } - apiObject := &iot.ProvisioningHook{} + apiObject := &awstypes.ProvisioningHook{} if v, ok := tfMap["payload_version"].(string); ok && v != "" { apiObject.PayloadVersion = aws.String(v) @@ -305,14 +306,14 @@ func expandProvisioningHook(tfMap map[string]interface{}) *iot.ProvisioningHook return apiObject } -func FindProvisioningTemplateByName(ctx context.Context, conn *iot.IoT, name string) (*iot.DescribeProvisioningTemplateOutput, error) { +func findProvisioningTemplateByName(ctx context.Context, conn *iot.Client, name string) (*iot.DescribeProvisioningTemplateOutput, error) { input := &iot.DescribeProvisioningTemplateInput{ TemplateName: aws.String(name), } - output, err := conn.DescribeProvisioningTemplateWithContext(ctx, input) + output, err := conn.DescribeProvisioningTemplate(ctx, input) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/iot/provisioning_template_test.go b/internal/service/iot/provisioning_template_test.go index 45848a40716..f4001612a71 100644 --- a/internal/service/iot/provisioning_template_test.go +++ b/internal/service/iot/provisioning_template_test.go @@ -8,8 +8,8 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/service/iot" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -167,7 +167,7 @@ func TestAccIoTProvisioningTemplate_update(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "For testing"), resource.TestCheckResourceAttr(resourceName, names.AttrEnabled, acctest.CtTrue), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, "pre_provisioning_hook.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "pre_provisioning_hook.#", acctest.Ct1), resource.TestCheckResourceAttrSet(resourceName, "provisioning_role_arn"), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), resource.TestCheckResourceAttrSet(resourceName, "template_body"), @@ -188,7 +188,7 @@ func testAccCheckProvisioningTemplateExists(ctx context.Context, n string) resou return fmt.Errorf("No IoT Provisioning Template ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) _, err := tfiot.FindProvisioningTemplateByName(ctx, conn, rs.Primary.ID) @@ -198,7 +198,7 @@ func testAccCheckProvisioningTemplateExists(ctx context.Context, n string) resou func testAccCheckProvisioningTemplateDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_provisioning_template" { @@ -224,25 +224,16 @@ func testAccCheckProvisioningTemplateDestroy(ctx context.Context) resource.TestC func testAccCheckProvisioningTemplateNumVersions(ctx context.Context, name string, want int) resource.TestCheckFunc 
{ return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) var got int - err := conn.ListProvisioningTemplateVersionsPagesWithContext(ctx, &iot.ListProvisioningTemplateVersionsInput{TemplateName: aws.String(name)}, - func(page *iot.ListProvisioningTemplateVersionsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - got += len(page.Versions) - - return !lastPage - }) + out, err := conn.ListProvisioningTemplateVersions(ctx, &iot.ListProvisioningTemplateVersionsInput{TemplateName: aws.String(name)}) if err != nil { return err } - if got != want { + if len(out.Versions) != want { return fmt.Errorf("Incorrect version count for IoT Provisioning Template %s; got: %d, want: %d", name, got, want) } @@ -396,13 +387,20 @@ resource "aws_iot_provisioning_template" "test" { } func testAccProvisioningTemplateConfig_updated(rName string) string { - return acctest.ConfigCompose(testAccProvisioningTemplateBaseConfig(rName), fmt.Sprintf(` + return acctest.ConfigCompose( + testAccProvisioningTemplateBaseConfig(rName), + testAccProvisioningTemplateConfig_preProvisioningHook(rName), + fmt.Sprintf(` resource "aws_iot_provisioning_template" "test" { name = %[1]q provisioning_role_arn = aws_iam_role.test.arn description = "For testing" enabled = true + pre_provisioning_hook { + target_arn = aws_lambda_function.test.arn + } + template_body = jsonencode({ Parameters = { SerialNumber = { Type = "String" } @@ -428,3 +426,43 @@ resource "aws_iot_provisioning_template" "test" { } `, rName)) } + +func testAccProvisioningTemplateConfig_preProvisioningHook(rName string) string { + return fmt.Sprintf(` +resource "aws_iam_role" "test2" { + name = "%[1]s-2" + + assume_role_policy = < 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*iot.Tag { } // setTagsOut sets iot service tags in Context. 
-func setTagsOut(ctx context.Context, tags []*iot.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*iot.Tag) { // updateTags updates iot service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn iotiface.IoTAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *iot.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*iot.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn iotiface.IoTAPI, identifier string, ol if len(removedTags) > 0 { input := &iot.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn iotiface.IoTAPI, identifier string, ol Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn iotiface.IoTAPI, identifier string, ol // UpdateTags updates iot service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).IoTConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).IoTClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/iot/test-fixtures/lambda-preprovisioninghook.js b/internal/service/iot/test-fixtures/lambda-preprovisioninghook.js new file mode 100644 index 00000000000..dfdb45ef98c --- /dev/null +++ b/internal/service/iot/test-fixtures/lambda-preprovisioninghook.js @@ -0,0 +1,16 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: MPL-2.0 + */ + +//https://docs.aws.amazon.com/iot/latest/developerguide/pre-provisioning-hook.html#pre-provisioning-example +exports.handler = function (event, context, callback) { + console.log(JSON.stringify(event, null, 2)); + var reply = { + allowProvisioning: true, + parameterOverrides: { + DeviceLocation: 'Seattle' + } + }; + callback(null, reply); +} diff --git a/internal/service/iot/test-fixtures/lambda-preprovisioninghook.zip b/internal/service/iot/test-fixtures/lambda-preprovisioninghook.zip new file mode 100644 index 00000000000..87eb4dcd157 Binary files /dev/null and b/internal/service/iot/test-fixtures/lambda-preprovisioninghook.zip differ diff --git a/internal/service/iot/thing.go b/internal/service/iot/thing.go index bb4bdf9d722..a712f475a97 100644 --- a/internal/service/iot/thing.go +++ b/internal/service/iot/thing.go @@ -7,21 +7,23 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_iot_thing") -func ResourceThing() *schema.Resource { +// @SDKResource("aws_iot_thing", name="Thing") +func resourceThing() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceThingCreate, ReadWithoutTimeout: resourceThingRead, @@ -67,7 +69,7 @@ func ResourceThing() *schema.Resource { func resourceThingCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) name := d.Get(names.AttrName).(string) input := &iot.CreateThingInput{ @@ -75,8 +77,8 @@ func resourceThingCreate(ctx context.Context, d *schema.ResourceData, meta inter } if v, ok := d.GetOk(names.AttrAttributes); ok && len(v.(map[string]interface{})) > 0 { - input.AttributePayload = &iot.AttributePayload{ - Attributes: flex.ExpandStringMap(v.(map[string]interface{})), + input.AttributePayload = &awstypes.AttributePayload{ + Attributes: flex.ExpandStringValueMap(v.(map[string]interface{})), } } @@ -84,23 +86,22 @@ func resourceThingCreate(ctx context.Context, d *schema.ResourceData, meta inter input.ThingTypeName = aws.String(v.(string)) } - log.Printf("[DEBUG] Creating IoT Thing: %s", input) - output, err := conn.CreateThingWithContext(ctx, input) + output, err := conn.CreateThing(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Thing (%s): %s", name, err) } - 
d.SetId(aws.StringValue(output.ThingName)) + d.SetId(aws.ToString(output.ThingName)) return append(diags, resourceThingRead(ctx, d, meta)...) } func resourceThingRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindThingByName(ctx, conn, d.Id()) + output, err := findThingByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Thing (%s) not found, removing from state", d.Id()) @@ -115,7 +116,7 @@ func resourceThingRead(ctx context.Context, d *schema.ResourceData, meta interfa d.Set(names.AttrARN, output.ThingArn) d.Set("default_client_id", output.DefaultClientId) d.Set(names.AttrName, output.ThingName) - d.Set(names.AttrAttributes, aws.StringValueMap(output.Attributes)) + d.Set(names.AttrAttributes, aws.StringMap(output.Attributes)) d.Set("thing_type_name", output.ThingTypeName) d.Set(names.AttrVersion, output.Version) @@ -124,20 +125,20 @@ func resourceThingRead(ctx context.Context, d *schema.ResourceData, meta interfa func resourceThingUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) input := &iot.UpdateThingInput{ ThingName: aws.String(d.Get(names.AttrName).(string)), } if d.HasChange(names.AttrAttributes) { - attributes := map[string]*string{} + attributes := map[string]string{} if v, ok := d.GetOk(names.AttrAttributes); ok && len(v.(map[string]interface{})) > 0 { - attributes = flex.ExpandStringMap(v.(map[string]interface{})) + attributes = flex.ExpandStringValueMap(v.(map[string]interface{})) } - input.AttributePayload = &iot.AttributePayload{ + input.AttributePayload = &awstypes.AttributePayload{ Attributes: attributes, } } @@ -146,12 +147,11 @@ func resourceThingUpdate(ctx 
context.Context, d *schema.ResourceData, meta inter if v, ok := d.GetOk("thing_type_name"); ok { input.ThingTypeName = aws.String(v.(string)) } else { - input.RemoveThingType = aws.Bool(true) + input.RemoveThingType = true } } - log.Printf("[DEBUG] Updating IoT Thing: %s", input) - _, err := conn.UpdateThingWithContext(ctx, input) + _, err := conn.UpdateThing(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Thing (%s): %s", d.Id(), err) @@ -162,14 +162,14 @@ func resourceThingUpdate(ctx context.Context, d *schema.ResourceData, meta inter func resourceThingDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) log.Printf("[DEBUG] Deleting IoT Thing: %s", d.Id()) - _, err := conn.DeleteThingWithContext(ctx, &iot.DeleteThingInput{ + _, err := conn.DeleteThing(ctx, &iot.DeleteThingInput{ ThingName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -179,3 +179,28 @@ func resourceThingDelete(ctx context.Context, d *schema.ResourceData, meta inter return diags } + +func findThingByName(ctx context.Context, conn *iot.Client, name string) (*iot.DescribeThingOutput, error) { + input := &iot.DescribeThingInput{ + ThingName: aws.String(name), + } + + output, err := conn.DescribeThing(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} diff --git a/internal/service/iot/thing_group.go b/internal/service/iot/thing_group.go index 4754b294d31..1f1df648503 100644 --- a/internal/service/iot/thing_group.go +++ 
b/internal/service/iot/thing_group.go @@ -8,13 +8,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -25,7 +27,7 @@ import ( // @SDKResource("aws_iot_thing_group", name="Thing Group") // @Tags(identifierAttribute="arn") -func ResourceThingGroup() *schema.Resource { +func resourceThingGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceThingGroupCreate, ReadWithoutTimeout: resourceThingGroupRead, @@ -124,13 +126,9 @@ func ResourceThingGroup() *schema.Resource { } } -const ( - thingGroupDeleteTimeout = 1 * time.Minute -) - func resourceThingGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) name := d.Get(names.AttrName).(string) input := &iot.CreateThingGroupInput{ @@ -146,22 +144,22 @@ func resourceThingGroupCreate(ctx context.Context, d *schema.ResourceData, meta input.ThingGroupProperties = expandThingGroupProperties(v.([]interface{})[0].(map[string]interface{})) } - output, err := conn.CreateThingGroupWithContext(ctx, input) + output, err := 
conn.CreateThingGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Thing Group (%s): %s", name, err) } - d.SetId(aws.StringValue(output.ThingGroupName)) + d.SetId(aws.ToString(output.ThingGroupName)) return append(diags, resourceThingGroupRead(ctx, d, meta)...) } func resourceThingGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindThingGroupByName(ctx, conn, d.Id()) + output, err := findThingGroupByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Thing Group (%s) not found, removing from state", d.Id()) @@ -203,7 +201,7 @@ func resourceThingGroupRead(ctx context.Context, d *schema.ResourceData, meta in func resourceThingGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &iot.UpdateThingGroupInput{ @@ -214,19 +212,18 @@ func resourceThingGroupUpdate(ctx context.Context, d *schema.ResourceData, meta if v, ok := d.GetOk(names.AttrProperties); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.ThingGroupProperties = expandThingGroupProperties(v.([]interface{})[0].(map[string]interface{})) } else { - input.ThingGroupProperties = &iot.ThingGroupProperties{} + input.ThingGroupProperties = &awstypes.ThingGroupProperties{} } // https://docs.aws.amazon.com/iot/latest/apireference/API_AttributePayload.html#API_AttributePayload_Contents: // "To remove an attribute, call UpdateThing with an empty attribute value." 
if input.ThingGroupProperties.AttributePayload == nil { - input.ThingGroupProperties.AttributePayload = &iot.AttributePayload{ - Attributes: map[string]*string{}, + input.ThingGroupProperties.AttributePayload = &awstypes.AttributePayload{ + Attributes: map[string]string{}, } } - log.Printf("[DEBUG] Updating IoT Thing Group: %s", input) - _, err := conn.UpdateThingGroupWithContext(ctx, input) + _, err := conn.UpdateThingGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Thing Group (%s): %s", d.Id(), err) @@ -238,24 +235,20 @@ func resourceThingGroupUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceThingGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) log.Printf("[DEBUG] Deleting IoT Thing Group: %s", d.Id()) - _, err := tfresource.RetryWhen(ctx, thingGroupDeleteTimeout, + const ( + timeout = 1 * time.Minute + ) + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, timeout, func() (interface{}, error) { - return conn.DeleteThingGroupWithContext(ctx, &iot.DeleteThingGroupInput{ + return conn.DeleteThingGroup(ctx, &iot.DeleteThingGroupInput{ ThingGroupName: aws.String(d.Id()), }) - }, - func(err error) (bool, error) { - if tfawserr.ErrMessageContains(err, iot.ErrCodeInvalidRequestException, "there are still child groups attached") { - return true, err - } - - return false, err }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -266,12 +259,37 @@ func resourceThingGroupDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func expandThingGroupProperties(tfMap map[string]interface{}) *iot.ThingGroupProperties { +func findThingGroupByName(ctx context.Context, conn *iot.Client, name string) 
(*iot.DescribeThingGroupOutput, error) { + input := &iot.DescribeThingGroupInput{ + ThingGroupName: aws.String(name), + } + + output, err := conn.DescribeThingGroup(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func expandThingGroupProperties(tfMap map[string]interface{}) *awstypes.ThingGroupProperties { if tfMap == nil { return nil } - apiObject := &iot.ThingGroupProperties{} + apiObject := &awstypes.ThingGroupProperties{} if v, ok := tfMap["attribute_payload"].([]interface{}); ok && len(v) > 0 { apiObject.AttributePayload = expandAttributePayload(v[0].(map[string]interface{})) @@ -284,21 +302,21 @@ func expandThingGroupProperties(tfMap map[string]interface{}) *iot.ThingGroupPro return apiObject } -func expandAttributePayload(tfMap map[string]interface{}) *iot.AttributePayload { +func expandAttributePayload(tfMap map[string]interface{}) *awstypes.AttributePayload { if tfMap == nil { return nil } - apiObject := &iot.AttributePayload{} + apiObject := &awstypes.AttributePayload{} if v, ok := tfMap[names.AttrAttributes].(map[string]interface{}); ok && len(v) > 0 { - apiObject.Attributes = flex.ExpandStringMap(v) + apiObject.Attributes = flex.ExpandStringValueMap(v) } return apiObject } -func flattenThingGroupMetadata(apiObject *iot.ThingGroupMetadata) map[string]interface{} { +func flattenThingGroupMetadata(apiObject *awstypes.ThingGroupMetadata) map[string]interface{} { if apiObject == nil { return nil } @@ -306,11 +324,11 @@ func flattenThingGroupMetadata(apiObject *iot.ThingGroupMetadata) map[string]int tfMap := map[string]interface{}{} if v := apiObject.CreationDate; v != nil { - tfMap[names.AttrCreationDate] = aws.TimeValue(v).Format(time.RFC3339) + tfMap[names.AttrCreationDate] = 
aws.ToTime(v).Format(time.RFC3339) } if v := apiObject.ParentGroupName; v != nil { - tfMap["parent_group_name"] = aws.StringValue(v) + tfMap["parent_group_name"] = aws.ToString(v) } if v := apiObject.RootToParentThingGroups; v != nil { @@ -320,25 +338,21 @@ func flattenThingGroupMetadata(apiObject *iot.ThingGroupMetadata) map[string]int return tfMap } -func flattenGroupNameAndARN(apiObject *iot.GroupNameAndArn) map[string]interface{} { - if apiObject == nil { - return nil - } - +func flattenGroupNameAndARN(apiObject awstypes.GroupNameAndArn) map[string]interface{} { tfMap := map[string]interface{}{} if v := apiObject.GroupArn; v != nil { - tfMap["group_arn"] = aws.StringValue(v) + tfMap["group_arn"] = aws.ToString(v) } if v := apiObject.GroupName; v != nil { - tfMap[names.AttrGroupName] = aws.StringValue(v) + tfMap[names.AttrGroupName] = aws.ToString(v) } return tfMap } -func flattenGroupNameAndARNs(apiObjects []*iot.GroupNameAndArn) []interface{} { +func flattenGroupNameAndARNs(apiObjects []awstypes.GroupNameAndArn) []interface{} { if len(apiObjects) == 0 { return nil } @@ -346,17 +360,13 @@ func flattenGroupNameAndARNs(apiObjects []*iot.GroupNameAndArn) []interface{} { var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenGroupNameAndARN(apiObject)) } return tfList } -func flattenThingGroupProperties(apiObject *iot.ThingGroupProperties) map[string]interface{} { +func flattenThingGroupProperties(apiObject *awstypes.ThingGroupProperties) map[string]interface{} { if apiObject == nil { return nil } @@ -368,13 +378,13 @@ func flattenThingGroupProperties(apiObject *iot.ThingGroupProperties) map[string } if v := apiObject.ThingGroupDescription; v != nil { - tfMap[names.AttrDescription] = aws.StringValue(v) + tfMap[names.AttrDescription] = aws.ToString(v) } return tfMap } -func flattenAttributePayload(apiObject *iot.AttributePayload) map[string]interface{} { +func 
flattenAttributePayload(apiObject *awstypes.AttributePayload) map[string]interface{} { if apiObject == nil { return nil } @@ -382,7 +392,7 @@ func flattenAttributePayload(apiObject *iot.AttributePayload) map[string]interfa tfMap := map[string]interface{}{} if v := apiObject.Attributes; v != nil { - tfMap[names.AttrAttributes] = aws.StringValueMap(v) + tfMap[names.AttrAttributes] = aws.StringMap(v) } return tfMap diff --git a/internal/service/iot/thing_group_membership.go b/internal/service/iot/thing_group_membership.go index df611d6e663..d7fad058e59 100644 --- a/internal/service/iot/thing_group_membership.go +++ b/internal/service/iot/thing_group_membership.go @@ -9,18 +9,21 @@ import ( "log" "strings" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -// @SDKResource("aws_iot_thing_group_membership") -func ResourceThingGroupMembership() *schema.Resource { +// @SDKResource("aws_iot_thing_group_membership", name="Thing Group Membership") +func resourceThingGroupMembership() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceThingGroupMembershipCreate, ReadWithoutTimeout: resourceThingGroupMembershipRead, @@ -52,7 +55,7 @@ func ResourceThingGroupMembership() *schema.Resource { func 
resourceThingGroupMembershipCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) thingGroupName := d.Get("thing_group_name").(string) thingName := d.Get("thing_name").(string) @@ -62,32 +65,30 @@ func resourceThingGroupMembershipCreate(ctx context.Context, d *schema.ResourceD } if v, ok := d.GetOk("override_dynamic_group"); ok { - input.OverrideDynamicGroups = aws.Bool(v.(bool)) + input.OverrideDynamicGroups = v.(bool) } - log.Printf("[DEBUG] Creating IoT Thing Group Membership: %s", input) - _, err := conn.AddThingToThingGroupWithContext(ctx, input) + _, err := conn.AddThingToThingGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "adding IoT Thing (%s) to IoT Thing Group (%s): %s", thingName, thingGroupName, err) } - d.SetId(ThingGroupMembershipCreateResourceID(thingGroupName, thingName)) + d.SetId(thingGroupMembershipCreateResourceID(thingGroupName, thingName)) return append(diags, resourceThingGroupMembershipRead(ctx, d, meta)...) 
} func resourceThingGroupMembershipRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) - - thingGroupName, thingName, err := ThingGroupMembershipParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).IoTClient(ctx) + thingGroupName, thingName, err := thingGroupMembershipParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading IoT Thing Group Membership (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - err = FindThingGroupMembership(ctx, conn, thingGroupName, thingName) + _, err = findThingGroupMembershipByTwoPartKey(ctx, conn, thingGroupName, thingName) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Thing Group Membership (%s) not found, removing from state", d.Id()) @@ -107,21 +108,20 @@ func resourceThingGroupMembershipRead(ctx context.Context, d *schema.ResourceDat func resourceThingGroupMembershipDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) - - thingGroupName, thingName, err := ThingGroupMembershipParseResourceID(d.Id()) + conn := meta.(*conns.AWSClient).IoTClient(ctx) + thingGroupName, thingName, err := thingGroupMembershipParseResourceID(d.Id()) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting IoT Thing Group Membership (%s): %s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } log.Printf("[DEBUG] Deleting IoT Thing Group Membership: %s", d.Id()) - _, err = conn.RemoveThingFromThingGroupWithContext(ctx, &iot.RemoveThingFromThingGroupInput{ + _, err = conn.RemoveThingFromThingGroup(ctx, &iot.RemoveThingFromThingGroupInput{ ThingGroupName: aws.String(thingGroupName), ThingName: aws.String(thingName), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { 
return diags } @@ -132,16 +132,64 @@ func resourceThingGroupMembershipDelete(ctx context.Context, d *schema.ResourceD return diags } +func findThingGroupMembershipByTwoPartKey(ctx context.Context, conn *iot.Client, thingGroupName, thingName string) (*awstypes.GroupNameAndArn, error) { + input := &iot.ListThingGroupsForThingInput{ + ThingName: aws.String(thingName), + } + + return findThingGroup(ctx, conn, input, func(v *awstypes.GroupNameAndArn) bool { + return aws.ToString(v.GroupName) == thingGroupName + }) +} + +func findThingGroup(ctx context.Context, conn *iot.Client, input *iot.ListThingGroupsForThingInput, filter tfslices.Predicate[*awstypes.GroupNameAndArn]) (*awstypes.GroupNameAndArn, error) { + output, err := findThingGroups(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findThingGroups(ctx context.Context, conn *iot.Client, input *iot.ListThingGroupsForThingInput, filter tfslices.Predicate[*awstypes.GroupNameAndArn]) ([]awstypes.GroupNameAndArn, error) { + var output []awstypes.GroupNameAndArn + + pages := iot.NewListThingGroupsForThingPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.ThingGroups { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + const thingGroupMembershipResourceIDSeparator = "/" -func ThingGroupMembershipCreateResourceID(thingGroupName, thingName string) string { +func thingGroupMembershipCreateResourceID(thingGroupName, thingName string) string { parts := []string{thingGroupName, thingName} id := strings.Join(parts, thingGroupMembershipResourceIDSeparator) return id } -func ThingGroupMembershipParseResourceID(id string) (string, string, error) { +func 
thingGroupMembershipParseResourceID(id string) (string, string, error) { parts := strings.Split(id, thingGroupMembershipResourceIDSeparator) if len(parts) == 2 && parts[0] != "" && parts[1] != "" { diff --git a/internal/service/iot/thing_group_membership_test.go b/internal/service/iot/thing_group_membership_test.go index 123277359fc..5fc7d6b0283 100644 --- a/internal/service/iot/thing_group_membership_test.go +++ b/internal/service/iot/thing_group_membership_test.go @@ -160,38 +160,24 @@ func testAccCheckThingGroupMembershipExists(ctx context.Context, n string) resou return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No IoT Thing Group Membership ID is set") - } - - thingGroupName, thingName, err := tfiot.ThingGroupMembershipParseResourceID(rs.Primary.ID) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) - if err != nil { - return err - } + _, err := tfiot.FindThingGroupMembershipByTwoPartKey(ctx, conn, rs.Primary.Attributes["thing_group_name"], rs.Primary.Attributes["thing_name"]) - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) - - return tfiot.FindThingGroupMembership(ctx, conn, thingGroupName, thingName) + return err } } func testAccCheckThingGroupMembershipDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_thing_group_membership" { continue } - thingGroupName, thingName, err := tfiot.ThingGroupMembershipParseResourceID(rs.Primary.ID) - - if err != nil { - return err - } - - err = tfiot.FindThingGroupMembership(ctx, conn, thingGroupName, thingName) + _, err := tfiot.FindThingGroupMembershipByTwoPartKey(ctx, conn, rs.Primary.Attributes["thing_group_name"], rs.Primary.Attributes["thing_name"]) if tfresource.NotFound(err) { continue diff --git 
a/internal/service/iot/thing_group_test.go b/internal/service/iot/thing_group_test.go index 2a1779f9b22..b2130d39081 100644 --- a/internal/service/iot/thing_group_test.go +++ b/internal/service/iot/thing_group_test.go @@ -9,7 +9,6 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/iot" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -22,7 +21,6 @@ import ( func TestAccIoTThingGroup_basic(t *testing.T) { ctx := acctest.Context(t) - var thingGroup iot.DescribeThingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" @@ -35,7 +33,7 @@ func TestAccIoTThingGroup_basic(t *testing.T) { { Config: testAccThingGroupConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckThingGroupExists(ctx, resourceName, &thingGroup), + testAccCheckThingGroupExists(ctx, resourceName), acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "iot", regexache.MustCompile(fmt.Sprintf("thinggroup/%s$", rName))), resource.TestCheckResourceAttr(resourceName, "metadata.#", acctest.Ct1), resource.TestCheckResourceAttrSet(resourceName, "metadata.0.creation_date"), @@ -59,7 +57,6 @@ func TestAccIoTThingGroup_basic(t *testing.T) { func TestAccIoTThingGroup_disappears(t *testing.T) { ctx := acctest.Context(t) - var thingGroup iot.DescribeThingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" @@ -72,7 +69,7 @@ func TestAccIoTThingGroup_disappears(t *testing.T) { { Config: testAccThingGroupConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckThingGroupExists(ctx, resourceName, &thingGroup), + testAccCheckThingGroupExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfiot.ResourceThingGroup(), resourceName), ), 
ExpectNonEmptyPlan: true, @@ -83,7 +80,6 @@ func TestAccIoTThingGroup_disappears(t *testing.T) { func TestAccIoTThingGroup_tags(t *testing.T) { ctx := acctest.Context(t) - var thingGroup iot.DescribeThingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" @@ -96,7 +92,7 @@ func TestAccIoTThingGroup_tags(t *testing.T) { { Config: testAccThingGroupConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), Check: resource.ComposeTestCheckFunc( - testAccCheckThingGroupExists(ctx, resourceName, &thingGroup), + testAccCheckThingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), ), @@ -109,7 +105,7 @@ func TestAccIoTThingGroup_tags(t *testing.T) { { Config: testAccThingGroupConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckThingGroupExists(ctx, resourceName, &thingGroup), + testAccCheckThingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), @@ -118,7 +114,7 @@ func TestAccIoTThingGroup_tags(t *testing.T) { { Config: testAccThingGroupConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), Check: resource.ComposeTestCheckFunc( - testAccCheckThingGroupExists(ctx, resourceName, &thingGroup), + testAccCheckThingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), ), @@ -129,7 +125,6 @@ func TestAccIoTThingGroup_tags(t *testing.T) { func TestAccIoTThingGroup_parentGroup(t *testing.T) { ctx := 
acctest.Context(t) - var thingGroup iot.DescribeThingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" parentResourceName := "aws_iot_thing_group.parent" @@ -144,7 +139,7 @@ func TestAccIoTThingGroup_parentGroup(t *testing.T) { { Config: testAccThingGroupConfig_parent(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckThingGroupExists(ctx, resourceName, &thingGroup), + testAccCheckThingGroupExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "parent_group_name", parentResourceName, names.AttrName), resource.TestCheckResourceAttr(resourceName, "metadata.#", acctest.Ct1), resource.TestCheckResourceAttrPair(resourceName, "metadata.0.parent_group_name", parentResourceName, names.AttrName), @@ -166,7 +161,6 @@ func TestAccIoTThingGroup_parentGroup(t *testing.T) { func TestAccIoTThingGroup_properties(t *testing.T) { ctx := acctest.Context(t) - var thingGroup iot.DescribeThingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_iot_thing_group.test" @@ -179,7 +173,7 @@ func TestAccIoTThingGroup_properties(t *testing.T) { { Config: testAccThingGroupConfig_properties(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckThingGroupExists(ctx, resourceName, &thingGroup), + testAccCheckThingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "properties.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.0.attributes.%", acctest.Ct1), @@ -196,7 +190,7 @@ func TestAccIoTThingGroup_properties(t *testing.T) { { Config: testAccThingGroupConfig_propertiesUpdated(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckThingGroupExists(ctx, resourceName, &thingGroup), + testAccCheckThingGroupExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, 
"properties.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "properties.0.attribute_payload.0.attributes.%", acctest.Ct2), @@ -210,7 +204,7 @@ func TestAccIoTThingGroup_properties(t *testing.T) { }) } -func testAccCheckThingGroupExists(ctx context.Context, n string, v *iot.DescribeThingGroupOutput) resource.TestCheckFunc { +func testAccCheckThingGroupExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -221,23 +215,17 @@ func testAccCheckThingGroupExists(ctx context.Context, n string, v *iot.Describe return fmt.Errorf("No IoT Thing Group ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) - output, err := tfiot.FindThingGroupByName(ctx, conn, rs.Primary.ID) + _, err := tfiot.FindThingGroupByName(ctx, conn, rs.Primary.ID) - if err != nil { - return err - } - - *v = *output - - return nil + return err } } func testAccCheckThingGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_thing_group" { diff --git a/internal/service/iot/thing_principal_attachment.go b/internal/service/iot/thing_principal_attachment.go index 856fcf2e0a3..b59ab492580 100644 --- a/internal/service/iot/thing_principal_attachment.go +++ b/internal/service/iot/thing_principal_attachment.go @@ -8,18 +8,22 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + 
awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_iot_thing_principal_attachment") -func ResourceThingPrincipalAttachment() *schema.Resource { +// @SDKResource("aws_iot_thing_principal_attachment", name="Thing Principal Attachment") +func resourceThingPrincipalAttachment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceThingPrincipalAttachmentCreate, ReadWithoutTimeout: resourceThingPrincipalAttachmentRead, @@ -42,59 +46,44 @@ func ResourceThingPrincipalAttachment() *schema.Resource { func resourceThingPrincipalAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) principal := d.Get(names.AttrPrincipal).(string) thing := d.Get("thing").(string) - - _, err := conn.AttachThingPrincipalWithContext(ctx, &iot.AttachThingPrincipalInput{ + id := fmt.Sprintf("%s|%s", thing, principal) + input := &iot.AttachThingPrincipalInput{ Principal: aws.String(principal), ThingName: aws.String(thing), - }) + } + + _, err := conn.AttachThingPrincipal(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "attaching principal %s to thing %s: %s", principal, thing, err) + return sdkdiag.AppendErrorf(diags, "creating IoT Thing Principal Attachment (%s): %s", id, err) } - d.SetId(fmt.Sprintf("%s|%s", thing, 
principal)) - return append(diags, resourceThingPrincipalAttachmentRead(ctx, d, meta)...) -} + d.SetId(id) -func GetThingPricipalAttachment(ctx context.Context, conn *iot.IoT, thing, principal string) (bool, error) { - out, err := conn.ListThingPrincipalsWithContext(ctx, &iot.ListThingPrincipalsInput{ - ThingName: aws.String(thing), - }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - return false, nil - } else if err != nil { - return false, err - } - found := false - for _, name := range out.Principals { - if principal == aws.StringValue(name) { - found = true - break - } - } - return found, nil + return append(diags, resourceThingPrincipalAttachmentRead(ctx, d, meta)...) } func resourceThingPrincipalAttachmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) principal := d.Get(names.AttrPrincipal).(string) thing := d.Get("thing").(string) - found, err := GetThingPricipalAttachment(ctx, conn, thing, principal) + _, err := findThingPrincipalAttachmentByTwoPartKey(ctx, conn, thing, principal) - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing principals for thing %s: %s", thing, err) - } - - if !found { + if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Thing Principal Attachment (%s) not found, removing from state", d.Id()) d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading IoT Thing Principal Attachment (%s): %s", d.Id(), err) } return diags @@ -102,21 +91,69 @@ func resourceThingPrincipalAttachmentRead(ctx context.Context, d *schema.Resourc func resourceThingPrincipalAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - 
principal := d.Get(names.AttrPrincipal).(string) - thing := d.Get("thing").(string) + log.Printf("[DEBUG] Deleting IoT Thing Principal Attachment: %s", d.Id()) + _, err := conn.DetachThingPrincipal(ctx, &iot.DetachThingPrincipalInput{ + Principal: aws.String(d.Get(names.AttrPrincipal).(string)), + ThingName: aws.String(d.Get("thing").(string)), + }) - _, err := conn.DetachThingPrincipalWithContext(ctx, &iot.DetachThingPrincipalInput{ - Principal: aws.String(principal), + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting IoT Thing Principal Attachment (%s): %s", d.Id(), err) + } + + return diags +} + +func findThingPrincipalAttachmentByTwoPartKey(ctx context.Context, conn *iot.Client, thing, principal string) (*string, error) { + input := &iot.ListThingPrincipalsInput{ ThingName: aws.String(thing), + } + + return findThingPrincipal(ctx, conn, input, func(v string) bool { + return principal == v }) +} - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { - log.Printf("[WARN] IoT Principal %s or Thing %s not found, removing from state", principal, thing) - } else if err != nil { - return sdkdiag.AppendErrorf(diags, "detaching principal %s from thing %s: %s", principal, thing, err) +func findThingPrincipal(ctx context.Context, conn *iot.Client, input *iot.ListThingPrincipalsInput, filter tfslices.Predicate[string]) (*string, error) { + output, err := findThingPrincipals(ctx, conn, input, filter) + + if err != nil { + return nil, err } - return diags + return tfresource.AssertSingleValueResult(output) +} + +func findThingPrincipals(ctx context.Context, conn *iot.Client, input *iot.ListThingPrincipalsInput, filter tfslices.Predicate[string]) ([]string, error) { + var output []string + + pages := iot.NewListThingPrincipalsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if 
errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.Principals { + if filter(v) { + output = append(output, v) + } + } + } + + return output, nil } diff --git a/internal/service/iot/thing_principal_attachment_test.go b/internal/service/iot/thing_principal_attachment_test.go index bdd8d587339..544649b7d69 100644 --- a/internal/service/iot/thing_principal_attachment_test.go +++ b/internal/service/iot/thing_principal_attachment_test.go @@ -8,15 +8,17 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tfiot "github.com/hashicorp/terraform-provider-aws/internal/service/iot" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -77,27 +79,24 @@ func TestAccIoTThingPrincipalAttachment_basic(t *testing.T) { func testAccCheckThingPrincipalAttachmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_thing_principal_attachment" { continue } - principal := 
rs.Primary.Attributes[names.AttrPrincipal] - thing := rs.Primary.Attributes["thing"] + _, err := tfiot.FindThingPrincipalAttachmentByTwoPartKey(ctx, conn, rs.Primary.Attributes["thing"], rs.Primary.Attributes[names.AttrPrincipal]) - found, err := tfiot.GetThingPricipalAttachment(ctx, conn, thing, principal) - - if err != nil { - return fmt.Errorf("Error: Failed listing principals for thing (%s): %s", thing, err) + if tfresource.NotFound(err) { + continue } - if !found { - continue + if err != nil { + return err } - return fmt.Errorf("IOT Thing Principal Attachment (%s) still exists", rs.Primary.Attributes[names.AttrID]) + return fmt.Errorf("IoT Thing Principal Attachment %s still exists", rs.Primary.ID) } return nil @@ -111,31 +110,17 @@ func testAccCheckThingPrincipalAttachmentExists(ctx context.Context, n string) r return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No attachment") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) - thing := rs.Primary.Attributes["thing"] - principal := rs.Primary.Attributes[names.AttrPrincipal] + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) - found, err := tfiot.GetThingPricipalAttachment(ctx, conn, thing, principal) + _, err := tfiot.FindThingPrincipalAttachmentByTwoPartKey(ctx, conn, rs.Primary.Attributes["thing"], rs.Primary.Attributes[names.AttrPrincipal]) - if err != nil { - return fmt.Errorf("Error: Failed listing principals for thing (%s), resource (%s): %s", thing, n, err) - } - - if !found { - return fmt.Errorf("Error: Principal (%s) is not attached to thing (%s), resource (%s)", principal, thing, n) - } - - return nil + return err } } func testAccCheckThingPrincipalAttachmentStatus(ctx context.Context, thingName string, exists bool, principals []string) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) principalARNs := make(map[string]string) @@ -147,11 +132,11 @@ func testAccCheckThingPrincipalAttachmentStatus(ctx context.Context, thingName s principalARNs[pr.Primary.Attributes[names.AttrARN]] = p } - thing, err := conn.DescribeThingWithContext(ctx, &iot.DescribeThingInput{ + _, err := conn.DescribeThing(ctx, &iot.DescribeThingInput{ ThingName: aws.String(thingName), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { if exists { return fmt.Errorf("Error: Thing (%s) exists, but expected to be removed", thingName) } else { @@ -163,7 +148,7 @@ func testAccCheckThingPrincipalAttachmentStatus(ctx context.Context, thingName s return fmt.Errorf("Error: Thing (%s) does not exist, but expected to be", thingName) } - res, err := conn.ListThingPrincipalsWithContext(ctx, &iot.ListThingPrincipalsInput{ + res, err := conn.ListThingPrincipals(ctx, &iot.ListThingPrincipalsInput{ ThingName: aws.String(thingName), }) @@ -172,11 +157,11 @@ func testAccCheckThingPrincipalAttachmentStatus(ctx context.Context, thingName s } if len(res.Principals) != len(principalARNs) { - return fmt.Errorf("Error: Thing (%s) has wrong number of principals attached", thing) + return fmt.Errorf("Error: Thing (%s) has wrong number of principals attached", thingName) } for _, p := range res.Principals { - if principal, ok := principalARNs[aws.StringValue(p)]; !ok { + if principal, ok := principalARNs[p]; !ok { return fmt.Errorf("Error: Principal %s is not attached to thing %s", principal, thingName) } } diff --git a/internal/service/iot/thing_test.go b/internal/service/iot/thing_test.go index ad12901befd..b2deeab7e91 100644 --- a/internal/service/iot/thing_test.go +++ b/internal/service/iot/thing_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go-v2/service/iot" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -103,7 +103,7 @@ func TestAccIoTThing_full(t *testing.T) { ), }, { // Remove thing type association - Config: testAccThingConfig_basic(thingName), + Config: testAccThingConfig_fullUpdated(thingName, typeName), Check: resource.ComposeTestCheckFunc( testAccCheckThingExists(ctx, resourceName, &thing), resource.TestCheckResourceAttr(resourceName, names.AttrName, thingName), @@ -129,7 +129,7 @@ func testAccCheckThingExists(ctx context.Context, n string, v *iot.DescribeThing return fmt.Errorf("No IoT Thing ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) output, err := tfiot.FindThingByName(ctx, conn, rs.Primary.ID) @@ -145,7 +145,7 @@ func testAccCheckThingExists(ctx context.Context, n string, v *iot.DescribeThing func testAccCheckThingDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_thing" { @@ -196,3 +196,15 @@ resource "aws_iot_thing_type" "test" { } `, thingName, answer, typeName) } + +func testAccThingConfig_fullUpdated(thingName, typeName string) string { + return fmt.Sprintf(` +resource "aws_iot_thing" "test" { + name = %[1]q +} + +resource "aws_iot_thing_type" "test" { + name = %[2]q +} +`, thingName, typeName) +} diff --git a/internal/service/iot/thing_type.go b/internal/service/iot/thing_type.go index a0d20a4a6ed..f8a17f5349a 100644 --- a/internal/service/iot/thing_type.go +++ b/internal/service/iot/thing_type.go @@ -6,16 +6,17 @@ package iot import ( "context" "log" - "time" - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -24,7 +25,7 @@ import ( // @SDKResource("aws_iot_thing_type", name="Thing Type") // @Tags(identifierAttribute="arn") -func ResourceThingType() *schema.Resource { +func resourceThingType() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceThingTypeCreate, ReadWithoutTimeout: resourceThingTypeRead, @@ -91,7 +92,7 @@ func ResourceThingType() *schema.Resource { func resourceThingTypeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) name := d.Get(names.AttrName).(string) input := &iot.CreateThingTypeInput{ @@ -106,21 +107,21 @@ func resourceThingTypeCreate(ctx context.Context, d *schema.ResourceData, meta i } } - out, err := conn.CreateThingTypeWithContext(ctx, input) + out, err := conn.CreateThingType(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Thing Type (%s): %s", name, err) } - d.SetId(aws.StringValue(out.ThingTypeName)) + d.SetId(aws.ToString(out.ThingTypeName)) if v := 
d.Get("deprecated").(bool); v { input := &iot.DeprecateThingTypeInput{ ThingTypeName: aws.String(d.Id()), - UndoDeprecate: aws.Bool(false), + UndoDeprecate: false, } - _, err := conn.DeprecateThingTypeWithContext(ctx, input) + _, err := conn.DeprecateThingType(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "deprecating IoT Thing Type (%s): %s", d.Id(), err) @@ -132,9 +133,9 @@ func resourceThingTypeCreate(ctx context.Context, d *schema.ResourceData, meta i func resourceThingTypeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindThingTypeByName(ctx, conn, d.Id()) + output, err := findThingTypeByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Thing Type (%s) not found, removing from state", d.Id()) @@ -159,15 +160,15 @@ func resourceThingTypeRead(ctx context.Context, d *schema.ResourceData, meta int func resourceThingTypeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChange("deprecated") { input := &iot.DeprecateThingTypeInput{ ThingTypeName: aws.String(d.Id()), - UndoDeprecate: aws.Bool(!d.Get("deprecated").(bool)), + UndoDeprecate: !d.Get("deprecated").(bool), } - _, err := conn.DeprecateThingTypeWithContext(ctx, input) + _, err := conn.DeprecateThingType(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "deprecating IoT Thing Type (%s): %s", d.Id(), err) @@ -179,14 +180,14 @@ func resourceThingTypeUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceThingTypeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + 
conn := meta.(*conns.AWSClient).IoTClient(ctx) // In order to delete an IoT Thing Type, you must deprecate it first and wait at least 5 minutes. - _, err := conn.DeprecateThingTypeWithContext(ctx, &iot.DeprecateThingTypeInput{ + _, err := conn.DeprecateThingType(ctx, &iot.DeprecateThingTypeInput{ ThingTypeName: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -195,13 +196,14 @@ func resourceThingTypeDelete(ctx context.Context, d *schema.ResourceData, meta i } log.Printf("[DEBUG] Deleting IoT Thing Type: %s", d.Id()) - _, err = tfresource.RetryWhenAWSErrMessageContains(ctx, 6*time.Minute, func() (interface{}, error) { - return conn.DeleteThingTypeWithContext(ctx, &iot.DeleteThingTypeInput{ - ThingTypeName: aws.String(d.Id()), + _, err = tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, deprecatePropagationTimeout, + func() (interface{}, error) { + return conn.DeleteThingType(ctx, &iot.DeleteThingTypeInput{ + ThingTypeName: aws.String(d.Id()), + }) }) - }, iot.ErrCodeInvalidRequestException, "Please wait for 5 minutes after deprecation and then retry") - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -212,14 +214,14 @@ func resourceThingTypeDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func FindThingTypeByName(ctx context.Context, conn *iot.IoT, name string) (*iot.DescribeThingTypeOutput, error) { +func findThingTypeByName(ctx context.Context, conn *iot.Client, name string) (*iot.DescribeThingTypeOutput, error) { input := &iot.DescribeThingTypeInput{ ThingTypeName: aws.String(name), } - output, err := conn.DescribeThingTypeWithContext(ctx, input) + output, err := conn.DescribeThingType(ctx, input) - if tfawserr.ErrCodeEquals(err, iot.ErrCodeResourceNotFoundException) { + if 
errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -236,3 +238,31 @@ func FindThingTypeByName(ctx context.Context, conn *iot.IoT, name string) (*iot. return output, nil } + +func expandThingTypeProperties(config map[string]interface{}) *awstypes.ThingTypeProperties { + properties := &awstypes.ThingTypeProperties{ + SearchableAttributes: flex.ExpandStringValueSet(config["searchable_attributes"].(*schema.Set)), + } + + if v, ok := config[names.AttrDescription]; ok && v.(string) != "" { + properties.ThingTypeDescription = aws.String(v.(string)) + } + + return properties +} + +func flattenThingTypeProperties(s *awstypes.ThingTypeProperties) []map[string]interface{} { + m := map[string]interface{}{ + names.AttrDescription: "", + "searchable_attributes": flex.FlattenStringSet(nil), + } + + if s == nil { + return []map[string]interface{}{m} + } + + m[names.AttrDescription] = aws.ToString(s.ThingTypeDescription) + m["searchable_attributes"] = s.SearchableAttributes + + return []map[string]interface{}{m} +} diff --git a/internal/service/iot/thing_type_test.go b/internal/service/iot/thing_type_test.go index 4cc235ed879..a3391405902 100644 --- a/internal/service/iot/thing_type_test.go +++ b/internal/service/iot/thing_type_test.go @@ -162,7 +162,7 @@ func testAccCheckThingTypeExists(ctx context.Context, n string) resource.TestChe return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) _, err := tfiot.FindThingTypeByName(ctx, conn, rs.Primary.ID) @@ -172,7 +172,7 @@ func testAccCheckThingTypeExists(ctx context.Context, n string) resource.TestChe func testAccCheckThingTypeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := 
acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_thing_type" { diff --git a/internal/service/iot/topic_rule.go b/internal/service/iot/topic_rule.go index 60556106799..45521524398 100644 --- a/internal/service/iot/topic_rule.go +++ b/internal/service/iot/topic_rule.go @@ -8,12 +8,16 @@ import ( "log" "reflect" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -24,7 +28,7 @@ import ( // @SDKResource("aws_iot_topic_rule", name="Topic Rule") // @Tags(identifierAttribute="arn") -func ResourceTopicRule() *schema.Resource { +func resourceTopicRule() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTopicRuleCreate, ReadWithoutTimeout: resourceTopicRuleRead, @@ -35,1204 +39,1218 @@ func ResourceTopicRule() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - "cloudwatch_alarm": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ + SchemaFunc: func() map[string]*schema.Schema { + topicRuleErrorActionExactlyOneOf := []string{ + 
"error_action.0.cloudwatch_alarm", + "error_action.0.cloudwatch_logs", + "error_action.0.cloudwatch_metric", + "error_action.0.dynamodb", + "error_action.0.dynamodbv2", + "error_action.0.elasticsearch", + "error_action.0.firehose", + "error_action.0.http", + "error_action.0.iot_analytics", + "error_action.0.iot_events", + "error_action.0.kafka", + "error_action.0.kinesis", + "error_action.0.lambda", + "error_action.0.republish", + "error_action.0.s3", + "error_action.0.sns", + "error_action.0.sqs", + "error_action.0.step_functions", + "error_action.0.timestream", + } + + timestreamDimensionResource := func() *schema.Resource { + return &schema.Resource{ Schema: map[string]*schema.Schema{ - "alarm_name": { + names.AttrName: { Type: schema.TypeString, Required: true, }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "state_reason": { + names.AttrValue: { Type: schema.TypeString, Required: true, }, - "state_value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validTopicRuleCloudWatchAlarmStateValue, - }, }, + } + } + + return map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, }, - }, - names.AttrCloudWatchLogs: { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrLogGroupName: { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "cloudwatch_alarm": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_name": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "state_reason": { + Type: schema.TypeString, + Required: true, + }, + "state_value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: 
validTopicRuleCloudWatchAlarmStateValue, + }, }, }, }, - }, - "cloudwatch_metric": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrMetricName: { - Type: schema.TypeString, - Required: true, - }, - "metric_namespace": { - Type: schema.TypeString, - Required: true, - }, - "metric_timestamp": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidUTCTimestamp, - }, - "metric_unit": { - Type: schema.TypeString, - Required: true, - }, - "metric_value": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + names.AttrCloudWatchLogs: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_mode": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + names.AttrLogGroupName: { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - names.AttrDescription: { - Type: schema.TypeString, - Optional: true, - }, - "dynamodb": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hash_key_field": { - Type: schema.TypeString, - Required: true, - }, - "hash_key_value": { - Type: schema.TypeString, - Required: true, - }, - "hash_key_type": { - Type: schema.TypeString, - Optional: true, - }, - "operation": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "DELETE", - "INSERT", - "UPDATE", - }, false), - }, - "payload_field": { - Type: schema.TypeString, - Optional: true, - }, - "range_key_field": { - Type: schema.TypeString, - Optional: true, - }, - "range_key_value": { - Type: schema.TypeString, - Optional: true, - }, - "range_key_type": { - Type: schema.TypeString, - Optional: true, - }, - 
names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "cloudwatch_metric": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMetricName: { + Type: schema.TypeString, + Required: true, + }, + "metric_namespace": { + Type: schema.TypeString, + Required: true, + }, + "metric_timestamp": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidUTCTimestamp, + }, + "metric_unit": { + Type: schema.TypeString, + Required: true, + }, + "metric_value": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, - names.AttrTableName: { - Type: schema.TypeString, - Required: true, + }, + }, + names.AttrDescription: { + Type: schema.TypeString, + Optional: true, + }, + "dynamodb": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hash_key_field": { + Type: schema.TypeString, + Required: true, + }, + "hash_key_value": { + Type: schema.TypeString, + Required: true, + }, + "hash_key_type": { + Type: schema.TypeString, + Optional: true, + }, + "operation": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "DELETE", + "INSERT", + "UPDATE", + }, false), + }, + "payload_field": { + Type: schema.TypeString, + Optional: true, + }, + "range_key_field": { + Type: schema.TypeString, + Optional: true, + }, + "range_key_value": { + Type: schema.TypeString, + Optional: true, + }, + "range_key_type": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrTableName: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - "dynamodbv2": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "put_item": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrTableName: { - Type: schema.TypeString, - Required: true, + "dynamodbv2": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "put_item": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrTableName: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "elasticsearch": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrEndpoint: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validTopicRuleElasticsearchEndpoint, - }, - names.AttrID: { - Type: schema.TypeString, - Required: true, - }, - "index": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrType: { - Type: schema.TypeString, - Required: true, + "elasticsearch": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEndpoint: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validTopicRuleElasticsearchEndpoint, + }, + names.AttrID: { + Type: schema.TypeString, + Required: true, + }, + "index": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrType: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - names.AttrEnabled: { - Type: schema.TypeBool, - Required: true, 
- }, - "error_action": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cloudwatch_alarm": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "alarm_name": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "state_reason": { - Type: schema.TypeString, - Required: true, - }, - "state_value": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validTopicRuleCloudWatchAlarmStateValue, + names.AttrEnabled: { + Type: schema.TypeBool, + Required: true, + }, + "error_action": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cloudwatch_alarm": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_name": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "state_reason": { + Type: schema.TypeString, + Required: true, + }, + "state_value": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validTopicRuleCloudWatchAlarmStateValue, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - names.AttrCloudWatchLogs: { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrLogGroupName: { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + names.AttrCloudWatchLogs: { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_mode": { + 
Type: schema.TypeBool, + Optional: true, + Default: false, + }, + names.AttrLogGroupName: { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "cloudwatch_metric": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrMetricName: { - Type: schema.TypeString, - Required: true, - }, - "metric_namespace": { - Type: schema.TypeString, - Required: true, - }, - "metric_timestamp": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidUTCTimestamp, - }, - "metric_unit": { - Type: schema.TypeString, - Required: true, - }, - "metric_value": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "cloudwatch_metric": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrMetricName: { + Type: schema.TypeString, + Required: true, + }, + "metric_namespace": { + Type: schema.TypeString, + Required: true, + }, + "metric_timestamp": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidUTCTimestamp, + }, + "metric_unit": { + Type: schema.TypeString, + Required: true, + }, + "metric_value": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "dynamodb": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "hash_key_field": { - Type: schema.TypeString, - Required: true, - }, - 
"hash_key_value": { - Type: schema.TypeString, - Required: true, - }, - "hash_key_type": { - Type: schema.TypeString, - Optional: true, - }, - "operation": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "DELETE", - "INSERT", - "UPDATE", - }, false), - }, - "payload_field": { - Type: schema.TypeString, - Optional: true, - }, - "range_key_field": { - Type: schema.TypeString, - Optional: true, - }, - "range_key_value": { - Type: schema.TypeString, - Optional: true, - }, - "range_key_type": { - Type: schema.TypeString, - Optional: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrTableName: { - Type: schema.TypeString, - Required: true, + "dynamodb": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hash_key_field": { + Type: schema.TypeString, + Required: true, + }, + "hash_key_value": { + Type: schema.TypeString, + Required: true, + }, + "hash_key_type": { + Type: schema.TypeString, + Optional: true, + }, + "operation": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "DELETE", + "INSERT", + "UPDATE", + }, false), + }, + "payload_field": { + Type: schema.TypeString, + Optional: true, + }, + "range_key_field": { + Type: schema.TypeString, + Optional: true, + }, + "range_key_value": { + Type: schema.TypeString, + Optional: true, + }, + "range_key_type": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrTableName: { + Type: schema.TypeString, + Required: true, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "dynamodbv2": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: 
map[string]*schema.Schema{ - "put_item": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrTableName: { - Type: schema.TypeString, - Required: true, + "dynamodbv2": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "put_item": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrTableName: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "elasticsearch": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrEndpoint: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validTopicRuleElasticsearchEndpoint, - }, - names.AttrID: { - Type: schema.TypeString, - Required: true, - }, - "index": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrType: { - Type: schema.TypeString, - Required: true, + "elasticsearch": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEndpoint: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validTopicRuleElasticsearchEndpoint, + }, + names.AttrID: { + Type: schema.TypeString, + Required: true, + }, + "index": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + 
names.AttrType: { + Type: schema.TypeString, + Required: true, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "firehose": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "batch_mode": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "delivery_stream_name": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "separator": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validTopicRuleFirehoseSeparator, + "firehose": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_mode": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "delivery_stream_name": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "separator": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validTopicRuleFirehoseSeparator, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "http": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "confirmation_url": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.IsURLWithHTTPS, - }, - "http_header": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, + "http": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "confirmation_url": { 
+ Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.IsURLWithHTTPS, + }, + "http_header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - names.AttrURL: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.IsURLWithHTTPS, + names.AttrURL: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.IsURLWithHTTPS, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "iot_analytics": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "batch_mode": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "channel_name": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "iot_analytics": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_mode": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "channel_name": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "iot_events": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "batch_mode": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "input_name": { - Type: schema.TypeString, - Required: true, - }, - "message_id": { - Type: schema.TypeString, - Optional: true, - }, - names.AttrRoleARN: { 
- Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "iot_events": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_mode": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "input_name": { + Type: schema.TypeString, + Required: true, + }, + "message_id": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "kafka": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_properties": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - names.AttrDestinationARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrHeader: { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, + "kafka": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_properties": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + names.AttrDestinationARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrHeader: { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - names.AttrKey: { - Type: schema.TypeString, - Optional: 
true, - }, - "partition": { - Type: schema.TypeString, - Optional: true, - }, - "topic": { - Type: schema.TypeString, - Required: true, + names.AttrKey: { + Type: schema.TypeString, + Optional: true, + }, + "partition": { + Type: schema.TypeString, + Optional: true, + }, + "topic": { + Type: schema.TypeString, + Required: true, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "kinesis": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "partition_key": { - Type: schema.TypeString, - Optional: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "stream_name": { - Type: schema.TypeString, - Required: true, + "kinesis": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "partition_key": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "stream_name": { + Type: schema.TypeString, + Required: true, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "lambda": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrFunctionARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "lambda": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFunctionARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "republish": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - 
Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "qos": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - ValidateFunc: validation.IntBetween(0, 1), - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "topic": { - Type: schema.TypeString, - Required: true, + "republish": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "qos": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.IntBetween(0, 1), + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "topic": { + Type: schema.TypeString, + Required: true, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "s3": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrBucketName: { - Type: schema.TypeString, - Required: true, - }, - "canned_acl": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(iot.CannedAccessControlList_Values(), false), - }, - names.AttrKey: { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "s3": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrBucketName: { + Type: schema.TypeString, + Required: true, + }, + "canned_acl": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.CannedAccessControlList](), + }, + names.AttrKey: { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - 
ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "sns": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "message_format": { - Type: schema.TypeString, - Default: iot.MessageFormatRaw, - Optional: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrTargetARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "sns": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message_format": { + Type: schema.TypeString, + Default: awstypes.MessageFormatRaw, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrTargetARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "sqs": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "queue_url": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "use_base64": { - Type: schema.TypeBool, - Required: true, + "sqs": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queue_url": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "use_base64": { + Type: schema.TypeBool, + Required: true, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "step_functions": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, 
- Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "execution_name_prefix": { - Type: schema.TypeString, - Optional: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "state_machine_name": { - Type: schema.TypeString, - Required: true, + "step_functions": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execution_name_prefix": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "state_machine_name": { + Type: schema.TypeString, + Required: true, + }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, - }, - "timestream": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrDatabaseName: { - Type: schema.TypeString, - Required: true, - }, - "dimension": { - Type: schema.TypeSet, - Required: true, - Elem: timestreamDimensionResource, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrTableName: { - Type: schema.TypeString, - Required: true, - }, - "timestamp": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrUnit: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "SECONDS", - "MILLISECONDS", - "MICROSECONDS", - "NANOSECONDS", - }, false), - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, + "timestream": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDatabaseName: { + Type: schema.TypeString, + Required: true, + }, + "dimension": { + Type: schema.TypeSet, + 
Required: true, + Elem: timestreamDimensionResource(), + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrTableName: { + Type: schema.TypeString, + Required: true, + }, + "timestamp": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrUnit: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "SECONDS", + "MILLISECONDS", + "MICROSECONDS", + "NANOSECONDS", + }, false), + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, }, }, }, }, }, + ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, - ExactlyOneOf: topicRuleErrorActionExactlyOneOf, }, }, }, - }, - "firehose": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "batch_mode": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "delivery_stream_name": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "separator": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validTopicRuleFirehoseSeparator, + "firehose": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_mode": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "delivery_stream_name": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "separator": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validTopicRuleFirehoseSeparator, + }, }, }, }, - }, - "http": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "confirmation_url": { - Type: schema.TypeString, - Optional: true, - 
ValidateFunc: validation.IsURLWithHTTPS, - }, - "http_header": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, + "http": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "confirmation_url": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.IsURLWithHTTPS, + }, + "http_header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - names.AttrURL: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.IsURLWithHTTPS, + names.AttrURL: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.IsURLWithHTTPS, + }, }, }, }, - }, - "iot_analytics": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "batch_mode": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - "channel_name": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "iot_analytics": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_mode": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "channel_name": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "iot_events": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "batch_mode": { - Type: schema.TypeBool, 
- Optional: true, - Default: false, - }, - "input_name": { - Type: schema.TypeString, - Required: true, - }, - "message_id": { - Type: schema.TypeString, - Optional: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "iot_events": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_mode": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "input_name": { + Type: schema.TypeString, + Required: true, + }, + "message_id": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "kafka": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "client_properties": { - Type: schema.TypeMap, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - names.AttrDestinationARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrHeader: { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, + "kafka": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_properties": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + names.AttrDestinationARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrHeader: { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - 
names.AttrKey: { - Type: schema.TypeString, - Optional: true, - }, - "partition": { - Type: schema.TypeString, - Optional: true, - }, - "topic": { - Type: schema.TypeString, - Required: true, + names.AttrKey: { + Type: schema.TypeString, + Optional: true, + }, + "partition": { + Type: schema.TypeString, + Optional: true, + }, + "topic": { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - "kinesis": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "partition_key": { - Type: schema.TypeString, - Optional: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "stream_name": { - Type: schema.TypeString, - Required: true, + "kinesis": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "partition_key": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "stream_name": { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - "lambda": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrFunctionARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "lambda": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFunctionARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - names.AttrName: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validTopicRuleName, - }, - "republish": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "qos": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - ValidateFunc: validation.IntBetween(0, 1), - }, - names.AttrRoleARN: { 
- Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "topic": { - Type: schema.TypeString, - Required: true, + names.AttrName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validTopicRuleName, + }, + "republish": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "qos": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.IntBetween(0, 1), + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "topic": { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - "s3": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrBucketName: { - Type: schema.TypeString, - Required: true, - }, - "canned_acl": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(iot.CannedAccessControlList_Values(), false), - }, - names.AttrKey: { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "s3": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrBucketName: { + Type: schema.TypeString, + Required: true, + }, + "canned_acl": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.CannedAccessControlList](), + }, + names.AttrKey: { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "sns": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "message_format": { - Type: schema.TypeString, - Optional: true, - Default: iot.MessageFormatRaw, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - 
Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrTargetARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "sns": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message_format": { + Type: schema.TypeString, + Optional: true, + Default: awstypes.MessageFormatRaw, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrTargetARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "sql": { - Type: schema.TypeString, - Required: true, - }, - "sql_version": { - Type: schema.TypeString, - Required: true, - }, - "sqs": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "queue_url": { - Type: schema.TypeString, - Required: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "use_base64": { - Type: schema.TypeBool, - Required: true, + "sql": { + Type: schema.TypeString, + Required: true, + }, + "sql_version": { + Type: schema.TypeString, + Required: true, + }, + "sqs": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queue_url": { + Type: schema.TypeString, + Required: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "use_base64": { + Type: schema.TypeBool, + Required: true, + }, }, }, }, - }, - "step_functions": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "execution_name_prefix": { - Type: schema.TypeString, - Optional: true, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "state_machine_name": { - Type: schema.TypeString, - Required: true, + "step_functions": { + 
Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execution_name_prefix": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + "state_machine_name": { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), - "timestream": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrDatabaseName: { - Type: schema.TypeString, - Required: true, - }, - "dimension": { - Type: schema.TypeSet, - Required: true, - Elem: timestreamDimensionResource, - }, - names.AttrRoleARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrTableName: { - Type: schema.TypeString, - Required: true, - }, - "timestamp": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrUnit: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "SECONDS", - "MILLISECONDS", - "MICROSECONDS", - "NANOSECONDS", - }, false), - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "timestream": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDatabaseName: { + Type: schema.TypeString, + Required: true, + }, + "dimension": { + Type: schema.TypeSet, + Required: true, + Elem: timestreamDimensionResource(), + }, + names.AttrRoleARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrTableName: { + Type: schema.TypeString, + Required: true, + }, + "timestamp": { + Type: schema.TypeList, + Optional: true, + MaxItems: 
1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrUnit: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "SECONDS", + "MILLISECONDS", + "MICROSECONDS", + "NANOSECONDS", + }, false), + }, + names.AttrValue: { + Type: schema.TypeString, + Required: true, + }, }, }, }, }, }, }, - }, + } }, CustomizeDiff: verify.SetTagsDiff, } } -var topicRuleErrorActionExactlyOneOf = []string{ - "error_action.0.cloudwatch_alarm", - "error_action.0.cloudwatch_logs", - "error_action.0.cloudwatch_metric", - "error_action.0.dynamodb", - "error_action.0.dynamodbv2", - "error_action.0.elasticsearch", - "error_action.0.firehose", - "error_action.0.http", - "error_action.0.iot_analytics", - "error_action.0.iot_events", - "error_action.0.kafka", - "error_action.0.kinesis", - "error_action.0.lambda", - "error_action.0.republish", - "error_action.0.s3", - "error_action.0.sns", - "error_action.0.sqs", - "error_action.0.step_functions", - "error_action.0.timestream", -} - -var timestreamDimensionResource *schema.Resource = &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrName: { - Type: schema.TypeString, - Required: true, - }, - names.AttrValue: { - Type: schema.TypeString, - Required: true, - }, - }, -} - func resourceTopicRuleCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) ruleName := d.Get(names.AttrName).(string) input := &iot.CreateTopicRuleInput{ @@ -1241,11 +1259,10 @@ func resourceTopicRuleCreate(ctx context.Context, d *schema.ResourceData, meta i TopicRulePayload: expandTopicRulePayload(d), } - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, + _, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (interface{}, error) { - return 
conn.CreateTopicRuleWithContext(ctx, input) - }, - iot.ErrCodeInvalidRequestException, "sts:AssumeRole") + return conn.CreateTopicRule(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Topic Rule (%s): %s", ruleName, err) @@ -1258,9 +1275,9 @@ func resourceTopicRuleCreate(ctx context.Context, d *schema.ResourceData, meta i func resourceTopicRuleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) - output, err := FindTopicRuleByName(ctx, conn, d.Id()) + output, err := findTopicRuleByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Topic Rule %s not found, removing from state", d.Id()) @@ -1275,7 +1292,7 @@ func resourceTopicRuleRead(ctx context.Context, d *schema.ResourceData, meta int d.Set(names.AttrARN, output.RuleArn) d.Set(names.AttrName, output.Rule.RuleName) d.Set(names.AttrDescription, output.Rule.Description) - d.Set(names.AttrEnabled, !aws.BoolValue(output.Rule.RuleDisabled)) + d.Set(names.AttrEnabled, !aws.ToBool(output.Rule.RuleDisabled)) d.Set("sql", output.Rule.Sql) d.Set("sql_version", output.Rule.AwsIotSqlVersion) @@ -1364,7 +1381,7 @@ func resourceTopicRuleRead(ctx context.Context, d *schema.ResourceData, meta int func resourceTopicRuleUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { input := &iot.ReplaceTopicRuleInput{ @@ -1372,7 +1389,10 @@ func resourceTopicRuleUpdate(ctx context.Context, d *schema.ResourceData, meta i TopicRulePayload: expandTopicRulePayload(d), } - _, err := conn.ReplaceTopicRuleWithContext(ctx, input) + _, err := 
tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, + func() (interface{}, error) { + return conn.ReplaceTopicRule(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "replacing IoT Topic Rule (%s): %s", d.Id(), err) @@ -1384,10 +1404,10 @@ func resourceTopicRuleUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceTopicRuleDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) log.Printf("[INFO] Deleting IoT Topic Rule: %s", d.Id()) - _, err := conn.DeleteTopicRuleWithContext(ctx, &iot.DeleteTopicRuleInput{ + _, err := conn.DeleteTopicRule(ctx, &iot.DeleteTopicRuleInput{ RuleName: aws.String(d.Id()), }) @@ -1398,12 +1418,65 @@ func resourceTopicRuleDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func expandPutItemInput(tfList []interface{}) *iot.PutItemInput { +func findTopicRuleByName(ctx context.Context, conn *iot.Client, name string) (*iot.GetTopicRuleOutput, error) { + // GetTopicRule returns unhelpful errors such as + // "An error occurred (UnauthorizedException) when calling the GetTopicRule operation: Access to topic rule 'xxxxxxxx' was denied" + // when querying for a rule that doesn't exist. 
+ inputL := &iot.ListTopicRulesInput{} + var rule *awstypes.TopicRuleListItem + + pages := iot.NewListTopicRulesPaginator(conn, inputL) +pageLoop: + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.Rules { + v := v + if aws.ToString(v.RuleName) == name { + rule = &v + break pageLoop + } + } + } + + if rule == nil { + return nil, tfresource.NewEmptyResultError(nil) + } + + inputG := &iot.GetTopicRuleInput{ + RuleName: aws.String(name), + } + + output, err := conn.GetTopicRule(ctx, inputG) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: inputG, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(inputG) + } + + return output, nil +} + +func expandPutItemInput(tfList []interface{}) *awstypes.PutItemInput { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.PutItemInput{} + apiObject := &awstypes.PutItemInput{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap[names.AttrTableName].(string); ok && v != "" { @@ -1413,12 +1486,12 @@ func expandPutItemInput(tfList []interface{}) *iot.PutItemInput { return apiObject } -func expandCloudWatchAlarmAction(tfList []interface{}) *iot.CloudwatchAlarmAction { +func expandCloudWatchAlarmAction(tfList []interface{}) *awstypes.CloudwatchAlarmAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.CloudwatchAlarmAction{} + apiObject := &awstypes.CloudwatchAlarmAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["alarm_name"].(string); ok && v != "" { @@ -1440,14 +1513,18 @@ func expandCloudWatchAlarmAction(tfList []interface{}) *iot.CloudwatchAlarmActio return apiObject } -func expandCloudWatchLogsAction(tfList []interface{}) *iot.CloudwatchLogsAction { +func expandCloudWatchLogsAction(tfList []interface{}) 
*awstypes.CloudwatchLogsAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.CloudwatchLogsAction{} + apiObject := &awstypes.CloudwatchLogsAction{} tfMap := tfList[0].(map[string]interface{}) + if v, ok := tfMap["batch_mode"].(bool); ok { + apiObject.BatchMode = aws.Bool(v) + } + if v, ok := tfMap[names.AttrLogGroupName].(string); ok && v != "" { apiObject.LogGroupName = aws.String(v) } @@ -1459,12 +1536,12 @@ func expandCloudWatchLogsAction(tfList []interface{}) *iot.CloudwatchLogsAction return apiObject } -func expandCloudWatchMetricAction(tfList []interface{}) *iot.CloudwatchMetricAction { +func expandCloudWatchMetricAction(tfList []interface{}) *awstypes.CloudwatchMetricAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.CloudwatchMetricAction{} + apiObject := &awstypes.CloudwatchMetricAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap[names.AttrMetricName].(string); ok && v != "" { @@ -1494,12 +1571,12 @@ func expandCloudWatchMetricAction(tfList []interface{}) *iot.CloudwatchMetricAct return apiObject } -func expandDynamoDBAction(tfList []interface{}) *iot.DynamoDBAction { +func expandDynamoDBAction(tfList []interface{}) *awstypes.DynamoDBAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.DynamoDBAction{} + apiObject := &awstypes.DynamoDBAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["hash_key_field"].(string); ok && v != "" { @@ -1507,7 +1584,7 @@ func expandDynamoDBAction(tfList []interface{}) *iot.DynamoDBAction { } if v, ok := tfMap["hash_key_type"].(string); ok && v != "" { - apiObject.HashKeyType = aws.String(v) + apiObject.HashKeyType = awstypes.DynamoKeyType(v) } if v, ok := tfMap["hash_key_value"].(string); ok && v != "" { @@ -1527,7 +1604,7 @@ func expandDynamoDBAction(tfList []interface{}) *iot.DynamoDBAction { } if v, ok := tfMap["range_key_type"].(string); ok && v != "" { - apiObject.RangeKeyType = 
aws.String(v) + apiObject.RangeKeyType = awstypes.DynamoKeyType(v) } if v, ok := tfMap["range_key_value"].(string); ok && v != "" { @@ -1545,12 +1622,12 @@ func expandDynamoDBAction(tfList []interface{}) *iot.DynamoDBAction { return apiObject } -func expandDynamoDBv2Action(tfList []interface{}) *iot.DynamoDBv2Action { +func expandDynamoDBv2Action(tfList []interface{}) *awstypes.DynamoDBv2Action { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.DynamoDBv2Action{} + apiObject := &awstypes.DynamoDBv2Action{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["put_item"].([]interface{}); ok { @@ -1564,12 +1641,12 @@ func expandDynamoDBv2Action(tfList []interface{}) *iot.DynamoDBv2Action { return apiObject } -func expandElasticsearchAction(tfList []interface{}) *iot.ElasticsearchAction { +func expandElasticsearchAction(tfList []interface{}) *awstypes.ElasticsearchAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.ElasticsearchAction{} + apiObject := &awstypes.ElasticsearchAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap[names.AttrEndpoint].(string); ok && v != "" { @@ -1595,12 +1672,12 @@ func expandElasticsearchAction(tfList []interface{}) *iot.ElasticsearchAction { return apiObject } -func expandFirehoseAction(tfList []interface{}) *iot.FirehoseAction { +func expandFirehoseAction(tfList []interface{}) *awstypes.FirehoseAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.FirehoseAction{} + apiObject := &awstypes.FirehoseAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["batch_mode"].(bool); ok { @@ -1622,12 +1699,12 @@ func expandFirehoseAction(tfList []interface{}) *iot.FirehoseAction { return apiObject } -func expandHTTPAction(tfList []interface{}) *iot.HttpAction { +func expandHTTPAction(tfList []interface{}) *awstypes.HttpAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.HttpAction{} + 
apiObject := &awstypes.HttpAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap[names.AttrURL].(string); ok && v != "" { @@ -1639,10 +1716,10 @@ func expandHTTPAction(tfList []interface{}) *iot.HttpAction { } if v, ok := tfMap["http_header"].([]interface{}); ok { - headerObjs := []*iot.HttpActionHeader{} + headerObjs := []awstypes.HttpActionHeader{} for _, val := range v { if m, ok := val.(map[string]interface{}); ok { - headerObj := &iot.HttpActionHeader{} + headerObj := awstypes.HttpActionHeader{} if v, ok := m[names.AttrKey].(string); ok && v != "" { headerObj.Key = aws.String(v) } @@ -1658,12 +1735,12 @@ func expandHTTPAction(tfList []interface{}) *iot.HttpAction { return apiObject } -func expandAnalyticsAction(tfList []interface{}) *iot.IotAnalyticsAction { +func expandAnalyticsAction(tfList []interface{}) *awstypes.IotAnalyticsAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.IotAnalyticsAction{} + apiObject := &awstypes.IotAnalyticsAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["batch_mode"].(bool); ok { @@ -1681,12 +1758,12 @@ func expandAnalyticsAction(tfList []interface{}) *iot.IotAnalyticsAction { return apiObject } -func expandEventsAction(tfList []interface{}) *iot.IotEventsAction { +func expandEventsAction(tfList []interface{}) *awstypes.IotEventsAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.IotEventsAction{} + apiObject := &awstypes.IotEventsAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["batch_mode"].(bool); ok { @@ -1708,16 +1785,16 @@ func expandEventsAction(tfList []interface{}) *iot.IotEventsAction { return apiObject } -func expandKafkaAction(tfList []interface{}) *iot.KafkaAction { +func expandKafkaAction(tfList []interface{}) *awstypes.KafkaAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.KafkaAction{} + apiObject := &awstypes.KafkaAction{} tfMap := 
tfList[0].(map[string]interface{}) if v, ok := tfMap["client_properties"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.ClientProperties = flex.ExpandStringMap(v) + apiObject.ClientProperties = flex.ExpandStringValueMap(v) } if v, ok := tfMap[names.AttrDestinationARN].(string); ok && v != "" { @@ -1740,19 +1817,19 @@ func expandKafkaAction(tfList []interface{}) *iot.KafkaAction { apiObject.Topic = aws.String(v) } - if reflect.DeepEqual(&iot.KafkaAction{}, apiObject) { + if reflect.DeepEqual(&awstypes.KafkaAction{}, apiObject) { return nil } return apiObject } -func expandKafkaHeader(tfList []interface{}) []*iot.KafkaActionHeader { - var apiObjects []*iot.KafkaActionHeader +func expandKafkaHeader(tfList []interface{}) []awstypes.KafkaActionHeader { + var apiObjects []awstypes.KafkaActionHeader for _, elem := range tfList { tfMap := elem.(map[string]interface{}) - apiObject := &iot.KafkaActionHeader{} + apiObject := awstypes.KafkaActionHeader{} if v, ok := tfMap[names.AttrKey].(string); ok && v != "" { apiObject.Key = aws.String(v) } @@ -1767,12 +1844,12 @@ func expandKafkaHeader(tfList []interface{}) []*iot.KafkaActionHeader { return apiObjects } -func expandKinesisAction(tfList []interface{}) *iot.KinesisAction { +func expandKinesisAction(tfList []interface{}) *awstypes.KinesisAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.KinesisAction{} + apiObject := &awstypes.KinesisAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["partition_key"].(string); ok && v != "" { @@ -1790,12 +1867,12 @@ func expandKinesisAction(tfList []interface{}) *iot.KinesisAction { return apiObject } -func expandLambdaAction(tfList []interface{}) *iot.LambdaAction { +func expandLambdaAction(tfList []interface{}) *awstypes.LambdaAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.LambdaAction{} + apiObject := &awstypes.LambdaAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := 
tfMap[names.AttrFunctionARN].(string); ok && v != "" { @@ -1805,16 +1882,16 @@ func expandLambdaAction(tfList []interface{}) *iot.LambdaAction { return apiObject } -func expandRepublishAction(tfList []interface{}) *iot.RepublishAction { +func expandRepublishAction(tfList []interface{}) *awstypes.RepublishAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.RepublishAction{} + apiObject := &awstypes.RepublishAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["qos"].(int); ok { - apiObject.Qos = aws.Int64(int64(v)) + apiObject.Qos = aws.Int32(int32(v)) } if v, ok := tfMap[names.AttrRoleARN].(string); ok && v != "" { @@ -1828,12 +1905,12 @@ func expandRepublishAction(tfList []interface{}) *iot.RepublishAction { return apiObject } -func expandS3Action(tfList []interface{}) *iot.S3Action { +func expandS3Action(tfList []interface{}) *awstypes.S3Action { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.S3Action{} + apiObject := &awstypes.S3Action{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap[names.AttrBucketName].(string); ok && v != "" { @@ -1841,7 +1918,7 @@ func expandS3Action(tfList []interface{}) *iot.S3Action { } if v, ok := tfMap["canned_acl"].(string); ok && v != "" { - apiObject.CannedAcl = aws.String(v) + apiObject.CannedAcl = awstypes.CannedAccessControlList(v) } if v, ok := tfMap[names.AttrKey].(string); ok && v != "" { @@ -1855,16 +1932,16 @@ func expandS3Action(tfList []interface{}) *iot.S3Action { return apiObject } -func expandSNSAction(tfList []interface{}) *iot.SnsAction { +func expandSNSAction(tfList []interface{}) *awstypes.SnsAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.SnsAction{} + apiObject := &awstypes.SnsAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["message_format"].(string); ok && v != "" { - apiObject.MessageFormat = aws.String(v) + apiObject.MessageFormat = awstypes.MessageFormat(v) } 
if v, ok := tfMap[names.AttrRoleARN].(string); ok && v != "" { @@ -1878,12 +1955,12 @@ func expandSNSAction(tfList []interface{}) *iot.SnsAction { return apiObject } -func expandSQSAction(tfList []interface{}) *iot.SqsAction { +func expandSQSAction(tfList []interface{}) *awstypes.SqsAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.SqsAction{} + apiObject := &awstypes.SqsAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["queue_url"].(string); ok && v != "" { @@ -1901,12 +1978,12 @@ func expandSQSAction(tfList []interface{}) *iot.SqsAction { return apiObject } -func expandStepFunctionsAction(tfList []interface{}) *iot.StepFunctionsAction { +func expandStepFunctionsAction(tfList []interface{}) *awstypes.StepFunctionsAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.StepFunctionsAction{} + apiObject := &awstypes.StepFunctionsAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap["execution_name_prefix"].(string); ok && v != "" { @@ -1924,12 +2001,12 @@ func expandStepFunctionsAction(tfList []interface{}) *iot.StepFunctionsAction { return apiObject } -func expandTimestreamAction(tfList []interface{}) *iot.TimestreamAction { +func expandTimestreamAction(tfList []interface{}) *awstypes.TimestreamAction { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.TimestreamAction{} + apiObject := &awstypes.TimestreamAction{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap[names.AttrDatabaseName].(string); ok && v != "" { @@ -1955,15 +2032,15 @@ func expandTimestreamAction(tfList []interface{}) *iot.TimestreamAction { return apiObject } -func expandTimestreamDimensions(tfSet *schema.Set) []*iot.TimestreamDimension { +func expandTimestreamDimensions(tfSet *schema.Set) []awstypes.TimestreamDimension { if tfSet == nil || tfSet.Len() == 0 { return nil } - apiObjects := make([]*iot.TimestreamDimension, tfSet.Len()) + apiObjects := 
make([]awstypes.TimestreamDimension, tfSet.Len()) for i, elem := range tfSet.List() { if tfMap, ok := elem.(map[string]interface{}); ok { - apiObject := &iot.TimestreamDimension{} + apiObject := awstypes.TimestreamDimension{} if v, ok := tfMap[names.AttrName].(string); ok && v != "" { apiObject.Name = aws.String(v) @@ -1980,12 +2057,12 @@ func expandTimestreamDimensions(tfSet *schema.Set) []*iot.TimestreamDimension { return apiObjects } -func expandTimestreamTimestamp(tfList []interface{}) *iot.TimestreamTimestamp { +func expandTimestreamTimestamp(tfList []interface{}) *awstypes.TimestreamTimestamp { if len(tfList) == 0 || tfList[0] == nil { return nil } - apiObject := &iot.TimestreamTimestamp{} + apiObject := &awstypes.TimestreamTimestamp{} tfMap := tfList[0].(map[string]interface{}) if v, ok := tfMap[names.AttrUnit].(string); ok && v != "" { @@ -1999,8 +2076,8 @@ func expandTimestreamTimestamp(tfList []interface{}) *iot.TimestreamTimestamp { return apiObject } -func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { - var actions []*iot.Action +func expandTopicRulePayload(d *schema.ResourceData) *awstypes.TopicRulePayload { + var actions []awstypes.Action // Legacy root attribute handling for _, tfMapRaw := range d.Get("cloudwatch_alarm").(*schema.Set).List() { @@ -2010,7 +2087,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{CloudwatchAlarm: action}) + actions = append(actions, awstypes.Action{CloudwatchAlarm: action}) } // Legacy root attribute handling @@ -2021,7 +2098,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{CloudwatchLogs: action}) + actions = append(actions, awstypes.Action{CloudwatchLogs: action}) } // Legacy root attribute handling @@ -2032,7 +2109,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = 
append(actions, &iot.Action{CloudwatchMetric: action}) + actions = append(actions, awstypes.Action{CloudwatchMetric: action}) } // Legacy root attribute handling @@ -2043,7 +2120,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{DynamoDB: action}) + actions = append(actions, awstypes.Action{DynamoDB: action}) } // Legacy root attribute handling @@ -2054,7 +2131,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{DynamoDBv2: action}) + actions = append(actions, awstypes.Action{DynamoDBv2: action}) } // Legacy root attribute handling @@ -2065,7 +2142,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Elasticsearch: action}) + actions = append(actions, awstypes.Action{Elasticsearch: action}) } // Legacy root attribute handling @@ -2076,7 +2153,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Firehose: action}) + actions = append(actions, awstypes.Action{Firehose: action}) } // Legacy root attribute handling @@ -2087,7 +2164,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Http: action}) + actions = append(actions, awstypes.Action{Http: action}) } // Legacy root attribute handling @@ -2098,7 +2175,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{IotAnalytics: action}) + actions = append(actions, awstypes.Action{IotAnalytics: action}) } // Legacy root attribute handling @@ -2109,7 +2186,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{IotEvents: action}) + actions = append(actions, 
awstypes.Action{IotEvents: action}) } // Legacy root attribute handling @@ -2120,7 +2197,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Kafka: action}) + actions = append(actions, awstypes.Action{Kafka: action}) } // Legacy root attribute handling @@ -2131,7 +2208,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Kinesis: action}) + actions = append(actions, awstypes.Action{Kinesis: action}) } // Legacy root attribute handling @@ -2142,7 +2219,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Lambda: action}) + actions = append(actions, awstypes.Action{Lambda: action}) } // Legacy root attribute handling @@ -2153,7 +2230,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Republish: action}) + actions = append(actions, awstypes.Action{Republish: action}) } // Legacy root attribute handling @@ -2164,7 +2241,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{S3: action}) + actions = append(actions, awstypes.Action{S3: action}) } // Legacy root attribute handling @@ -2175,7 +2252,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Sns: action}) + actions = append(actions, awstypes.Action{Sns: action}) } // Legacy root attribute handling @@ -2186,7 +2263,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Sqs: action}) + actions = append(actions, awstypes.Action{Sqs: action}) } // Legacy root attribute handling @@ -2197,7 +2274,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { 
continue } - actions = append(actions, &iot.Action{StepFunctions: action}) + actions = append(actions, awstypes.Action{StepFunctions: action}) } // Legacy root attribute handling @@ -2208,28 +2285,28 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - actions = append(actions, &iot.Action{Timestream: action}) + actions = append(actions, awstypes.Action{Timestream: action}) } // Prevent sending empty Actions: // - missing required field, CreateTopicRuleInput.TopicRulePayload.Actions if len(actions) == 0 { - actions = []*iot.Action{} + actions = []awstypes.Action{} } - var iotErrorAction *iot.Action - errorAction := d.Get("error_action").([]interface{}) - if len(errorAction) > 0 { + var iotErrorAction *awstypes.Action + if errorAction := d.Get("error_action").([]interface{}); len(errorAction) > 0 { for k, v := range errorAction[0].(map[string]interface{}) { switch k { case "cloudwatch_alarm": for _, tfMapRaw := range v.([]interface{}) { action := expandCloudWatchAlarmAction([]interface{}{tfMapRaw}) + if action == nil { continue } - iotErrorAction = &iot.Action{CloudwatchAlarm: action} + iotErrorAction = &awstypes.Action{CloudwatchAlarm: action} } case "cloudwatch_logs": for _, tfMapRaw := range v.([]interface{}) { @@ -2239,7 +2316,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{CloudwatchLogs: action} + iotErrorAction = &awstypes.Action{CloudwatchLogs: action} } case "cloudwatch_metric": for _, tfMapRaw := range v.([]interface{}) { @@ -2249,7 +2326,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{CloudwatchMetric: action} + iotErrorAction = &awstypes.Action{CloudwatchMetric: action} } case "dynamodb": for _, tfMapRaw := range v.([]interface{}) { @@ -2259,7 +2336,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = 
&iot.Action{DynamoDB: action} + iotErrorAction = &awstypes.Action{DynamoDB: action} } case "dynamodbv2": for _, tfMapRaw := range v.([]interface{}) { @@ -2269,7 +2346,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{DynamoDBv2: action} + iotErrorAction = &awstypes.Action{DynamoDBv2: action} } case "elasticsearch": for _, tfMapRaw := range v.([]interface{}) { @@ -2279,7 +2356,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Elasticsearch: action} + iotErrorAction = &awstypes.Action{Elasticsearch: action} } case "firehose": for _, tfMapRaw := range v.([]interface{}) { @@ -2289,7 +2366,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Firehose: action} + iotErrorAction = &awstypes.Action{Firehose: action} } case "http": for _, tfMapRaw := range v.([]interface{}) { @@ -2299,7 +2376,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Http: action} + iotErrorAction = &awstypes.Action{Http: action} } case "iot_analytics": for _, tfMapRaw := range v.([]interface{}) { @@ -2309,7 +2386,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{IotAnalytics: action} + iotErrorAction = &awstypes.Action{IotAnalytics: action} } case "iot_events": for _, tfMapRaw := range v.([]interface{}) { @@ -2319,7 +2396,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{IotEvents: action} + iotErrorAction = &awstypes.Action{IotEvents: action} } case "kafka": for _, tfMapRaw := range v.([]interface{}) { @@ -2329,7 +2406,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Kafka: action} + iotErrorAction 
= &awstypes.Action{Kafka: action} } case "kinesis": for _, tfMapRaw := range v.([]interface{}) { @@ -2339,7 +2416,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Kinesis: action} + iotErrorAction = &awstypes.Action{Kinesis: action} } case "lambda": for _, tfMapRaw := range v.([]interface{}) { @@ -2349,7 +2426,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Lambda: action} + iotErrorAction = &awstypes.Action{Lambda: action} } case "republish": for _, tfMapRaw := range v.([]interface{}) { @@ -2359,7 +2436,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Republish: action} + iotErrorAction = &awstypes.Action{Republish: action} } case "s3": for _, tfMapRaw := range v.([]interface{}) { @@ -2369,7 +2446,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{S3: action} + iotErrorAction = &awstypes.Action{S3: action} } case "sns": for _, tfMapRaw := range v.([]interface{}) { @@ -2379,7 +2456,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Sns: action} + iotErrorAction = &awstypes.Action{Sns: action} } case "sqs": for _, tfMapRaw := range v.([]interface{}) { @@ -2389,7 +2466,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Sqs: action} + iotErrorAction = &awstypes.Action{Sqs: action} } case "step_functions": for _, tfMapRaw := range v.([]interface{}) { @@ -2399,7 +2476,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{StepFunctions: action} + iotErrorAction = &awstypes.Action{StepFunctions: action} } case "timestream": for _, tfMapRaw := range v.([]interface{}) { @@ 
-2409,13 +2486,13 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { continue } - iotErrorAction = &iot.Action{Timestream: action} + iotErrorAction = &awstypes.Action{Timestream: action} } } } } - return &iot.TopicRulePayload{ + return &awstypes.TopicRulePayload{ Actions: actions, AwsIotSqlVersion: aws.String(d.Get("sql_version").(string)), Description: aws.String(d.Get(names.AttrDescription).(string)), @@ -2425,7 +2502,7 @@ func expandTopicRulePayload(d *schema.ResourceData) *iot.TopicRulePayload { } } -func flattenCloudWatchAlarmAction(apiObject *iot.CloudwatchAlarmAction) []interface{} { +func flattenCloudWatchAlarmAction(apiObject *awstypes.CloudwatchAlarmAction) []interface{} { if apiObject == nil { return nil } @@ -2433,94 +2510,86 @@ func flattenCloudWatchAlarmAction(apiObject *iot.CloudwatchAlarmAction) []interf tfMap := make(map[string]interface{}) if v := apiObject.AlarmName; v != nil { - tfMap["alarm_name"] = aws.StringValue(v) + tfMap["alarm_name"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.StateReason; v != nil { - tfMap["state_reason"] = aws.StringValue(v) + tfMap["state_reason"] = aws.ToString(v) } if v := apiObject.StateValue; v != nil { - tfMap["state_value"] = aws.StringValue(v) + tfMap["state_value"] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenCloudWatchAlarmActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) +func flattenCloudWatchAlarmActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - for _, action := range actions { - if action == nil { - continue - } - - if v := action.CloudwatchAlarm; v != nil { - results = append(results, flattenCloudWatchAlarmAction(v)...) 
+ for _, apiObject := range apiObjects { + if v := apiObject.CloudwatchAlarm; v != nil { + tfList = append(tfList, flattenCloudWatchAlarmAction(v)...) } } - return results + return tfList } -func flattenCloudWatchLogsAction(apiObject *iot.CloudwatchLogsAction) []interface{} { +func flattenCloudWatchLogsAction(apiObject *awstypes.CloudwatchLogsAction) []interface{} { if apiObject == nil { return nil } tfMap := make(map[string]interface{}) + if v := apiObject.BatchMode; v != nil { + tfMap["batch_mode"] = aws.ToBool(v) + } + if v := apiObject.LogGroupName; v != nil { - tfMap[names.AttrLogGroupName] = aws.StringValue(v) + tfMap[names.AttrLogGroupName] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenCloudWatchLogsActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenCloudWatchLogsActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.CloudwatchLogs; v != nil { - results = append(results, flattenCloudWatchLogsAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.CloudwatchLogs; v != nil { + tfList = append(tfList, flattenCloudWatchLogsAction(v)...) } } - return results + return tfList } // Legacy root attribute handling -func flattenCloudWatchMetricActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenCloudWatchMetricActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.CloudwatchMetric; v != nil { - results = append(results, flattenCloudWatchMetricAction(v)...) 
+ for _, apiObject := range apiObjects { + if v := apiObject.CloudwatchMetric; v != nil { + tfList = append(tfList, flattenCloudWatchMetricAction(v)...) } } - return results + return tfList } -func flattenCloudWatchMetricAction(apiObject *iot.CloudwatchMetricAction) []interface{} { +func flattenCloudWatchMetricAction(apiObject *awstypes.CloudwatchMetricAction) []interface{} { if apiObject == nil { return nil } @@ -2528,50 +2597,46 @@ func flattenCloudWatchMetricAction(apiObject *iot.CloudwatchMetricAction) []inte tfMap := make(map[string]interface{}) if v := apiObject.MetricName; v != nil { - tfMap[names.AttrMetricName] = aws.StringValue(v) + tfMap[names.AttrMetricName] = aws.ToString(v) } if v := apiObject.MetricNamespace; v != nil { - tfMap["metric_namespace"] = aws.StringValue(v) + tfMap["metric_namespace"] = aws.ToString(v) } if v := apiObject.MetricTimestamp; v != nil { - tfMap["metric_timestamp"] = aws.StringValue(v) + tfMap["metric_timestamp"] = aws.ToString(v) } if v := apiObject.MetricUnit; v != nil { - tfMap["metric_unit"] = aws.StringValue(v) + tfMap["metric_unit"] = aws.ToString(v) } if v := apiObject.MetricValue; v != nil { - tfMap["metric_value"] = aws.StringValue(v) + tfMap["metric_value"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenDynamoDBActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) +func flattenDynamoDBActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - for _, action := range actions { - if action == nil { - continue - } - - if v := action.DynamoDB; v != nil { - results = append(results, flattenDynamoDBAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.DynamoDB; v != nil { + tfList = append(tfList, flattenDynamoDBAction(v)...) 
} } - return results + return tfList } -func flattenDynamoDBAction(apiObject *iot.DynamoDBAction) []interface{} { +func flattenDynamoDBAction(apiObject *awstypes.DynamoDBAction) []interface{} { if apiObject == nil { return nil } @@ -2579,66 +2644,58 @@ func flattenDynamoDBAction(apiObject *iot.DynamoDBAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.HashKeyField; v != nil { - tfMap["hash_key_field"] = aws.StringValue(v) + tfMap["hash_key_field"] = aws.ToString(v) } - if v := apiObject.HashKeyType; v != nil { - tfMap["hash_key_type"] = aws.StringValue(v) - } + tfMap["hash_key_type"] = apiObject.HashKeyType if v := apiObject.HashKeyValue; v != nil { - tfMap["hash_key_value"] = aws.StringValue(v) + tfMap["hash_key_value"] = aws.ToString(v) } if v := apiObject.PayloadField; v != nil { - tfMap["payload_field"] = aws.StringValue(v) + tfMap["payload_field"] = aws.ToString(v) } if v := apiObject.Operation; v != nil { - tfMap["operation"] = aws.StringValue(v) + tfMap["operation"] = aws.ToString(v) } if v := apiObject.RangeKeyField; v != nil { - tfMap["range_key_field"] = aws.StringValue(v) + tfMap["range_key_field"] = aws.ToString(v) } - if v := apiObject.RangeKeyType; v != nil { - tfMap["range_key_type"] = aws.StringValue(v) - } + tfMap["range_key_type"] = apiObject.RangeKeyType if v := apiObject.RangeKeyValue; v != nil { - tfMap["range_key_value"] = aws.StringValue(v) + tfMap["range_key_value"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.TableName; v != nil { - tfMap[names.AttrTableName] = aws.StringValue(v) + tfMap[names.AttrTableName] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenDynamoDBv2Actions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func 
flattenDynamoDBv2Actions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.DynamoDBv2; v != nil { - results = append(results, flattenDynamoDBv2Action(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.DynamoDBv2; v != nil { + tfList = append(tfList, flattenDynamoDBv2Action(v)...) } } - return results + return tfList } -func flattenDynamoDBv2Action(apiObject *iot.DynamoDBv2Action) []interface{} { +func flattenDynamoDBv2Action(apiObject *awstypes.DynamoDBv2Action) []interface{} { if apiObject == nil { return nil } @@ -2650,30 +2707,26 @@ func flattenDynamoDBv2Action(apiObject *iot.DynamoDBv2Action) []interface{} { } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenElasticsearchActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) +func flattenElasticsearchActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - for _, action := range actions { - if action == nil { - continue - } - - if v := action.Elasticsearch; v != nil { - results = append(results, flattenElasticsearchAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Elasticsearch; v != nil { + tfList = append(tfList, flattenElasticsearchAction(v)...) 
} } - return results + return tfList } -func flattenElasticsearchAction(apiObject *iot.ElasticsearchAction) []interface{} { +func flattenElasticsearchAction(apiObject *awstypes.ElasticsearchAction) []interface{} { if apiObject == nil { return nil } @@ -2681,46 +2734,42 @@ func flattenElasticsearchAction(apiObject *iot.ElasticsearchAction) []interface{ tfMap := make(map[string]interface{}) if v := apiObject.Endpoint; v != nil { - tfMap[names.AttrEndpoint] = aws.StringValue(v) + tfMap[names.AttrEndpoint] = aws.ToString(v) } if v := apiObject.Id; v != nil { - tfMap[names.AttrID] = aws.StringValue(v) + tfMap[names.AttrID] = aws.ToString(v) } if v := apiObject.Index; v != nil { - tfMap["index"] = aws.StringValue(v) + tfMap["index"] = aws.ToString(v) } if v := apiObject.Type; v != nil { - tfMap[names.AttrType] = aws.StringValue(v) + tfMap[names.AttrType] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenFirehoseActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenFirehoseActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Firehose; v != nil { - results = append(results, flattenFirehoseAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Firehose; v != nil { + tfList = append(tfList, flattenFirehoseAction(v)...) 
} } - return results + return tfList } -func flattenFirehoseAction(apiObject *iot.FirehoseAction) []interface{} { +func flattenFirehoseAction(apiObject *awstypes.FirehoseAction) []interface{} { if apiObject == nil { return nil } @@ -2728,42 +2777,38 @@ func flattenFirehoseAction(apiObject *iot.FirehoseAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.BatchMode; v != nil { - tfMap["batch_mode"] = aws.BoolValue(v) + tfMap["batch_mode"] = aws.ToBool(v) } if v := apiObject.DeliveryStreamName; v != nil { - tfMap["delivery_stream_name"] = aws.StringValue(v) + tfMap["delivery_stream_name"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.Separator; v != nil { - tfMap["separator"] = aws.StringValue(v) + tfMap["separator"] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenHTTPActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenHTTPActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Http; v != nil { - results = append(results, flattenHTTPAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Http; v != nil { + tfList = append(tfList, flattenHTTPAction(v)...) 
} } - return results + return tfList } -func flattenHTTPAction(apiObject *iot.HttpAction) []interface{} { +func flattenHTTPAction(apiObject *awstypes.HttpAction) []interface{} { if apiObject == nil { return nil } @@ -2771,11 +2816,11 @@ func flattenHTTPAction(apiObject *iot.HttpAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.Url; v != nil { - tfMap[names.AttrURL] = aws.StringValue(v) + tfMap[names.AttrURL] = aws.ToString(v) } if v := apiObject.ConfirmationUrl; v != nil { - tfMap["confirmation_url"] = aws.StringValue(v) + tfMap["confirmation_url"] = aws.ToString(v) } if v := apiObject.Headers; v != nil { @@ -2783,8 +2828,8 @@ func flattenHTTPAction(apiObject *iot.HttpAction) []interface{} { for _, h := range v { m := map[string]string{ - names.AttrKey: aws.StringValue(h.Key), - names.AttrValue: aws.StringValue(h.Value), + names.AttrKey: aws.ToString(h.Key), + names.AttrValue: aws.ToString(h.Value), } headers = append(headers, m) } @@ -2795,23 +2840,19 @@ func flattenHTTPAction(apiObject *iot.HttpAction) []interface{} { } // Legacy root attribute handling -func flattenAnalyticsActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenAnalyticsActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.IotAnalytics; v != nil { - results = append(results, flattenAnalyticsAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.IotAnalytics; v != nil { + tfList = append(tfList, flattenAnalyticsAction(v)...) 
} } - return results + return tfList } -func flattenAnalyticsAction(apiObject *iot.IotAnalyticsAction) []interface{} { +func flattenAnalyticsAction(apiObject *awstypes.IotAnalyticsAction) []interface{} { if apiObject == nil { return nil } @@ -2819,38 +2860,34 @@ func flattenAnalyticsAction(apiObject *iot.IotAnalyticsAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.BatchMode; v != nil { - tfMap["batch_mode"] = aws.BoolValue(v) + tfMap["batch_mode"] = aws.ToBool(v) } if v := apiObject.ChannelName; v != nil { - tfMap["channel_name"] = aws.StringValue(v) + tfMap["channel_name"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenEventsActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenEventsActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.IotEvents; v != nil { - results = append(results, flattenEventsAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.IotEvents; v != nil { + tfList = append(tfList, flattenEventsAction(v)...) 
} } - return results + return tfList } -func flattenEventsAction(apiObject *iot.IotEventsAction) []interface{} { +func flattenEventsAction(apiObject *awstypes.IotEventsAction) []interface{} { if apiObject == nil { return nil } @@ -2858,42 +2895,38 @@ func flattenEventsAction(apiObject *iot.IotEventsAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.BatchMode; v != nil { - tfMap["batch_mode"] = aws.BoolValue(v) + tfMap["batch_mode"] = aws.ToBool(v) } if v := apiObject.InputName; v != nil { - tfMap["input_name"] = aws.StringValue(v) + tfMap["input_name"] = aws.ToString(v) } if v := apiObject.MessageId; v != nil { - tfMap["message_id"] = aws.StringValue(v) + tfMap["message_id"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenKafkaActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenKafkaActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Kafka; v != nil { - results = append(results, flattenKafkaAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Kafka; v != nil { + tfList = append(tfList, flattenKafkaAction(v)...) 
} } - return results + return tfList } -func flattenKafkaAction(apiObject *iot.KafkaAction) []interface{} { +func flattenKafkaAction(apiObject *awstypes.KafkaAction) []interface{} { if apiObject == nil { return nil } @@ -2901,11 +2934,11 @@ func flattenKafkaAction(apiObject *iot.KafkaAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.ClientProperties; v != nil { - tfMap["client_properties"] = aws.StringValueMap(v) + tfMap["client_properties"] = aws.StringMap(v) } if v := apiObject.DestinationArn; v != nil { - tfMap[names.AttrDestinationARN] = aws.StringValue(v) + tfMap[names.AttrDestinationARN] = aws.ToString(v) } if v := apiObject.Headers; v != nil { @@ -2913,60 +2946,53 @@ func flattenKafkaAction(apiObject *iot.KafkaAction) []interface{} { } if v := apiObject.Key; v != nil { - tfMap[names.AttrKey] = aws.StringValue(v) + tfMap[names.AttrKey] = aws.ToString(v) } if v := apiObject.Partition; v != nil { - tfMap["partition"] = aws.StringValue(v) + tfMap["partition"] = aws.ToString(v) } if v := apiObject.Topic; v != nil { - tfMap["topic"] = aws.StringValue(v) + tfMap["topic"] = aws.ToString(v) } return []interface{}{tfMap} } -func flattenKafkaHeaders(apiObjects []*iot.KafkaActionHeader) []interface{} { +func flattenKafkaHeaders(apiObjects []awstypes.KafkaActionHeader) []interface{} { results := make([]interface{}, 0) for _, apiObject := range apiObjects { - if apiObject != nil { - tfMap := make(map[string]interface{}) - - if v := apiObject.Key; v != nil { - tfMap[names.AttrKey] = aws.StringValue(v) - } + tfMap := make(map[string]interface{}) - if v := apiObject.Value; v != nil { - tfMap[names.AttrValue] = aws.StringValue(v) - } + if v := apiObject.Key; v != nil { + tfMap[names.AttrKey] = aws.ToString(v) + } - results = append(results, tfMap) + if v := apiObject.Value; v != nil { + tfMap[names.AttrValue] = aws.ToString(v) } + results = append(results, tfMap) } return results } // Legacy root attribute handling -func 
flattenKinesisActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenKinesisActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Kinesis; v != nil { - results = append(results, flattenKinesisAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Kinesis; v != nil { + tfList = append(tfList, flattenKinesisAction(v)...) } } - return results + return tfList } -func flattenKinesisAction(apiObject *iot.KinesisAction) []interface{} { +func flattenKinesisAction(apiObject *awstypes.KinesisAction) []interface{} { if apiObject == nil { return nil } @@ -2974,38 +3000,34 @@ func flattenKinesisAction(apiObject *iot.KinesisAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.PartitionKey; v != nil { - tfMap["partition_key"] = aws.StringValue(v) + tfMap["partition_key"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.StreamName; v != nil { - tfMap["stream_name"] = aws.StringValue(v) + tfMap["stream_name"] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenLambdaActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenLambdaActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Lambda; v != nil { - results = append(results, flattenLambdaAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Lambda; v != nil { + tfList = append(tfList, flattenLambdaAction(v)...) 
} } - return results + return tfList } -func flattenLambdaAction(apiObject *iot.LambdaAction) []interface{} { +func flattenLambdaAction(apiObject *awstypes.LambdaAction) []interface{} { if apiObject == nil { return nil } @@ -3013,13 +3035,13 @@ func flattenLambdaAction(apiObject *iot.LambdaAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.FunctionArn; v != nil { - tfMap[names.AttrFunctionARN] = aws.StringValue(v) + tfMap[names.AttrFunctionARN] = aws.ToString(v) } return []interface{}{tfMap} } -func flattenPutItemInput(apiObject *iot.PutItemInput) []interface{} { +func flattenPutItemInput(apiObject *awstypes.PutItemInput) []interface{} { if apiObject == nil { return nil } @@ -3027,30 +3049,26 @@ func flattenPutItemInput(apiObject *iot.PutItemInput) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.TableName; v != nil { - tfMap[names.AttrTableName] = aws.StringValue(v) + tfMap[names.AttrTableName] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenRepublishActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenRepublishActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Republish; v != nil { - results = append(results, flattenRepublishAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Republish; v != nil { + tfList = append(tfList, flattenRepublishAction(v)...) 
} } - return results + return tfList } -func flattenRepublishAction(apiObject *iot.RepublishAction) []interface{} { +func flattenRepublishAction(apiObject *awstypes.RepublishAction) []interface{} { if apiObject == nil { return nil } @@ -3058,38 +3076,34 @@ func flattenRepublishAction(apiObject *iot.RepublishAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.Qos; v != nil { - tfMap["qos"] = aws.Int64Value(v) + tfMap["qos"] = aws.ToInt32(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.Topic; v != nil { - tfMap["topic"] = aws.StringValue(v) + tfMap["topic"] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenS3Actions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenS3Actions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.S3; v != nil { - results = append(results, flattenS3Action(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.S3; v != nil { + tfList = append(tfList, flattenS3Action(v)...) 
} } - return results + return tfList } -func flattenS3Action(apiObject *iot.S3Action) []interface{} { +func flattenS3Action(apiObject *awstypes.S3Action) []interface{} { if apiObject == nil { return nil } @@ -3097,81 +3111,69 @@ func flattenS3Action(apiObject *iot.S3Action) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.BucketName; v != nil { - tfMap[names.AttrBucketName] = aws.StringValue(v) + tfMap[names.AttrBucketName] = aws.ToString(v) } - if v := apiObject.CannedAcl; v != nil { - tfMap["canned_acl"] = aws.StringValue(v) - } + tfMap["canned_acl"] = apiObject.CannedAcl if v := apiObject.Key; v != nil { - tfMap[names.AttrKey] = aws.StringValue(v) + tfMap[names.AttrKey] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenSNSActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenSNSActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Sns; v != nil { - results = append(results, flattenSNSAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Sns; v != nil { + tfList = append(tfList, flattenSNSAction(v)...) 
} } - return results + return tfList } -func flattenSNSAction(apiObject *iot.SnsAction) []interface{} { +func flattenSNSAction(apiObject *awstypes.SnsAction) []interface{} { if apiObject == nil { return nil } tfMap := make(map[string]interface{}) - if v := apiObject.MessageFormat; v != nil { - tfMap["message_format"] = aws.StringValue(v) - } + tfMap["message_format"] = apiObject.MessageFormat if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.TargetArn; v != nil { - tfMap[names.AttrTargetARN] = aws.StringValue(v) + tfMap[names.AttrTargetARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenSQSActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenSQSActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Sqs; v != nil { - results = append(results, flattenSQSAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Sqs; v != nil { + tfList = append(tfList, flattenSQSAction(v)...) 
} } - return results + return tfList } -func flattenSQSAction(apiObject *iot.SqsAction) []interface{} { +func flattenSQSAction(apiObject *awstypes.SqsAction) []interface{} { if apiObject == nil { return nil } @@ -3179,38 +3181,34 @@ func flattenSQSAction(apiObject *iot.SqsAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.QueueUrl; v != nil { - tfMap["queue_url"] = aws.StringValue(v) + tfMap["queue_url"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.UseBase64; v != nil { - tfMap["use_base64"] = aws.BoolValue(v) + tfMap["use_base64"] = aws.ToBool(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenStepFunctionsActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenStepFunctionsActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.StepFunctions; v != nil { - results = append(results, flattenStepFunctionsAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.StepFunctions; v != nil { + tfList = append(tfList, flattenStepFunctionsAction(v)...) 
} } - return results + return tfList } -func flattenStepFunctionsAction(apiObject *iot.StepFunctionsAction) []interface{} { +func flattenStepFunctionsAction(apiObject *awstypes.StepFunctionsAction) []interface{} { if apiObject == nil { return nil } @@ -3218,38 +3216,34 @@ func flattenStepFunctionsAction(apiObject *iot.StepFunctionsAction) []interface{ tfMap := make(map[string]interface{}) if v := apiObject.ExecutionNamePrefix; v != nil { - tfMap["execution_name_prefix"] = aws.StringValue(v) + tfMap["execution_name_prefix"] = aws.ToString(v) } if v := apiObject.StateMachineName; v != nil { - tfMap["state_machine_name"] = aws.StringValue(v) + tfMap["state_machine_name"] = aws.ToString(v) } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } return []interface{}{tfMap} } // Legacy root attribute handling -func flattenTimestreamActions(actions []*iot.Action) []interface{} { - results := make([]interface{}, 0) - - for _, action := range actions { - if action == nil { - continue - } +func flattenTimestreamActions(apiObjects []awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if v := action.Timestream; v != nil { - results = append(results, flattenTimestreamAction(v)...) + for _, apiObject := range apiObjects { + if v := apiObject.Timestream; v != nil { + tfList = append(tfList, flattenTimestreamAction(v)...) 
} } - return results + return tfList } -func flattenTimestreamAction(apiObject *iot.TimestreamAction) []interface{} { +func flattenTimestreamAction(apiObject *awstypes.TimestreamAction) []interface{} { if apiObject == nil { return nil } @@ -3257,7 +3251,7 @@ func flattenTimestreamAction(apiObject *iot.TimestreamAction) []interface{} { tfMap := make(map[string]interface{}) if v := apiObject.DatabaseName; v != nil { - tfMap[names.AttrDatabaseName] = aws.StringValue(v) + tfMap[names.AttrDatabaseName] = aws.ToString(v) } if v := apiObject.Dimensions; v != nil { @@ -3265,11 +3259,11 @@ func flattenTimestreamAction(apiObject *iot.TimestreamAction) []interface{} { } if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + tfMap[names.AttrRoleARN] = aws.ToString(v) } if v := apiObject.TableName; v != nil { - tfMap[names.AttrTableName] = aws.StringValue(v) + tfMap[names.AttrTableName] = aws.ToString(v) } if v := apiObject.Timestamp; v != nil { @@ -3279,33 +3273,31 @@ func flattenTimestreamAction(apiObject *iot.TimestreamAction) []interface{} { return []interface{}{tfMap} } -func flattenTimestreamDimensions(apiObjects []*iot.TimestreamDimension) *schema.Set { +func flattenTimestreamDimensions(apiObjects []awstypes.TimestreamDimension) []interface{} { if apiObjects == nil { return nil } - tfSet := schema.NewSet(schema.HashResource(timestreamDimensionResource), []interface{}{}) + tfList := make([]interface{}, 0) for _, apiObject := range apiObjects { - if apiObject != nil { - tfMap := make(map[string]interface{}) - - if v := apiObject.Name; v != nil { - tfMap[names.AttrName] = aws.StringValue(v) - } + tfMap := make(map[string]interface{}) - if v := apiObject.Value; v != nil { - tfMap[names.AttrValue] = aws.StringValue(v) - } + if v := apiObject.Name; v != nil { + tfMap[names.AttrName] = aws.ToString(v) + } - tfSet.Add(tfMap) + if v := apiObject.Value; v != nil { + tfMap[names.AttrValue] = aws.ToString(v) } + + tfList = append(tfList, tfMap) } - 
return tfSet + return tfList } -func flattenTimestreamTimestamp(apiObject *iot.TimestreamTimestamp) []interface{} { +func flattenTimestreamTimestamp(apiObject *awstypes.TimestreamTimestamp) []interface{} { if apiObject == nil { return nil } @@ -3313,99 +3305,100 @@ func flattenTimestreamTimestamp(apiObject *iot.TimestreamTimestamp) []interface{ tfMap := make(map[string]interface{}) if v := apiObject.Unit; v != nil { - tfMap[names.AttrUnit] = aws.StringValue(v) + tfMap[names.AttrUnit] = aws.ToString(v) } if v := apiObject.Value; v != nil { - tfMap[names.AttrValue] = aws.StringValue(v) + tfMap[names.AttrValue] = aws.ToString(v) } return []interface{}{tfMap} } -func flattenErrorAction(errorAction *iot.Action) []map[string]interface{} { - results := make([]map[string]interface{}, 0) +func flattenErrorAction(apiObject *awstypes.Action) []interface{} { + tfList := make([]interface{}, 0) - if errorAction == nil { - return results + if apiObject == nil { + return nil } - input := []*iot.Action{errorAction} - if errorAction.CloudwatchAlarm != nil { - results = append(results, map[string]interface{}{"cloudwatch_alarm": flattenCloudWatchAlarmActions(input)}) - return results + + input := []awstypes.Action{*apiObject} + if apiObject.CloudwatchAlarm != nil { + tfList = append(tfList, map[string]interface{}{"cloudwatch_alarm": flattenCloudWatchAlarmActions(input)}) + return tfList } - if errorAction.CloudwatchLogs != nil { - results = append(results, map[string]interface{}{names.AttrCloudWatchLogs: flattenCloudWatchLogsActions(input)}) - return results + if apiObject.CloudwatchLogs != nil { + tfList = append(tfList, map[string]interface{}{names.AttrCloudWatchLogs: flattenCloudWatchLogsActions(input)}) + return tfList } - if errorAction.CloudwatchMetric != nil { - results = append(results, map[string]interface{}{"cloudwatch_metric": flattenCloudWatchMetricActions(input)}) - return results + if apiObject.CloudwatchMetric != nil { + tfList = append(tfList, 
map[string]interface{}{"cloudwatch_metric": flattenCloudWatchMetricActions(input)}) + return tfList } - if errorAction.DynamoDB != nil { - results = append(results, map[string]interface{}{"dynamodb": flattenDynamoDBActions(input)}) - return results + if apiObject.DynamoDB != nil { + tfList = append(tfList, map[string]interface{}{"dynamodb": flattenDynamoDBActions(input)}) + return tfList } - if errorAction.DynamoDBv2 != nil { - results = append(results, map[string]interface{}{"dynamodbv2": flattenDynamoDBv2Actions(input)}) - return results + if apiObject.DynamoDBv2 != nil { + tfList = append(tfList, map[string]interface{}{"dynamodbv2": flattenDynamoDBv2Actions(input)}) + return tfList } - if errorAction.Elasticsearch != nil { - results = append(results, map[string]interface{}{"elasticsearch": flattenElasticsearchActions(input)}) - return results + if apiObject.Elasticsearch != nil { + tfList = append(tfList, map[string]interface{}{"elasticsearch": flattenElasticsearchActions(input)}) + return tfList } - if errorAction.Firehose != nil { - results = append(results, map[string]interface{}{"firehose": flattenFirehoseActions(input)}) - return results + if apiObject.Firehose != nil { + tfList = append(tfList, map[string]interface{}{"firehose": flattenFirehoseActions(input)}) + return tfList } - if errorAction.Http != nil { - results = append(results, map[string]interface{}{"http": flattenHTTPActions(input)}) - return results + if apiObject.Http != nil { + tfList = append(tfList, map[string]interface{}{"http": flattenHTTPActions(input)}) + return tfList } - if errorAction.IotAnalytics != nil { - results = append(results, map[string]interface{}{"iot_analytics": flattenAnalyticsActions(input)}) - return results + if apiObject.IotAnalytics != nil { + tfList = append(tfList, map[string]interface{}{"iot_analytics": flattenAnalyticsActions(input)}) + return tfList } - if errorAction.IotEvents != nil { - results = append(results, map[string]interface{}{"iot_events": 
flattenEventsActions(input)}) - return results + if apiObject.IotEvents != nil { + tfList = append(tfList, map[string]interface{}{"iot_events": flattenEventsActions(input)}) + return tfList } - if errorAction.Kafka != nil { - results = append(results, map[string]interface{}{"kafka": flattenKafkaActions(input)}) - return results + if apiObject.Kafka != nil { + tfList = append(tfList, map[string]interface{}{"kafka": flattenKafkaActions(input)}) + return tfList } - if errorAction.Kinesis != nil { - results = append(results, map[string]interface{}{"kinesis": flattenKinesisActions(input)}) - return results + if apiObject.Kinesis != nil { + tfList = append(tfList, map[string]interface{}{"kinesis": flattenKinesisActions(input)}) + return tfList } - if errorAction.Lambda != nil { - results = append(results, map[string]interface{}{"lambda": flattenLambdaActions(input)}) - return results + if apiObject.Lambda != nil { + tfList = append(tfList, map[string]interface{}{"lambda": flattenLambdaActions(input)}) + return tfList } - if errorAction.Republish != nil { - results = append(results, map[string]interface{}{"republish": flattenRepublishActions(input)}) - return results + if apiObject.Republish != nil { + tfList = append(tfList, map[string]interface{}{"republish": flattenRepublishActions(input)}) + return tfList } - if errorAction.S3 != nil { - results = append(results, map[string]interface{}{"s3": flattenS3Actions(input)}) - return results + if apiObject.S3 != nil { + tfList = append(tfList, map[string]interface{}{"s3": flattenS3Actions(input)}) + return tfList } - if errorAction.Sns != nil { - results = append(results, map[string]interface{}{"sns": flattenSNSActions(input)}) - return results + if apiObject.Sns != nil { + tfList = append(tfList, map[string]interface{}{"sns": flattenSNSActions(input)}) + return tfList } - if errorAction.Sqs != nil { - results = append(results, map[string]interface{}{"sqs": flattenSQSActions(input)}) - return results + if apiObject.Sqs != nil 
{ + tfList = append(tfList, map[string]interface{}{"sqs": flattenSQSActions(input)}) + return tfList } - if errorAction.StepFunctions != nil { - results = append(results, map[string]interface{}{"step_functions": flattenStepFunctionsActions(input)}) - return results + if apiObject.StepFunctions != nil { + tfList = append(tfList, map[string]interface{}{"step_functions": flattenStepFunctionsActions(input)}) + return tfList } - if errorAction.Timestream != nil { - results = append(results, map[string]interface{}{"timestream": flattenTimestreamActions(input)}) - return results + if apiObject.Timestream != nil { + tfList = append(tfList, map[string]interface{}{"timestream": flattenTimestreamActions(input)}) + return tfList } - return results + return tfList } diff --git a/internal/service/iot/topic_rule_destination.go b/internal/service/iot/topic_rule_destination.go index f16ff561587..d6fb79585f4 100644 --- a/internal/service/iot/topic_rule_destination.go +++ b/internal/service/iot/topic_rule_destination.go @@ -9,13 +9,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iot" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iot" + awstypes "github.com/aws/aws-sdk-go-v2/service/iot/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -23,8 +25,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// 
@SDKResource("aws_iot_topic_rule_destination") -func ResourceTopicRuleDestination() *schema.Resource { +// @SDKResource("aws_iot_topic_rule_destination", name="Topic Rule Destination") +func resourceTopicRuleDestination() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceTopicRuleDestinationCreate, ReadWithoutTimeout: resourceTopicRuleDestinationRead, @@ -90,47 +92,38 @@ func ResourceTopicRuleDestination() *schema.Resource { func resourceTopicRuleDestinationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) input := &iot.CreateTopicRuleDestinationInput{ - DestinationConfiguration: &iot.TopicRuleDestinationConfiguration{}, + DestinationConfiguration: &awstypes.TopicRuleDestinationConfiguration{}, } if v, ok := d.GetOk(names.AttrVPCConfiguration); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { input.DestinationConfiguration.VpcConfiguration = expandVPCDestinationConfiguration(v.([]interface{})[0].(map[string]interface{})) } - log.Printf("[INFO] Creating IoT Topic Rule Destination: %s", input) - outputRaw, err := tfresource.RetryWhen(ctx, propagationTimeout, + outputRaw, err := tfresource.RetryWhenIsA[*awstypes.InvalidRequestException](ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateTopicRuleDestinationWithContext(ctx, input) - }, - func(err error) (bool, error) { - if tfawserr.ErrMessageContains(err, iot.ErrCodeInvalidRequestException, "sts:AssumeRole") || - tfawserr.ErrMessageContains(err, iot.ErrCodeInvalidRequestException, "Missing permission") { - return true, err - } - - return false, err - }, - ) + return conn.CreateTopicRuleDestination(ctx, input) + }) if err != nil { return sdkdiag.AppendErrorf(diags, "creating IoT Topic Rule Destination: %s", err) } - 
d.SetId(aws.StringValue(outputRaw.(*iot.CreateTopicRuleDestinationOutput).TopicRuleDestination.Arn)) + d.SetId(aws.ToString(outputRaw.(*iot.CreateTopicRuleDestinationOutput).TopicRuleDestination.Arn)) if _, err := waitTopicRuleDestinationCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for IoT Topic Rule Destination (%s) create: %s", d.Id(), err) } if _, ok := d.GetOk(names.AttrEnabled); !ok { - _, err := conn.UpdateTopicRuleDestinationWithContext(ctx, &iot.UpdateTopicRuleDestinationInput{ + input := &iot.UpdateTopicRuleDestinationInput{ Arn: aws.String(d.Id()), - Status: aws.String(iot.TopicRuleDestinationStatusDisabled), - }) + Status: awstypes.TopicRuleDestinationStatusDisabled, + } + + _, err := conn.UpdateTopicRuleDestination(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "disabling IoT Topic Rule Destination (%s): %s", d.Id(), err) @@ -146,10 +139,9 @@ func resourceTopicRuleDestinationCreate(ctx context.Context, d *schema.ResourceD func resourceTopicRuleDestinationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).IoTClient(ctx) - conn := meta.(*conns.AWSClient).IoTConn(ctx) - - output, err := FindTopicRuleDestinationByARN(ctx, conn, d.Id()) + output, err := findTopicRuleDestinationByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] IoT Topic Rule Destination %s not found, removing from state", d.Id()) @@ -162,7 +154,7 @@ func resourceTopicRuleDestinationRead(ctx context.Context, d *schema.ResourceDat } d.Set(names.AttrARN, output.Arn) - d.Set(names.AttrEnabled, aws.StringValue(output.Status) == iot.TopicRuleDestinationStatusEnabled) + d.Set(names.AttrEnabled, (output.Status == awstypes.TopicRuleDestinationStatusEnabled)) if output.VpcProperties != nil { if err := d.Set(names.AttrVPCConfiguration, 
[]interface{}{flattenVPCDestinationProperties(output.VpcProperties)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting vpc_configuration: %s", err) @@ -176,22 +168,21 @@ func resourceTopicRuleDestinationRead(ctx context.Context, d *schema.ResourceDat func resourceTopicRuleDestinationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) if d.HasChange(names.AttrEnabled) { input := &iot.UpdateTopicRuleDestinationInput{ Arn: aws.String(d.Id()), - Status: aws.String(iot.TopicRuleDestinationStatusEnabled), + Status: awstypes.TopicRuleDestinationStatusEnabled, } waiter := waitTopicRuleDestinationEnabled if _, ok := d.GetOk(names.AttrEnabled); !ok { - input.Status = aws.String(iot.TopicRuleDestinationStatusDisabled) + input.Status = awstypes.TopicRuleDestinationStatusDisabled waiter = waitTopicRuleDestinationDisabled } - _, err := conn.UpdateTopicRuleDestinationWithContext(ctx, input) + _, err := conn.UpdateTopicRuleDestination(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating IoT Topic Rule Destination (%s): %s", d.Id(), err) @@ -207,14 +198,20 @@ func resourceTopicRuleDestinationUpdate(ctx context.Context, d *schema.ResourceD func resourceTopicRuleDestinationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).IoTConn(ctx) + conn := meta.(*conns.AWSClient).IoTClient(ctx) log.Printf("[INFO] Deleting IoT Topic Rule Destination: %s", d.Id()) - _, err := conn.DeleteTopicRuleDestinationWithContext(ctx, &iot.DeleteTopicRuleDestinationInput{ + + // DeleteTopicRuleDestination returns unhelpful errors such as + // "UnauthorizedException: Access to TopicRuleDestination 'xxx' was denied" when querying for a rule destination that doesn't exist. 
+ _, err := conn.DeleteTopicRuleDestination(ctx, &iot.DeleteTopicRuleDestinationInput{ Arn: aws.String(d.Id()), }) + if errs.IsA[*awstypes.UnauthorizedException](err) { + return diags + } + if err != nil { return sdkdiag.AppendErrorf(diags, "deleting IoT Topic Rule Destination: %s", err) } @@ -226,61 +223,62 @@ func resourceTopicRuleDestinationDelete(ctx context.Context, d *schema.ResourceD return diags } -func expandVPCDestinationConfiguration(tfMap map[string]interface{}) *iot.VpcDestinationConfiguration { - if tfMap == nil { - return nil - } - - apiObject := &iot.VpcDestinationConfiguration{} +func findTopicRuleDestinationByARN(ctx context.Context, conn *iot.Client, arn string) (*awstypes.TopicRuleDestination, error) { + // GetTopicRuleDestination returns unhelpful errors such as + // "UnauthorizedException: Access to TopicRuleDestination 'arn:aws:iot:us-west-2:123456789012:ruledestination/vpc/f267138a-7383-4670-9e44-a7fe2f48af5e' was denied" + // when querying for a rule destination that doesn't exist. 
+ inputL := &iot.ListTopicRuleDestinationsInput{} + var destination *awstypes.TopicRuleDestinationSummary - if v, ok := tfMap[names.AttrRoleARN].(string); ok && v != "" { - apiObject.RoleArn = aws.String(v) - } + pages := iot.NewListTopicRuleDestinationsPaginator(conn, inputL) +pageLoop: + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - if v, ok := tfMap[names.AttrSecurityGroups].(*schema.Set); ok && v.Len() > 0 { - apiObject.SecurityGroups = flex.ExpandStringSet(v) - } + if err != nil { + return nil, err + } - if v, ok := tfMap[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { - apiObject.SubnetIds = flex.ExpandStringSet(v) + for _, v := range page.DestinationSummaries { + v := v + if aws.ToString(v.Arn) == arn { + destination = &v + break pageLoop + } + } } - if v, ok := tfMap[names.AttrVPCID].(string); ok && v != "" { - apiObject.VpcId = aws.String(v) + if destination == nil { + return nil, tfresource.NewEmptyResultError(nil) } - return apiObject -} - -func flattenVPCDestinationProperties(apiObject *iot.VpcDestinationProperties) map[string]interface{} { - if apiObject == nil { - return nil + inputG := &iot.GetTopicRuleDestinationInput{ + Arn: aws.String(arn), } - tfMap := map[string]interface{}{} + output, err := conn.GetTopicRuleDestination(ctx, inputG) - if v := apiObject.RoleArn; v != nil { - tfMap[names.AttrRoleARN] = aws.StringValue(v) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: inputG, + } } - if v := apiObject.SecurityGroups; v != nil { - tfMap[names.AttrSecurityGroups] = aws.StringValueSlice(v) + if err != nil { + return nil, err } - if v := apiObject.SubnetIds; v != nil { - tfMap[names.AttrSubnetIDs] = aws.StringValueSlice(v) + if output == nil || output.TopicRuleDestination == nil { + return nil, tfresource.NewEmptyResultError(inputG) } - if v := apiObject.VpcId; v != nil { - tfMap[names.AttrVPCID] = aws.StringValue(v) - } - - return tfMap + return 
output.TopicRuleDestination, nil } -func statusTopicRuleDestination(ctx context.Context, conn *iot.IoT, arn string) retry.StateRefreshFunc { +func statusTopicRuleDestination(ctx context.Context, conn *iot.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindTopicRuleDestinationByARN(ctx, conn, arn) + output, err := findTopicRuleDestinationByARN(ctx, conn, arn) if tfresource.NotFound(err) { return nil, "", nil @@ -290,22 +288,22 @@ func statusTopicRuleDestination(ctx context.Context, conn *iot.IoT, arn string) return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func waitTopicRuleDestinationCreated(ctx context.Context, conn *iot.IoT, arn string, timeout time.Duration) (*iot.TopicRuleDestination, error) { +func waitTopicRuleDestinationCreated(ctx context.Context, conn *iot.Client, arn string, timeout time.Duration) (*awstypes.TopicRuleDestination, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{iot.TopicRuleDestinationStatusInProgress}, - Target: []string{iot.TopicRuleDestinationStatusEnabled}, + Pending: enum.Slice(string(awstypes.TopicRuleDestinationStatusInProgress)), + Target: enum.Slice(string(awstypes.TopicRuleDestinationStatusEnabled)), Refresh: statusTopicRuleDestination(ctx, conn, arn), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*iot.TopicRuleDestination); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) + if output, ok := outputRaw.(*awstypes.TopicRuleDestination); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusReason))) return output, err } @@ -313,9 +311,9 @@ func waitTopicRuleDestinationCreated(ctx context.Context, conn *iot.IoT, arn str return nil, err } -func waitTopicRuleDestinationDeleted(ctx context.Context, conn *iot.IoT, arn string, timeout time.Duration) (*iot.TopicRuleDestination, 
error) { +func waitTopicRuleDestinationDeleted(ctx context.Context, conn *iot.Client, arn string, timeout time.Duration) (*awstypes.TopicRuleDestination, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{iot.TopicRuleDestinationStatusDeleting}, + Pending: enum.Slice(string(awstypes.TopicRuleDestinationStatusDeleting)), Target: []string{}, Refresh: statusTopicRuleDestination(ctx, conn, arn), Timeout: timeout, @@ -323,8 +321,8 @@ func waitTopicRuleDestinationDeleted(ctx context.Context, conn *iot.IoT, arn str outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*iot.TopicRuleDestination); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) + if output, ok := outputRaw.(*awstypes.TopicRuleDestination); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusReason))) return output, err } @@ -332,18 +330,18 @@ func waitTopicRuleDestinationDeleted(ctx context.Context, conn *iot.IoT, arn str return nil, err } -func waitTopicRuleDestinationDisabled(ctx context.Context, conn *iot.IoT, arn string, timeout time.Duration) (*iot.TopicRuleDestination, error) { +func waitTopicRuleDestinationDisabled(ctx context.Context, conn *iot.Client, arn string, timeout time.Duration) (*awstypes.TopicRuleDestination, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{iot.TopicRuleDestinationStatusInProgress}, - Target: []string{iot.TopicRuleDestinationStatusDisabled}, + Pending: enum.Slice(string(awstypes.TopicRuleDestinationStatusInProgress)), + Target: enum.Slice(string(awstypes.TopicRuleDestinationStatusDisabled)), Refresh: statusTopicRuleDestination(ctx, conn, arn), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*iot.TopicRuleDestination); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) + if output, ok := outputRaw.(*awstypes.TopicRuleDestination); ok { + tfresource.SetLastError(err, 
errors.New(aws.ToString(output.StatusReason))) return output, err } @@ -351,21 +349,73 @@ func waitTopicRuleDestinationDisabled(ctx context.Context, conn *iot.IoT, arn st return nil, err } -func waitTopicRuleDestinationEnabled(ctx context.Context, conn *iot.IoT, arn string, timeout time.Duration) (*iot.TopicRuleDestination, error) { +func waitTopicRuleDestinationEnabled(ctx context.Context, conn *iot.Client, arn string, timeout time.Duration) (*awstypes.TopicRuleDestination, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{iot.TopicRuleDestinationStatusInProgress}, - Target: []string{iot.TopicRuleDestinationStatusEnabled}, + Pending: enum.Slice(string(awstypes.TopicRuleDestinationStatusInProgress)), + Target: enum.Slice(string(awstypes.TopicRuleDestinationStatusEnabled)), Refresh: statusTopicRuleDestination(ctx, conn, arn), Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*iot.TopicRuleDestination); ok { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) + if output, ok := outputRaw.(*awstypes.TopicRuleDestination); ok { + tfresource.SetLastError(err, errors.New(aws.ToString(output.StatusReason))) return output, err } return nil, err } + +func expandVPCDestinationConfiguration(tfMap map[string]interface{}) *awstypes.VpcDestinationConfiguration { + if tfMap == nil { + return nil + } + + apiObject := &awstypes.VpcDestinationConfiguration{} + + if v, ok := tfMap[names.AttrRoleARN].(string); ok && v != "" { + apiObject.RoleArn = aws.String(v) + } + + if v, ok := tfMap[names.AttrSecurityGroups].(*schema.Set); ok && v.Len() > 0 { + apiObject.SecurityGroups = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap[names.AttrSubnetIDs].(*schema.Set); ok && v.Len() > 0 { + apiObject.SubnetIds = flex.ExpandStringValueSet(v) + } + + if v, ok := tfMap[names.AttrVPCID].(string); ok && v != "" { + apiObject.VpcId = aws.String(v) + } + + return apiObject +} + +func 
flattenVPCDestinationProperties(apiObject *awstypes.VpcDestinationProperties) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.RoleArn; v != nil { + tfMap[names.AttrRoleARN] = aws.ToString(v) + } + + if v := apiObject.SecurityGroups; v != nil { + tfMap[names.AttrSecurityGroups] = aws.StringSlice(v) + } + + if v := apiObject.SubnetIds; v != nil { + tfMap[names.AttrSubnetIDs] = aws.StringSlice(v) + } + + if v := apiObject.VpcId; v != nil { + tfMap[names.AttrVPCID] = aws.ToString(v) + } + + return tfMap +} diff --git a/internal/service/iot/topic_rule_destination_test.go b/internal/service/iot/topic_rule_destination_test.go index 58132cf0d17..1bbef98699c 100644 --- a/internal/service/iot/topic_rule_destination_test.go +++ b/internal/service/iot/topic_rule_destination_test.go @@ -126,7 +126,7 @@ func TestAccIoTTopicRuleDestination_enabled(t *testing.T) { func testAccCheckTopicRuleDestinationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_topic_rule_destination" { @@ -161,7 +161,7 @@ func testAccCheckTopicRuleDestinationExists(ctx context.Context, n string) resou return fmt.Errorf("No IoT Topic Rule Destination ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) _, err := tfiot.FindTopicRuleDestinationByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/iot/topic_rule_test.go b/internal/service/iot/topic_rule_test.go index 67966585d6e..1d698ae97a5 100644 --- a/internal/service/iot/topic_rule_test.go +++ b/internal/service/iot/topic_rule_test.go @@ -319,6 +319,88 @@ func TestAccIoTTopicRule_cloudWatchLogs(t *testing.T) { }) } +func 
TestAccIoTTopicRule_cloudWatchLogs_batch_mode(t *testing.T) { + ctx := acctest.Context(t) + rName := testAccTopicRuleName() + resourceName := "aws_iot_topic_rule.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.IoTServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTopicRuleDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTopicRuleConfig_cloudWatchLogsBatchMode(rName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckTopicRuleExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "cloudwatch_alarm.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "cloudwatch_logs.#", acctest.Ct1), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cloudwatch_logs.*", map[string]string{ + "batch_mode": acctest.CtFalse, + names.AttrLogGroupName: "mylogs1", + }), + resource.TestCheckResourceAttr(resourceName, "cloudwatch_metric.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "dynamodb.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "dynamodbv2.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "elasticsearch.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, names.AttrEnabled, acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "error_action.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "firehose.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "http.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "iot_analytics.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "iot_events.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "kafka.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "kinesis.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "lambda.#", acctest.Ct0), + 
resource.TestCheckResourceAttr(resourceName, "republish.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "s3.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "sns.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "sqs.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "step_functions.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "timestream.#", acctest.Ct0), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTopicRuleConfig_cloudWatchLogsBatchMode(rName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckTopicRuleExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "cloudwatch_alarm.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "cloudwatch_logs.#", acctest.Ct1), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "cloudwatch_logs.*", map[string]string{ + "batch_mode": acctest.CtTrue, + names.AttrLogGroupName: "mylogs1", + }), + resource.TestCheckResourceAttr(resourceName, "cloudwatch_metric.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "dynamodb.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "dynamodbv2.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "elasticsearch.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, names.AttrEnabled, acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "error_action.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "firehose.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "http.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "iot_analytics.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "iot_events.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "kafka.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "kinesis.#", acctest.Ct0), + 
resource.TestCheckResourceAttr(resourceName, "lambda.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "republish.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "s3.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "sns.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "sqs.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "step_functions.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "timestream.#", acctest.Ct0), + ), + }, + }, + }) +} + func TestAccIoTTopicRule_cloudWatchMetric(t *testing.T) { ctx := acctest.Context(t) rName := testAccTopicRuleName() @@ -2338,7 +2420,7 @@ func TestAccIoTTopicRule_updateKinesisErrorAction(t *testing.T) { func testAccCheckTopicRuleDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_iot_topic_rule" { @@ -2373,7 +2455,7 @@ func testAccCheckTopicRuleExists(ctx context.Context, n string) resource.TestChe return fmt.Errorf("No IoT Topic Rule ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).IoTConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).IoTClient(ctx) _, err := tfiot.FindTopicRuleByName(ctx, conn, rs.Primary.ID) @@ -2517,6 +2599,25 @@ resource "aws_iot_topic_rule" "test" { `, rName, logGroupName)) } +func testAccTopicRuleConfig_cloudWatchLogsBatchMode(rName string, batchMode bool) string { + return acctest.ConfigCompose( + testAccTopicRuleConfig_destinationRole(rName), + fmt.Sprintf(` +resource "aws_iot_topic_rule" "test" { + name = %[1]q + enabled = false + sql = "SELECT * FROM 'topic/test'" + sql_version = "2015-10-08" + + cloudwatch_logs { + batch_mode = %[2]t + log_group_name = "mylogs1" + role_arn = aws_iam_role.test.arn + } +} +`, rName, batchMode)) +} + func 
testAccTopicRuleConfig_cloudWatchMetric(rName string, metricName string) string { return acctest.ConfigCompose( testAccTopicRuleConfig_destinationRole(rName), diff --git a/internal/service/iotanalytics/generate.go b/internal/service/iotanalytics/generate.go index 00e14896279..39475063214 100644 --- a/internal/service/iotanalytics/generate.go +++ b/internal/service/iotanalytics/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags +//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags -AWSSDKVersion=2 //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/iotanalytics/service_endpoint_resolver_gen.go b/internal/service/iotanalytics/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..eba81098a25 --- /dev/null +++ b/internal/service/iotanalytics/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package iotanalytics + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + iotanalytics_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotanalytics" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ iotanalytics_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver iotanalytics_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: iotanalytics_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params iotanalytics_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up iotanalytics endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*iotanalytics_sdkv2.Options) { + return func(o *iotanalytics_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/iotanalytics/service_endpoints_gen_test.go b/internal/service/iotanalytics/service_endpoints_gen_test.go index 23a2618e356..61915471201 100644 --- a/internal/service/iotanalytics/service_endpoints_gen_test.go +++ b/internal/service/iotanalytics/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package iotanalytics_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - iotanalytics_sdkv1 "github.com/aws/aws-sdk-go/service/iotanalytics" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + iotanalytics_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotanalytics" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": 
{ @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := iotanalytics_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(iotanalytics_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), iotanalytics_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := iotanalytics_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(iotanalytics_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), iotanalytics_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.IoTAnalyticsConn(ctx) - - req, _ := client.ListChannelsRequest(&iotanalytics_sdkv1.ListChannelsInput{}) + client := meta.IoTAnalyticsClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListChannels(ctx, 
&iotanalytics_sdkv2.ListChannelsInput{}, + func(opts *iotanalytics_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + 
retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, 
middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/iotanalytics/service_package_gen.go b/internal/service/iotanalytics/service_package_gen.go index 2c067beb601..32fcb9814eb 100644 --- a/internal/service/iotanalytics/service_package_gen.go +++ b/internal/service/iotanalytics/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package iotanalytics import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - iotanalytics_sdkv1 "github.com/aws/aws-sdk-go/service/iotanalytics" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + iotanalytics_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotanalytics" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -37,25 +34,14 @@ func (p *servicePackage) ServicePackageName() string { return names.IoTAnalytics } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*iotanalytics_sdkv1.IoTAnalytics, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*iotanalytics_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return iotanalytics_sdkv1.New(sess.Copy(&cfg)), nil + return iotanalytics_sdkv2.NewFromConfig(cfg, + iotanalytics_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/iotanalytics/tags_gen.go b/internal/service/iotanalytics/tags_gen.go index c4803733f28..46b768a0fea 100644 --- a/internal/service/iotanalytics/tags_gen.go +++ b/internal/service/iotanalytics/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iotanalytics" - "github.com/aws/aws-sdk-go/service/iotanalytics/iotanalyticsiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iotanalytics" + awstypes "github.com/aws/aws-sdk-go-v2/service/iotanalytics/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ 
import ( // listTags lists iotanalytics service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn iotanalyticsiface.IoTAnalyticsAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *iotanalytics.Client, identifier string, optFns ...func(*iotanalytics.Options)) (tftags.KeyValueTags, error) { input := &iotanalytics.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn iotanalyticsiface.IoTAnalyticsAPI, ident // ListTags lists iotanalytics service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).IoTAnalyticsConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).IoTAnalyticsClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns iotanalytics service tags. -func Tags(tags tftags.KeyValueTags) []*iotanalytics.Tag { - result := make([]*iotanalytics.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &iotanalytics.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*iotanalytics.Tag { } // KeyValueTags creates tftags.KeyValueTags from iotanalytics service tags. 
-func KeyValueTags(ctx context.Context, tags []*iotanalytics.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*iotanalytics.Tag) tftags.KeyValue // getTagsIn returns iotanalytics service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*iotanalytics.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*iotanalytics.Tag { } // setTagsOut sets iotanalytics service tags in Context. -func setTagsOut(ctx context.Context, tags []*iotanalytics.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*iotanalytics.Tag) { // updateTags updates iotanalytics service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn iotanalyticsiface.IoTAnalyticsAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *iotanalytics.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*iotanalytics.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn iotanalyticsiface.IoTAnalyticsAPI, ide if len(removedTags) > 0 { input := &iotanalytics.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn iotanalyticsiface.IoTAnalyticsAPI, ide Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn iotanalyticsiface.IoTAnalyticsAPI, ide // UpdateTags updates iotanalytics service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).IoTAnalyticsConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).IoTAnalyticsClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/iotevents/generate.go b/internal/service/iotevents/generate.go index 0cb770c5da9..ecaa54a5a45 100644 --- a/internal/service/iotevents/generate.go +++ b/internal/service/iotevents/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0
 
-//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags
+//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags -AWSSDKVersion=2
 //go:generate go run ../../generate/servicepackage/main.go
 
 // ONLY generate directives and package declaration! Do not add anything else to this file.
diff --git a/internal/service/iotevents/service_endpoint_resolver_gen.go b/internal/service/iotevents/service_endpoint_resolver_gen.go
new file mode 100644
index 00000000000..013e6dc51c6
--- /dev/null
+++ b/internal/service/iotevents/service_endpoint_resolver_gen.go
@@ -0,0 +1,82 @@
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT.
+
+package iotevents
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws"
+	iotevents_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotevents"
+	smithyendpoints "github.com/aws/smithy-go/endpoints"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/hashicorp/terraform-provider-aws/internal/errs"
+)
+
+var _ iotevents_sdkv2.EndpointResolverV2 = resolverSDKv2{}
+
+type resolverSDKv2 struct {
+	defaultResolver iotevents_sdkv2.EndpointResolverV2
+}
+
+func newEndpointResolverSDKv2() resolverSDKv2 {
+	return resolverSDKv2{
+		defaultResolver: iotevents_sdkv2.NewDefaultEndpointResolverV2(),
+	}
+}
+
+func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params iotevents_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) {
+	params = params.WithDefaults()
+	useFIPS := aws_sdkv2.ToBool(params.UseFIPS)
+
+	if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" {
+		tflog.Debug(ctx, "setting endpoint", map[string]any{
+			"tf_aws.endpoint": aws_sdkv2.ToString(eps),
+		})
+
+		if useFIPS {
+			tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting")
+			params.UseFIPS = aws_sdkv2.Bool(false)
+		}
+
+		return r.defaultResolver.ResolveEndpoint(ctx, params)
+	} else if useFIPS {
+		ctx = 
tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up iotevents endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*iotevents_sdkv2.Options) { + return func(o *iotevents_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/iotevents/service_endpoints_gen_test.go b/internal/service/iotevents/service_endpoints_gen_test.go index ab177d881a6..ecd246190b4 100644 --- a/internal/service/iotevents/service_endpoints_gen_test.go +++ b/internal/service/iotevents/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package iotevents_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - iotevents_sdkv1 "github.com/aws/aws-sdk-go/service/iotevents" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + iotevents_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotevents" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := iotevents_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(iotevents_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), iotevents_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := iotevents_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(iotevents_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), iotevents_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() 
+ return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.IoTEventsConn(ctx) - - req, _ := client.ListAlarmModelsRequest(&iotevents_sdkv1.ListAlarmModelsInput{}) + client := meta.IoTEventsClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListAlarmModels(ctx, &iotevents_sdkv2.ListAlarmModelsInput{}, + func(opts *iotevents_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + 
_, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, 
in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/iotevents/service_package_gen.go b/internal/service/iotevents/service_package_gen.go index 903ee5c35e7..c664bdbabc5 100644 --- a/internal/service/iotevents/service_package_gen.go +++ b/internal/service/iotevents/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package iotevents import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - iotevents_sdkv1 "github.com/aws/aws-sdk-go/service/iotevents" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + iotevents_sdkv2 "github.com/aws/aws-sdk-go-v2/service/iotevents" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -37,25 +34,14 @@ func (p *servicePackage) ServicePackageName() string { return names.IoTEvents } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*iotevents_sdkv1.IoTEvents, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*iotevents_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return iotevents_sdkv1.New(sess.Copy(&cfg)), nil + return iotevents_sdkv2.NewFromConfig(cfg, + iotevents_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/iotevents/tags_gen.go b/internal/service/iotevents/tags_gen.go index 1a6b7912d8b..2aad3d41338 100644 --- a/internal/service/iotevents/tags_gen.go +++ b/internal/service/iotevents/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/iotevents" - "github.com/aws/aws-sdk-go/service/iotevents/ioteventsiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iotevents" + awstypes "github.com/aws/aws-sdk-go-v2/service/iotevents/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists iotevents service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func listTags(ctx context.Context, conn ioteventsiface.IoTEventsAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *iotevents.Client, identifier string, optFns ...func(*iotevents.Options)) (tftags.KeyValueTags, error) { input := &iotevents.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn ioteventsiface.IoTEventsAPI, identifier // ListTags lists iotevents service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).IoTEventsConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).IoTEventsClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns iotevents service tags. -func Tags(tags tftags.KeyValueTags) []*iotevents.Tag { - result := make([]*iotevents.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &iotevents.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*iotevents.Tag { } // KeyValueTags creates tftags.KeyValueTags from iotevents service tags. 
-func KeyValueTags(ctx context.Context, tags []*iotevents.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*iotevents.Tag) tftags.KeyValueTag // getTagsIn returns iotevents service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*iotevents.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*iotevents.Tag { } // setTagsOut sets iotevents service tags in Context. -func setTagsOut(ctx context.Context, tags []*iotevents.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*iotevents.Tag) { // updateTags updates iotevents service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn ioteventsiface.IoTEventsAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *iotevents.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*iotevents.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn ioteventsiface.IoTEventsAPI, identifie if len(removedTags) > 0 { input := &iotevents.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn ioteventsiface.IoTEventsAPI, identifie Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn ioteventsiface.IoTEventsAPI, identifie // UpdateTags updates iotevents service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).IoTEventsConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).IoTEventsClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/ivs/service_endpoint_resolver_gen.go b/internal/service/ivs/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..3c733fde680 --- /dev/null +++ b/internal/service/ivs/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package ivs + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/ivs/service_endpoints_gen_test.go b/internal/service/ivs/service_endpoints_gen_test.go index 6c3a50cb6e5..780ee904757 100644 --- a/internal/service/ivs/service_endpoints_gen_test.go +++ b/internal/service/ivs/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(ivs_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(ivs_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) 
string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ivs/service_package_gen.go b/internal/service/ivs/service_package_gen.go index cb60aae4161..04946040bcd 100644 --- a/internal/service/ivs/service_package_gen.go +++ b/internal/service/ivs/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package ivs @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" ivs_sdkv1 "github.com/aws/aws-sdk-go/service/ivs" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -78,11 +77,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*i "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return ivs_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/ivschat/service_endpoint_resolver_gen.go b/internal/service/ivschat/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..846167405ce --- /dev/null +++ b/internal/service/ivschat/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package ivschat + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ivschat_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivschat" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ ivschat_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver ivschat_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: ivschat_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params ivschat_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up ivschat endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*ivschat_sdkv2.Options) { + return func(o *ivschat_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ivschat/service_endpoints_gen_test.go b/internal/service/ivschat/service_endpoints_gen_test.go index f194bc35a51..ce933e468d5 100644 --- a/internal/service/ivschat/service_endpoints_gen_test.go +++ b/internal/service/ivschat/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := ivschat_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ivschat_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
ivschat_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ivschat_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ivschat/service_package_gen.go b/internal/service/ivschat/service_package_gen.go index e56bc6927ce..e5309be335e 100644 --- a/internal/service/ivschat/service_package_gen.go +++ b/internal/service/ivschat/service_package_gen.go @@ -1,4 +1,4 @@ -// Code 
generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ivschat @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" ivschat_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ivschat" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -56,19 +55,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*ivschat_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return ivschat_sdkv2.NewFromConfig(cfg, func(o *ivschat_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return ivschat_sdkv2.NewFromConfig(cfg, + ivschat_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/kafka/cluster_data_source.go b/internal/service/kafka/cluster_data_source.go index 48eb01eda69..ffb00afa1ff 100644 --- a/internal/service/kafka/cluster_data_source.go +++ b/internal/service/kafka/cluster_data_source.go @@ -58,6 +58,128 @@ func dataSourceCluster() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "broker_node_group_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "az_distribution": { + Type: schema.TypeString, + Computed: true, + }, + "client_subnets": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "connectivity_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "vpc_connectivity": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_authentication": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sasl": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iam": { + Type: schema.TypeBool, + Computed: true, + }, + "scram": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + "tls": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "public_access": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrType: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + names.AttrInstanceType: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrSecurityGroups: { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "storage_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ebs_storage_info": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "provisioned_throughput": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrEnabled: { + Type: schema.TypeBool, + Computed: true, + }, + "volume_throughput": { + Type: schema.TypeInt, + Computed: true, + }, + }, + }, + }, + names.AttrVolumeSize: { + Type: 
schema.TypeInt, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, names.AttrClusterName: { Type: schema.TypeString, Required: true, @@ -121,6 +243,13 @@ func dataSourceClusterRead(ctx context.Context, d *schema.ResourceData, meta int d.Set("bootstrap_brokers_sasl_iam", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringSaslIam))) d.Set("bootstrap_brokers_sasl_scram", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringSaslScram))) d.Set("bootstrap_brokers_tls", SortEndpointsString(aws.ToString(bootstrapBrokersOutput.BootstrapBrokerStringTls))) + if cluster.BrokerNodeGroupInfo != nil { + if err := d.Set("broker_node_group_info", []interface{}{flattenBrokerNodeGroupInfo(cluster.BrokerNodeGroupInfo)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting broker_node_group_info: %s", err) + } + } else { + d.Set("broker_node_group_info", nil) + } d.Set(names.AttrClusterName, cluster.ClusterName) clusterUUID, _ := clusterUUIDFromARN(clusterARN) d.Set("cluster_uuid", clusterUUID) diff --git a/internal/service/kafka/cluster_data_source_test.go b/internal/service/kafka/cluster_data_source_test.go index 4267fa05b7d..4ee9888121d 100644 --- a/internal/service/kafka/cluster_data_source_test.go +++ b/internal/service/kafka/cluster_data_source_test.go @@ -7,6 +7,7 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/service/kafka/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -35,6 +36,29 @@ func TestAccKafkaClusterDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_sasl_iam", resourceName, "bootstrap_brokers_sasl_iam"), resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_sasl_scram", resourceName, "bootstrap_brokers_sasl_scram"), 
resource.TestCheckResourceAttrPair(dataSourceName, "bootstrap_brokers_tls", resourceName, "bootstrap_brokers_tls"), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.az_distribution", string(types.BrokerAZDistributionDefault)), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.client_subnets.#", acctest.Ct3), + resource.TestCheckTypeSetElemAttrPair(resourceName, "broker_node_group_info.0.client_subnets.*", "aws_subnet.test.0", names.AttrID), + resource.TestCheckTypeSetElemAttrPair(resourceName, "broker_node_group_info.0.client_subnets.*", "aws_subnet.test.1", names.AttrID), + resource.TestCheckTypeSetElemAttrPair(resourceName, "broker_node_group_info.0.client_subnets.*", "aws_subnet.test.2", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.0.public_access.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.0.public_access.0.type", "DISABLED"), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.0.vpc_connectivity.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.0.vpc_connectivity.0.client_authentication.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.0.vpc_connectivity.0.client_authentication.0.sasl.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.0.vpc_connectivity.0.client_authentication.0.sasl.0.iam", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.0.vpc_connectivity.0.client_authentication.0.sasl.0.scram", acctest.CtFalse), + 
resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.connectivity_info.0.vpc_connectivity.0.client_authentication.0.tls", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.instance_type", "kafka.t3.small"), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.security_groups.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttrPair(resourceName, "broker_node_group_info.0.security_groups.*", "aws_security_group.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.storage_info.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.storage_info.0.ebs_storage_info.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.storage_info.0.ebs_storage_info.0.volume_size", acctest.Ct10), + resource.TestCheckResourceAttr(resourceName, "broker_node_group_info.0.storage_info.0.ebs_storage_info.0.provisioned_throughput.#", acctest.Ct0), resource.TestCheckResourceAttrPair(dataSourceName, names.AttrClusterName, resourceName, names.AttrClusterName), resource.TestCheckResourceAttrPair(dataSourceName, "cluster_uuid", resourceName, "cluster_uuid"), resource.TestCheckResourceAttrPair(dataSourceName, "kafka_version", resourceName, "kafka_version"), diff --git a/internal/service/kafka/replicator.go b/internal/service/kafka/replicator.go index 00be0746a27..497bff16d86 100644 --- a/internal/service/kafka/replicator.go +++ b/internal/service/kafka/replicator.go @@ -159,6 +159,22 @@ func resourceReplicator() *schema.Resource { Optional: true, Default: true, }, + "starting_position": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrType: { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + 
ValidateDiagFunc: enum.Validate[types.ReplicationStartingPositionType](), + }, + }, + }, + }, "topics_to_exclude": { Type: schema.TypeSet, Optional: true, @@ -522,21 +538,21 @@ func flattenConsumerGroupReplication(apiObject *types.ConsumerGroupReplication) tfMap := map[string]interface{}{} if v := apiObject.ConsumerGroupsToReplicate; v != nil { - tfMap["consumer_groups_to_replicate"] = flex.FlattenStringValueSet(v) + tfMap["consumer_groups_to_replicate"] = v } if v := apiObject.ConsumerGroupsToExclude; v != nil { - tfMap["consumer_groups_to_exclude"] = flex.FlattenStringValueSet(v) - } - - if aws.ToBool(apiObject.SynchroniseConsumerGroupOffsets) { - tfMap["synchronise_consumer_group_offsets"] = apiObject.SynchroniseConsumerGroupOffsets + tfMap["consumer_groups_to_exclude"] = v } if aws.ToBool(apiObject.DetectAndCopyNewConsumerGroups) { tfMap["detect_and_copy_new_consumer_groups"] = apiObject.DetectAndCopyNewConsumerGroups } + if aws.ToBool(apiObject.SynchroniseConsumerGroupOffsets) { + tfMap["synchronise_consumer_group_offsets"] = apiObject.SynchroniseConsumerGroupOffsets + } + return tfMap } @@ -547,24 +563,42 @@ func flattenTopicReplication(apiObject *types.TopicReplication) map[string]inter tfMap := map[string]interface{}{} + if aws.ToBool(apiObject.CopyAccessControlListsForTopics) { + tfMap["copy_access_control_lists_for_topics"] = apiObject.CopyAccessControlListsForTopics + } + + if aws.ToBool(apiObject.CopyTopicConfigurations) { + tfMap["copy_topic_configurations"] = apiObject.CopyTopicConfigurations + } + + if aws.ToBool(apiObject.DetectAndCopyNewTopics) { + tfMap["detect_and_copy_new_topics"] = apiObject.DetectAndCopyNewTopics + } + + if v := apiObject.StartingPosition; v != nil { + tfMap["starting_position"] = []interface{}{flattenReplicationStartingPosition(v)} + } + if v := apiObject.TopicsToReplicate; v != nil { - tfMap["topics_to_replicate"] = flex.FlattenStringValueSet(v) + tfMap["topics_to_replicate"] = v } if v := apiObject.TopicsToExclude; v != nil 
{ - tfMap["topics_to_exclude"] = flex.FlattenStringValueSet(v) + tfMap["topics_to_exclude"] = v } - if aws.ToBool(apiObject.CopyTopicConfigurations) { - tfMap["copy_topic_configurations"] = apiObject.CopyTopicConfigurations - } + return tfMap +} - if aws.ToBool(apiObject.CopyAccessControlListsForTopics) { - tfMap["copy_access_control_lists_for_topics"] = apiObject.CopyAccessControlListsForTopics +func flattenReplicationStartingPosition(apiObject *types.ReplicationStartingPosition) map[string]interface{} { + if apiObject == nil { + return nil } - if aws.ToBool(apiObject.DetectAndCopyNewTopics) { - tfMap["detect_and_copy_new_topics"] = apiObject.DetectAndCopyNewTopics + tfMap := map[string]interface{}{} + + if v := apiObject.Type; v != "" { + tfMap[names.AttrType] = v } return tfMap @@ -606,11 +640,11 @@ func flattenKafkaClusterClientVPCConfig(apiObject *types.KafkaClusterClientVpcCo tfMap := map[string]interface{}{} if v := apiObject.SecurityGroupIds; v != nil { - tfMap["security_groups_ids"] = flex.FlattenStringValueSet(v) + tfMap["security_groups_ids"] = v } if v := apiObject.SubnetIds; v != nil { - tfMap[names.AttrSubnetIDs] = flex.FlattenStringValueSet(v) + tfMap[names.AttrSubnetIDs] = v } return tfMap @@ -749,6 +783,22 @@ func expandConsumerGroupReplication(tfMap map[string]interface{}) *types.Consume func expandTopicReplication(tfMap map[string]interface{}) *types.TopicReplication { apiObject := &types.TopicReplication{} + if v, ok := tfMap["copy_access_control_lists_for_topics"].(bool); ok { + apiObject.CopyAccessControlListsForTopics = aws.Bool(v) + } + + if v, ok := tfMap["copy_topic_configurations"].(bool); ok { + apiObject.CopyTopicConfigurations = aws.Bool(v) + } + + if v, ok := tfMap["detect_and_copy_new_topics"].(bool); ok { + apiObject.DetectAndCopyNewTopics = aws.Bool(v) + } + + if v, ok := tfMap["starting_position"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.StartingPosition = 
expandReplicationStartingPosition(v[0].(map[string]interface{})) + } + if v, ok := tfMap["topics_to_replicate"].(*schema.Set); ok && v.Len() > 0 { apiObject.TopicsToReplicate = flex.ExpandStringValueSet(v) } @@ -757,16 +807,14 @@ func expandTopicReplication(tfMap map[string]interface{}) *types.TopicReplicatio apiObject.TopicsToExclude = flex.ExpandStringValueSet(v) } - if v, ok := tfMap["copy_topic_configurations"].(bool); ok { - apiObject.CopyTopicConfigurations = aws.Bool(v) - } + return apiObject +} - if v, ok := tfMap["copy_access_control_lists_for_topics"].(bool); ok { - apiObject.CopyAccessControlListsForTopics = aws.Bool(v) - } +func expandReplicationStartingPosition(tfMap map[string]interface{}) *types.ReplicationStartingPosition { + apiObject := &types.ReplicationStartingPosition{} - if v, ok := tfMap["detect_and_copy_new_topics"].(bool); ok { - apiObject.DetectAndCopyNewTopics = aws.Bool(v) + if v, ok := tfMap[names.AttrType].(string); ok { + apiObject.Type = types.ReplicationStartingPositionType(v) } return apiObject diff --git a/internal/service/kafka/replicator_test.go b/internal/service/kafka/replicator_test.go index 776a5933f96..aa67f061cef 100644 --- a/internal/service/kafka/replicator_test.go +++ b/internal/service/kafka/replicator_test.go @@ -53,9 +53,10 @@ func TestAccKafkaReplicator_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kafka_cluster.0.vpc_config.0.security_groups_ids.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "kafka_cluster.1.vpc_config.0.subnet_ids.#", acctest.Ct3), resource.TestCheckResourceAttr(resourceName, "kafka_cluster.1.vpc_config.0.security_groups_ids.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.consumer_groups_to_replicate.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.target_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, 
"replication_info_list.0.topic_replication.0.starting_position.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.topics_to_replicate.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.consumer_groups_to_replicate.#", acctest.Ct1), ), }, { @@ -66,6 +67,7 @@ func TestAccKafkaReplicator_basic(t *testing.T) { }, }) } + func TestAccKafkaReplicator_update(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -100,9 +102,10 @@ func TestAccKafkaReplicator_update(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kafka_cluster.0.vpc_config.0.security_groups_ids.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "kafka_cluster.1.vpc_config.0.subnet_ids.#", acctest.Ct3), resource.TestCheckResourceAttr(resourceName, "kafka_cluster.1.vpc_config.0.security_groups_ids.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.consumer_groups_to_replicate.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.target_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.starting_position.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.topics_to_replicate.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.consumer_groups_to_replicate.#", acctest.Ct1), ), }, { @@ -122,16 +125,18 @@ func TestAccKafkaReplicator_update(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "kafka_cluster.0.vpc_config.0.security_groups_ids.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "kafka_cluster.1.vpc_config.0.subnet_ids.#", acctest.Ct3), resource.TestCheckResourceAttr(resourceName, "kafka_cluster.1.vpc_config.0.security_groups_ids.#", 
acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.consumer_groups_to_replicate.#", acctest.Ct3), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.consumer_groups_to_exclude.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.synchronise_consumer_group_offsets", acctest.CtFalse), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.detect_and_copy_new_consumer_groups", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.target_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.starting_position.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.starting_position.0.type", "EARLIEST"), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.topics_to_replicate.#", acctest.Ct3), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.topics_to_exclude.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.copy_topic_configurations", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.copy_access_control_lists_for_topics", acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.topic_replication.0.detect_and_copy_new_topics", acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.consumer_groups_to_replicate.#", acctest.Ct3), - resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.consumer_groups_to_exclude.#", acctest.Ct1), - 
resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.synchronise_consumer_group_offsets", acctest.CtFalse), - resource.TestCheckResourceAttr(resourceName, "replication_info_list.0.consumer_group_replication.0.detect_and_copy_new_consumer_groups", acctest.CtFalse), ), }, { @@ -620,6 +625,10 @@ resource "aws_msk_replicator" "test" { copy_topic_configurations = false topics_to_replicate = ["topic1", "topic2", "topic3"] topics_to_exclude = ["topic-4"] + + starting_position { + type = "EARLIEST" + } } consumer_group_replication { diff --git a/internal/service/kafka/service_endpoint_resolver_gen.go b/internal/service/kafka/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..8b1b945be3f --- /dev/null +++ b/internal/service/kafka/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package kafka + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + kafka_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafka" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ kafka_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver kafka_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: kafka_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params kafka_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring 
UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up kafka endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*kafka_sdkv2.Options) { + return func(o *kafka_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/kafka/service_endpoints_gen_test.go b/internal/service/kafka/service_endpoints_gen_test.go index f975264656d..1d7e1653858 100644 --- a/internal/service/kafka/service_endpoints_gen_test.go +++ b/internal/service/kafka/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ 
withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := kafka_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), kafka_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := kafka_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), kafka_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { 
+ t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/kafka/service_package.go b/internal/service/kafka/service_package.go index 0f4215efd87..7fb8e126004 100644 --- a/internal/service/kafka/service_package.go +++ b/internal/service/kafka/service_package.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/kafka" "github.com/aws/aws-sdk-go-v2/service/kafka/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,24 +19,16 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*kafka.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return kafka.NewFromConfig(cfg, func(o *kafka.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if 
errs.IsAErrorMessageContains[*types.TooManyRequestsException](err, "Too Many Requests") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) - }), nil + return kafka.NewFromConfig(cfg, + kafka.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *kafka.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*types.TooManyRequestsException](err, "Too Many Requests") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + })) + }, + ), nil } diff --git a/internal/service/kafka/service_package_gen.go b/internal/service/kafka/service_package_gen.go index 4b03e87d8f1..f4d8944fa3d 100644 --- a/internal/service/kafka/service_package_gen.go +++ b/internal/service/kafka/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package kafka diff --git a/internal/service/kafkaconnect/connector.go b/internal/service/kafkaconnect/connector.go index 9f53e2c9ade..50657436bc5 100644 --- a/internal/service/kafkaconnect/connector.go +++ b/internal/service/kafkaconnect/connector.go @@ -5,25 +5,31 @@ package kafkaconnect import ( "context" + "fmt" "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_mskconnect_connector") -func ResourceConnector() *schema.Resource { +// @SDKResource("aws_mskconnect_connector", name="Connector") +// @Tags(identifierAttribute="arn") +func resourceConnector() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceConnectorCreate, ReadWithoutTimeout: resourceConnectorRead, @@ -200,11 +206,11 @@ func ResourceConnector() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "authentication_type": { - Type: schema.TypeString, - Optional: true, - 
ForceNew: true, - Default: kafkaconnect.KafkaClusterClientAuthenticationTypeNone, - ValidateFunc: validation.StringInSlice(kafkaconnect.KafkaClusterClientAuthenticationType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.KafkaClusterClientAuthenticationTypeNone, + ValidateDiagFunc: enum.Validate[awstypes.KafkaClusterClientAuthenticationType](), }, }, }, @@ -217,11 +223,11 @@ func ResourceConnector() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "encryption_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: kafkaconnect.KafkaClusterEncryptionInTransitTypePlaintext, - ValidateFunc: validation.StringInSlice(kafkaconnect.KafkaClusterEncryptionInTransitType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.KafkaClusterEncryptionInTransitTypePlaintext, + ValidateDiagFunc: enum.Validate[awstypes.KafkaClusterEncryptionInTransitType](), }, }, }, @@ -358,6 +364,8 @@ func ResourceConnector() *schema.Resource { ForceNew: true, ValidateFunc: verify.ValidARN, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), names.AttrVersion: { Type: schema.TypeString, Computed: true, @@ -384,18 +392,19 @@ func ResourceConnector() *schema.Resource { }, }, }, + + CustomizeDiff: verify.SetTagsDiff, } } func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) name := d.Get(names.AttrName).(string) input := &kafkaconnect.CreateConnectorInput{ Capacity: expandCapacity(d.Get("capacity").([]interface{})[0].(map[string]interface{})), - ConnectorConfiguration: flex.ExpandStringMap(d.Get("connector_configuration").(map[string]interface{})), + ConnectorConfiguration: 
flex.ExpandStringValueMap(d.Get("connector_configuration").(map[string]interface{})), ConnectorName: aws.String(name), KafkaCluster: expandCluster(d.Get("kafka_cluster").([]interface{})[0].(map[string]interface{})), KafkaClusterClientAuthentication: expandClusterClientAuthentication(d.Get("kafka_cluster_client_authentication").([]interface{})[0].(map[string]interface{})), @@ -403,6 +412,7 @@ func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta i KafkaConnectVersion: aws.String(d.Get("kafkaconnect_version").(string)), Plugins: expandPlugins(d.Get("plugin").(*schema.Set).List()), ServiceExecutionRoleArn: aws.String(d.Get("service_execution_role_arn").(string)), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk(names.AttrDescription); ok { @@ -417,18 +427,15 @@ func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta i input.WorkerConfiguration = expandWorkerConfiguration(v.([]interface{})[0].(map[string]interface{})) } - log.Printf("[DEBUG] Creating MSK Connect Connector: %s", input) - output, err := conn.CreateConnectorWithContext(ctx, input) + output, err := conn.CreateConnector(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK Connect Connector (%s): %s", name, err) } - d.SetId(aws.StringValue(output.ConnectorArn)) - - _, err = waitConnectorCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)) + d.SetId(aws.ToString(output.ConnectorArn)) - if err != nil { + if _, err := waitConnectorCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) create: %s", d.Id(), err) } @@ -437,10 +444,9 @@ func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta i func resourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := 
meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - connector, err := FindConnectorByARN(ctx, conn, d.Id()) + connector, err := findConnectorByARN(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] MSK Connect Connector (%s) not found, removing from state", d.Id()) @@ -460,7 +466,7 @@ func resourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta int } else { d.Set("capacity", nil) } - d.Set("connector_configuration", aws.StringValueMap(connector.ConnectorConfiguration)) + d.Set("connector_configuration", connector.ConnectorConfiguration) d.Set(names.AttrDescription, connector.ConnectorDescription) if connector.KafkaCluster != nil { if err := d.Set("kafka_cluster", []interface{}{flattenClusterDescription(connector.KafkaCluster)}); err != nil { @@ -510,26 +516,24 @@ func resourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta int func resourceConnectorUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - input := &kafkaconnect.UpdateConnectorInput{ - Capacity: expandCapacityUpdate(d.Get("capacity").([]interface{})[0].(map[string]interface{})), - ConnectorArn: aws.String(d.Id()), - CurrentVersion: aws.String(d.Get(names.AttrVersion).(string)), - } + if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + input := &kafkaconnect.UpdateConnectorInput{ + Capacity: expandCapacityUpdate(d.Get("capacity").([]interface{})[0].(map[string]interface{})), + ConnectorArn: aws.String(d.Id()), + CurrentVersion: aws.String(d.Get(names.AttrVersion).(string)), + } - log.Printf("[DEBUG] Updating MSK Connect Connector: %s", input) - _, err := conn.UpdateConnectorWithContext(ctx, input) + _, err := conn.UpdateConnector(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating MSK Connect Connector (%s): %s", d.Id(), err) 
- } - - _, err = waitConnectorUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating MSK Connect Connector (%s): %s", d.Id(), err) + } - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) update: %s", d.Id(), err) + if _, err := waitConnectorUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) update: %s", d.Id(), err) + } } return append(diags, resourceConnectorRead(ctx, d, meta)...) @@ -537,15 +541,14 @@ func resourceConnectorUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceConnectorDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) log.Printf("[DEBUG] Deleting MSK Connect Connector: %s", d.Id()) - _, err := conn.DeleteConnectorWithContext(ctx, &kafkaconnect.DeleteConnectorInput{ + _, err := conn.DeleteConnector(ctx, &kafkaconnect.DeleteConnectorInput{ ConnectorArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } @@ -553,305 +556,406 @@ func resourceConnectorDelete(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "deleting MSK Connect Connector (%s): %s", d.Id(), err) } - _, err = waitConnectorDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) - - if err != nil { + if _, err := waitConnectorDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Connector (%s) delete: %s", d.Id(), err) } return diags } -func expandCapacity(tfMap map[string]interface{}) *kafkaconnect.Capacity { +func findConnectorByARN(ctx context.Context, 
conn *kafkaconnect.Client, arn string) (*kafkaconnect.DescribeConnectorOutput, error) { + input := &kafkaconnect.DescribeConnectorInput{ + ConnectorArn: aws.String(arn), + } + + output, err := conn.DescribeConnector(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusConnector(ctx context.Context, conn *kafkaconnect.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findConnectorByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.ConnectorState), nil + } +} + +func waitConnectorCreated(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ConnectorStateCreating), + Target: enum.Slice(awstypes.ConnectorStateRunning), + Refresh: statusConnector(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { + if state, stateDescription := output.ConnectorState, output.StateDescription; state == awstypes.ConnectorStateFailed && stateDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateDescription.Code), aws.ToString(stateDescription.Message))) + } + + return output, err + } + + return nil, err +} + +func waitConnectorUpdated(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ConnectorStateUpdating), + Target: 
enum.Slice(awstypes.ConnectorStateRunning), + Refresh: statusConnector(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { + if state, stateDescription := output.ConnectorState, output.StateDescription; state == awstypes.ConnectorStateFailed && stateDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateDescription.Code), aws.ToString(stateDescription.Message))) + } + + return output, err + } + + return nil, err +} + +func waitConnectorDeleted(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ConnectorStateDeleting), + Target: []string{}, + Refresh: statusConnector(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { + if state, stateDescription := output.ConnectorState, output.StateDescription; state == awstypes.ConnectorStateFailed && stateDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateDescription.Code), aws.ToString(stateDescription.Message))) + } + + return output, err + } + + return nil, err +} + +func expandCapacity(tfMap map[string]interface{}) *awstypes.Capacity { if tfMap == nil { return nil } - apiObject := &kafkaconnect.Capacity{} + apiObject := &awstypes.Capacity{} - if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.AutoScaling = expandAutoScaling(v[0].(map[string]interface{})) } - if v, ok := tfMap["provisioned_capacity"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["provisioned_capacity"].([]interface{}); ok && len(v) > 0 && v[0] != nil { 
apiObject.ProvisionedCapacity = expandProvisionedCapacity(v[0].(map[string]interface{})) } return apiObject } -func expandAutoScaling(tfMap map[string]interface{}) *kafkaconnect.AutoScaling { +func expandAutoScaling(tfMap map[string]interface{}) *awstypes.AutoScaling { if tfMap == nil { return nil } - apiObject := &kafkaconnect.AutoScaling{} + apiObject := &awstypes.AutoScaling{} if v, ok := tfMap["max_worker_count"].(int); ok && v != 0 { - apiObject.MaxWorkerCount = aws.Int64(int64(v)) + apiObject.MaxWorkerCount = int32(v) } if v, ok := tfMap["mcu_count"].(int); ok && v != 0 { - apiObject.McuCount = aws.Int64(int64(v)) + apiObject.McuCount = int32(v) } if v, ok := tfMap["min_worker_count"].(int); ok && v != 0 { - apiObject.MinWorkerCount = aws.Int64(int64(v)) + apiObject.MinWorkerCount = int32(v) } - if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ScaleInPolicy = expandScaleInPolicy(v[0].(map[string]interface{})) } - if v, ok := tfMap["scale_out_policy"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["scale_out_policy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ScaleOutPolicy = expandScaleOutPolicy(v[0].(map[string]interface{})) } return apiObject } -func expandScaleInPolicy(tfMap map[string]interface{}) *kafkaconnect.ScaleInPolicy { +func expandScaleInPolicy(tfMap map[string]interface{}) *awstypes.ScaleInPolicy { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ScaleInPolicy{} + apiObject := &awstypes.ScaleInPolicy{} if v, ok := tfMap["cpu_utilization_percentage"].(int); ok && v != 0 { - apiObject.CpuUtilizationPercentage = aws.Int64(int64(v)) + apiObject.CpuUtilizationPercentage = int32(v) } return apiObject } -func expandScaleOutPolicy(tfMap map[string]interface{}) *kafkaconnect.ScaleOutPolicy { +func expandScaleOutPolicy(tfMap map[string]interface{}) *awstypes.ScaleOutPolicy { if tfMap == nil { 
return nil } - apiObject := &kafkaconnect.ScaleOutPolicy{} + apiObject := &awstypes.ScaleOutPolicy{} if v, ok := tfMap["cpu_utilization_percentage"].(int); ok && v != 0 { - apiObject.CpuUtilizationPercentage = aws.Int64(int64(v)) + apiObject.CpuUtilizationPercentage = int32(v) } return apiObject } -func expandProvisionedCapacity(tfMap map[string]interface{}) *kafkaconnect.ProvisionedCapacity { +func expandProvisionedCapacity(tfMap map[string]interface{}) *awstypes.ProvisionedCapacity { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ProvisionedCapacity{} + apiObject := &awstypes.ProvisionedCapacity{} if v, ok := tfMap["mcu_count"].(int); ok && v != 0 { - apiObject.McuCount = aws.Int64(int64(v)) + apiObject.McuCount = int32(v) } if v, ok := tfMap["worker_count"].(int); ok && v != 0 { - apiObject.WorkerCount = aws.Int64(int64(v)) + apiObject.WorkerCount = int32(v) } return apiObject } -func expandCapacityUpdate(tfMap map[string]interface{}) *kafkaconnect.CapacityUpdate { +func expandCapacityUpdate(tfMap map[string]interface{}) *awstypes.CapacityUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.CapacityUpdate{} + apiObject := &awstypes.CapacityUpdate{} - if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["autoscaling"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.AutoScaling = expandAutoScalingUpdate(v[0].(map[string]interface{})) } - if v, ok := tfMap["provisioned_capacity"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["provisioned_capacity"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ProvisionedCapacity = expandProvisionedCapacityUpdate(v[0].(map[string]interface{})) } return apiObject } -func expandAutoScalingUpdate(tfMap map[string]interface{}) *kafkaconnect.AutoScalingUpdate { +func expandAutoScalingUpdate(tfMap map[string]interface{}) *awstypes.AutoScalingUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.AutoScalingUpdate{} + 
apiObject := &awstypes.AutoScalingUpdate{} if v, ok := tfMap["max_worker_count"].(int); ok { - apiObject.MaxWorkerCount = aws.Int64(int64(v)) + apiObject.MaxWorkerCount = int32(v) } if v, ok := tfMap["mcu_count"].(int); ok { - apiObject.McuCount = aws.Int64(int64(v)) + apiObject.McuCount = int32(v) } if v, ok := tfMap["min_worker_count"].(int); ok { - apiObject.MinWorkerCount = aws.Int64(int64(v)) + apiObject.MinWorkerCount = int32(v) } - if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["scale_in_policy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ScaleInPolicy = expandScaleInPolicyUpdate(v[0].(map[string]interface{})) } - if v, ok := tfMap["scale_out_policy"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["scale_out_policy"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ScaleOutPolicy = expandScaleOutPolicyUpdate(v[0].(map[string]interface{})) } return apiObject } -func expandScaleInPolicyUpdate(tfMap map[string]interface{}) *kafkaconnect.ScaleInPolicyUpdate { +func expandScaleInPolicyUpdate(tfMap map[string]interface{}) *awstypes.ScaleInPolicyUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ScaleInPolicyUpdate{} + apiObject := &awstypes.ScaleInPolicyUpdate{} if v, ok := tfMap["cpu_utilization_percentage"].(int); ok { - apiObject.CpuUtilizationPercentage = aws.Int64(int64(v)) + apiObject.CpuUtilizationPercentage = int32(v) } return apiObject } -func expandScaleOutPolicyUpdate(tfMap map[string]interface{}) *kafkaconnect.ScaleOutPolicyUpdate { +func expandScaleOutPolicyUpdate(tfMap map[string]interface{}) *awstypes.ScaleOutPolicyUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ScaleOutPolicyUpdate{} + apiObject := &awstypes.ScaleOutPolicyUpdate{} if v, ok := tfMap["cpu_utilization_percentage"].(int); ok { - apiObject.CpuUtilizationPercentage = aws.Int64(int64(v)) + apiObject.CpuUtilizationPercentage = int32(v) } return apiObject } -func 
expandProvisionedCapacityUpdate(tfMap map[string]interface{}) *kafkaconnect.ProvisionedCapacityUpdate { +func expandProvisionedCapacityUpdate(tfMap map[string]interface{}) *awstypes.ProvisionedCapacityUpdate { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ProvisionedCapacityUpdate{} + apiObject := &awstypes.ProvisionedCapacityUpdate{} if v, ok := tfMap["mcu_count"].(int); ok { - apiObject.McuCount = aws.Int64(int64(v)) + apiObject.McuCount = int32(v) } if v, ok := tfMap["worker_count"].(int); ok { - apiObject.WorkerCount = aws.Int64(int64(v)) + apiObject.WorkerCount = int32(v) } return apiObject } -func expandCluster(tfMap map[string]interface{}) *kafkaconnect.KafkaCluster { +func expandCluster(tfMap map[string]interface{}) *awstypes.KafkaCluster { if tfMap == nil { return nil } - apiObject := &kafkaconnect.KafkaCluster{} + apiObject := &awstypes.KafkaCluster{} - if v, ok := tfMap["apache_kafka_cluster"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["apache_kafka_cluster"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.ApacheKafkaCluster = expandApacheCluster(v[0].(map[string]interface{})) } return apiObject } -func expandApacheCluster(tfMap map[string]interface{}) *kafkaconnect.ApacheKafkaCluster { +func expandApacheCluster(tfMap map[string]interface{}) *awstypes.ApacheKafkaCluster { if tfMap == nil { return nil } - apiObject := &kafkaconnect.ApacheKafkaCluster{} + apiObject := &awstypes.ApacheKafkaCluster{} if v, ok := tfMap["bootstrap_servers"].(string); ok && v != "" { apiObject.BootstrapServers = aws.String(v) } - if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["vpc"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Vpc = expandVPC(v[0].(map[string]interface{})) } return apiObject } -func expandVPC(tfMap map[string]interface{}) *kafkaconnect.Vpc { +func expandVPC(tfMap map[string]interface{}) *awstypes.Vpc { if tfMap == nil { return nil } - apiObject := &kafkaconnect.Vpc{} + 
apiObject := &awstypes.Vpc{} if v, ok := tfMap[names.AttrSecurityGroups].(*schema.Set); ok && v.Len() > 0 { - apiObject.SecurityGroups = flex.ExpandStringSet(v) + apiObject.SecurityGroups = flex.ExpandStringValueSet(v) } if v, ok := tfMap[names.AttrSubnets].(*schema.Set); ok && v.Len() > 0 { - apiObject.Subnets = flex.ExpandStringSet(v) + apiObject.Subnets = flex.ExpandStringValueSet(v) } return apiObject } -func expandClusterClientAuthentication(tfMap map[string]interface{}) *kafkaconnect.KafkaClusterClientAuthentication { +func expandClusterClientAuthentication(tfMap map[string]interface{}) *awstypes.KafkaClusterClientAuthentication { if tfMap == nil { return nil } - apiObject := &kafkaconnect.KafkaClusterClientAuthentication{} + apiObject := &awstypes.KafkaClusterClientAuthentication{} if v, ok := tfMap["authentication_type"].(string); ok && v != "" { - apiObject.AuthenticationType = aws.String(v) + apiObject.AuthenticationType = awstypes.KafkaClusterClientAuthenticationType(v) } return apiObject } -func expandClusterEncryptionInTransit(tfMap map[string]interface{}) *kafkaconnect.KafkaClusterEncryptionInTransit { +func expandClusterEncryptionInTransit(tfMap map[string]interface{}) *awstypes.KafkaClusterEncryptionInTransit { if tfMap == nil { return nil } - apiObject := &kafkaconnect.KafkaClusterEncryptionInTransit{} + apiObject := &awstypes.KafkaClusterEncryptionInTransit{} if v, ok := tfMap["encryption_type"].(string); ok && v != "" { - apiObject.EncryptionType = aws.String(v) + apiObject.EncryptionType = awstypes.KafkaClusterEncryptionInTransitType(v) } return apiObject } -func expandPlugin(tfMap map[string]interface{}) *kafkaconnect.Plugin { +func expandPlugin(tfMap map[string]interface{}) *awstypes.Plugin { if tfMap == nil { return nil } - apiObject := &kafkaconnect.Plugin{} + apiObject := &awstypes.Plugin{} - if v, ok := tfMap["custom_plugin"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["custom_plugin"].([]interface{}); ok && len(v) > 0 && v[0] 
!= nil { apiObject.CustomPlugin = expandCustomPlugin(v[0].(map[string]interface{})) } return apiObject } -func expandPlugins(tfList []interface{}) []*kafkaconnect.Plugin { +func expandPlugins(tfList []interface{}) []awstypes.Plugin { if len(tfList) == 0 { return nil } - var apiObjects []*kafkaconnect.Plugin + var apiObjects []awstypes.Plugin for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { continue } @@ -862,75 +966,75 @@ func expandPlugins(tfList []interface{}) []*kafkaconnect.Plugin { continue } - apiObjects = append(apiObjects, apiObject) + apiObjects = append(apiObjects, *apiObject) } return apiObjects } -func expandCustomPlugin(tfMap map[string]interface{}) *kafkaconnect.CustomPlugin { +func expandCustomPlugin(tfMap map[string]interface{}) *awstypes.CustomPlugin { if tfMap == nil { return nil } - apiObject := &kafkaconnect.CustomPlugin{} + apiObject := &awstypes.CustomPlugin{} if v, ok := tfMap[names.AttrARN].(string); ok && v != "" { apiObject.CustomPluginArn = aws.String(v) } if v, ok := tfMap["revision"].(int); ok && v != 0 { - apiObject.Revision = aws.Int64(int64(v)) + apiObject.Revision = int64(v) } return apiObject } -func expandLogDelivery(tfMap map[string]interface{}) *kafkaconnect.LogDelivery { +func expandLogDelivery(tfMap map[string]interface{}) *awstypes.LogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.LogDelivery{} + apiObject := &awstypes.LogDelivery{} - if v, ok := tfMap["worker_log_delivery"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["worker_log_delivery"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.WorkerLogDelivery = expandWorkerLogDelivery(v[0].(map[string]interface{})) } return apiObject } -func expandWorkerLogDelivery(tfMap map[string]interface{}) *kafkaconnect.WorkerLogDelivery { +func expandWorkerLogDelivery(tfMap map[string]interface{}) *awstypes.WorkerLogDelivery { if tfMap == nil { return nil } - apiObject := 
&kafkaconnect.WorkerLogDelivery{} + apiObject := &awstypes.WorkerLogDelivery{} - if v, ok := tfMap[names.AttrCloudWatchLogs].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap[names.AttrCloudWatchLogs].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.CloudWatchLogs = expandCloudWatchLogsLogDelivery(v[0].(map[string]interface{})) } - if v, ok := tfMap["firehose"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["firehose"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.Firehose = expandFirehoseLogDelivery(v[0].(map[string]interface{})) } - if v, ok := tfMap["s3"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["s3"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.S3 = expandS3LogDelivery(v[0].(map[string]interface{})) } return apiObject } -func expandCloudWatchLogsLogDelivery(tfMap map[string]interface{}) *kafkaconnect.CloudWatchLogsLogDelivery { +func expandCloudWatchLogsLogDelivery(tfMap map[string]interface{}) *awstypes.CloudWatchLogsLogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.CloudWatchLogsLogDelivery{} + apiObject := &awstypes.CloudWatchLogsLogDelivery{} if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObject.Enabled = aws.Bool(v) + apiObject.Enabled = v } if v, ok := tfMap["log_group"].(string); ok && v != "" { @@ -940,37 +1044,37 @@ func expandCloudWatchLogsLogDelivery(tfMap map[string]interface{}) *kafkaconnect return apiObject } -func expandFirehoseLogDelivery(tfMap map[string]interface{}) *kafkaconnect.FirehoseLogDelivery { +func expandFirehoseLogDelivery(tfMap map[string]interface{}) *awstypes.FirehoseLogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.FirehoseLogDelivery{} + apiObject := &awstypes.FirehoseLogDelivery{} if v, ok := tfMap["delivery_stream"].(string); ok && v != "" { apiObject.DeliveryStream = aws.String(v) } if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObject.Enabled = aws.Bool(v) + apiObject.Enabled = v } return 
apiObject } -func expandS3LogDelivery(tfMap map[string]interface{}) *kafkaconnect.S3LogDelivery { +func expandS3LogDelivery(tfMap map[string]interface{}) *awstypes.S3LogDelivery { if tfMap == nil { return nil } - apiObject := &kafkaconnect.S3LogDelivery{} + apiObject := &awstypes.S3LogDelivery{} if v, ok := tfMap[names.AttrBucket].(string); ok && v != "" { apiObject.Bucket = aws.String(v) } if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObject.Enabled = aws.Bool(v) + apiObject.Enabled = v } if v, ok := tfMap[names.AttrPrefix].(string); ok && v != "" { @@ -980,15 +1084,15 @@ func expandS3LogDelivery(tfMap map[string]interface{}) *kafkaconnect.S3LogDelive return apiObject } -func expandWorkerConfiguration(tfMap map[string]interface{}) *kafkaconnect.WorkerConfiguration { +func expandWorkerConfiguration(tfMap map[string]interface{}) *awstypes.WorkerConfiguration { if tfMap == nil { return nil } - apiObject := &kafkaconnect.WorkerConfiguration{} + apiObject := &awstypes.WorkerConfiguration{} if v, ok := tfMap["revision"].(int); ok && v != 0 { - apiObject.Revision = aws.Int64(int64(v)) + apiObject.Revision = int64(v) } if v, ok := tfMap[names.AttrARN].(string); ok && v != "" { @@ -998,7 +1102,7 @@ func expandWorkerConfiguration(tfMap map[string]interface{}) *kafkaconnect.Worke return apiObject } -func flattenCapacityDescription(apiObject *kafkaconnect.CapacityDescription) map[string]interface{} { +func flattenCapacityDescription(apiObject *awstypes.CapacityDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1016,23 +1120,15 @@ func flattenCapacityDescription(apiObject *kafkaconnect.CapacityDescription) map return tfMap } -func flattenAutoScalingDescription(apiObject *kafkaconnect.AutoScalingDescription) map[string]interface{} { +func flattenAutoScalingDescription(apiObject *awstypes.AutoScalingDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.MaxWorkerCount; 
v != nil { - tfMap["max_worker_count"] = aws.Int64Value(v) - } - - if v := apiObject.McuCount; v != nil { - tfMap["mcu_count"] = aws.Int64Value(v) - } - - if v := apiObject.MinWorkerCount; v != nil { - tfMap["min_worker_count"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "max_worker_count": apiObject.MaxWorkerCount, + "mcu_count": apiObject.McuCount, + "min_worker_count": apiObject.MinWorkerCount, } if v := apiObject.ScaleInPolicy; v != nil { @@ -1046,53 +1142,44 @@ func flattenAutoScalingDescription(apiObject *kafkaconnect.AutoScalingDescriptio return tfMap } -func flattenScaleInPolicyDescription(apiObject *kafkaconnect.ScaleInPolicyDescription) map[string]interface{} { +func flattenScaleInPolicyDescription(apiObject *awstypes.ScaleInPolicyDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.CpuUtilizationPercentage; v != nil { - tfMap["cpu_utilization_percentage"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "cpu_utilization_percentage": apiObject.CpuUtilizationPercentage, } return tfMap } -func flattenScaleOutPolicyDescription(apiObject *kafkaconnect.ScaleOutPolicyDescription) map[string]interface{} { +func flattenScaleOutPolicyDescription(apiObject *awstypes.ScaleOutPolicyDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.CpuUtilizationPercentage; v != nil { - tfMap["cpu_utilization_percentage"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "cpu_utilization_percentage": apiObject.CpuUtilizationPercentage, } return tfMap } -func flattenProvisionedCapacityDescription(apiObject *kafkaconnect.ProvisionedCapacityDescription) map[string]interface{} { +func flattenProvisionedCapacityDescription(apiObject *awstypes.ProvisionedCapacityDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.McuCount; v != 
nil { - tfMap["mcu_count"] = aws.Int64Value(v) - } - - if v := apiObject.WorkerCount; v != nil { - tfMap["worker_count"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + "mcu_count": apiObject.McuCount, + "worker_count": apiObject.WorkerCount, } return tfMap } -func flattenClusterDescription(apiObject *kafkaconnect.KafkaClusterDescription) map[string]interface{} { +func flattenClusterDescription(apiObject *awstypes.KafkaClusterDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1106,7 +1193,7 @@ func flattenClusterDescription(apiObject *kafkaconnect.KafkaClusterDescription) return tfMap } -func flattenApacheClusterDescription(apiObject *kafkaconnect.ApacheKafkaClusterDescription) map[string]interface{} { +func flattenApacheClusterDescription(apiObject *awstypes.ApacheKafkaClusterDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1114,7 +1201,7 @@ func flattenApacheClusterDescription(apiObject *kafkaconnect.ApacheKafkaClusterD tfMap := map[string]interface{}{} if v := apiObject.BootstrapServers; v != nil { - tfMap["bootstrap_servers"] = aws.StringValue(v) + tfMap["bootstrap_servers"] = aws.ToString(v) } if v := apiObject.Vpc; v != nil { @@ -1124,7 +1211,7 @@ func flattenApacheClusterDescription(apiObject *kafkaconnect.ApacheKafkaClusterD return tfMap } -func flattenVPCDescription(apiObject *kafkaconnect.VpcDescription) map[string]interface{} { +func flattenVPCDescription(apiObject *awstypes.VpcDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1132,45 +1219,41 @@ func flattenVPCDescription(apiObject *kafkaconnect.VpcDescription) map[string]in tfMap := map[string]interface{}{} if v := apiObject.SecurityGroups; v != nil { - tfMap[names.AttrSecurityGroups] = aws.StringValueSlice(v) + tfMap[names.AttrSecurityGroups] = v } if v := apiObject.Subnets; v != nil { - tfMap[names.AttrSubnets] = aws.StringValueSlice(v) + tfMap[names.AttrSubnets] = v } return tfMap } -func 
flattenClusterClientAuthenticationDescription(apiObject *kafkaconnect.KafkaClusterClientAuthenticationDescription) map[string]interface{} { +func flattenClusterClientAuthenticationDescription(apiObject *awstypes.KafkaClusterClientAuthenticationDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.AuthenticationType; v != nil { - tfMap["authentication_type"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "authentication_type": apiObject.AuthenticationType, } return tfMap } -func flattenClusterEncryptionInTransitDescription(apiObject *kafkaconnect.KafkaClusterEncryptionInTransitDescription) map[string]interface{} { +func flattenClusterEncryptionInTransitDescription(apiObject *awstypes.KafkaClusterEncryptionInTransitDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.EncryptionType; v != nil { - tfMap["encryption_type"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "encryption_type": apiObject.EncryptionType, } return tfMap } -func flattenPluginDescription(apiObject *kafkaconnect.PluginDescription) map[string]interface{} { +func flattenPluginDescription(apiObject *awstypes.PluginDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1184,7 +1267,7 @@ func flattenPluginDescription(apiObject *kafkaconnect.PluginDescription) map[str return tfMap } -func flattenPluginDescriptions(apiObjects []*kafkaconnect.PluginDescription) []interface{} { +func flattenPluginDescriptions(apiObjects []awstypes.PluginDescription) []interface{} { if len(apiObjects) == 0 { return nil } @@ -1192,35 +1275,29 @@ func flattenPluginDescriptions(apiObjects []*kafkaconnect.PluginDescription) []i var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - - tfList = append(tfList, flattenPluginDescription(apiObject)) + tfList = append(tfList, 
flattenPluginDescription(&apiObject)) } return tfList } -func flattenCustomPluginDescription(apiObject *kafkaconnect.CustomPluginDescription) map[string]interface{} { +func flattenCustomPluginDescription(apiObject *awstypes.CustomPluginDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.CustomPluginArn; v != nil { - tfMap[names.AttrARN] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "revision": apiObject.Revision, } - if v := apiObject.Revision; v != nil { - tfMap["revision"] = aws.Int64Value(v) + if v := apiObject.CustomPluginArn; v != nil { + tfMap[names.AttrARN] = aws.ToString(v) } return tfMap } -func flattenLogDeliveryDescription(apiObject *kafkaconnect.LogDeliveryDescription) map[string]interface{} { +func flattenLogDeliveryDescription(apiObject *awstypes.LogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1234,7 +1311,7 @@ func flattenLogDeliveryDescription(apiObject *kafkaconnect.LogDeliveryDescriptio return tfMap } -func flattenWorkerLogDeliveryDescription(apiObject *kafkaconnect.WorkerLogDeliveryDescription) map[string]interface{} { +func flattenWorkerLogDeliveryDescription(apiObject *awstypes.WorkerLogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -1256,77 +1333,69 @@ func flattenWorkerLogDeliveryDescription(apiObject *kafkaconnect.WorkerLogDelive return tfMap } -func flattenCloudWatchLogsLogDeliveryDescription(apiObject *kafkaconnect.CloudWatchLogsLogDeliveryDescription) map[string]interface{} { +func flattenCloudWatchLogsLogDeliveryDescription(apiObject *awstypes.CloudWatchLogsLogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Enabled; v != nil { - tfMap[names.AttrEnabled] = aws.BoolValue(v) + tfMap := map[string]interface{}{ + names.AttrEnabled: apiObject.Enabled, } if v := apiObject.LogGroup; v != nil 
{ - tfMap["log_group"] = aws.StringValue(v) + tfMap["log_group"] = aws.ToString(v) } return tfMap } -func flattenFirehoseLogDeliveryDescription(apiObject *kafkaconnect.FirehoseLogDeliveryDescription) map[string]interface{} { +func flattenFirehoseLogDeliveryDescription(apiObject *awstypes.FirehoseLogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.DeliveryStream; v != nil { - tfMap["delivery_stream"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + names.AttrEnabled: apiObject.Enabled, } - if v := apiObject.Enabled; v != nil { - tfMap[names.AttrEnabled] = aws.BoolValue(v) + if v := apiObject.DeliveryStream; v != nil { + tfMap["delivery_stream"] = aws.ToString(v) } return tfMap } -func flattenS3LogDeliveryDescription(apiObject *kafkaconnect.S3LogDeliveryDescription) map[string]interface{} { +func flattenS3LogDeliveryDescription(apiObject *awstypes.S3LogDeliveryDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Bucket; v != nil { - tfMap[names.AttrBucket] = aws.StringValue(v) + tfMap := map[string]interface{}{ + names.AttrEnabled: apiObject.Enabled, } - if v := apiObject.Enabled; v != nil { - tfMap[names.AttrEnabled] = aws.BoolValue(v) + if v := apiObject.Bucket; v != nil { + tfMap[names.AttrBucket] = aws.ToString(v) } if v := apiObject.Prefix; v != nil { - tfMap[names.AttrPrefix] = aws.StringValue(v) + tfMap[names.AttrPrefix] = aws.ToString(v) } return tfMap } -func flattenWorkerConfigurationDescription(apiObject *kafkaconnect.WorkerConfigurationDescription) map[string]interface{} { +func flattenWorkerConfigurationDescription(apiObject *awstypes.WorkerConfigurationDescription) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Revision; v != nil { - tfMap["revision"] = aws.Int64Value(v) + tfMap := map[string]interface{}{ + 
"revision": apiObject.Revision, } if v := apiObject.WorkerConfigurationArn; v != nil { - tfMap[names.AttrARN] = aws.StringValue(v) + tfMap[names.AttrARN] = aws.ToString(v) } return tfMap diff --git a/internal/service/kafkaconnect/connector_data_source.go b/internal/service/kafkaconnect/connector_data_source.go index 7526daeb9f9..0e42ee45d7f 100644 --- a/internal/service/kafkaconnect/connector_data_source.go +++ b/internal/service/kafkaconnect/connector_data_source.go @@ -6,18 +6,22 @@ package kafkaconnect import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_mskconnect_connector") -func DataSourceConnector() *schema.Resource { +// @SDKDataSource("aws_mskconnect_connector", name="Connector") +// @Tags(identifierAttribute="arn") +func dataSourceConnector() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceConnectorRead, @@ -38,54 +42,66 @@ func DataSourceConnector() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchemaComputed(), }, } } func dataSourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := 
meta.(*conns.AWSClient).KafkaConnectConn(ctx) + connector, err := findConnectorByName(ctx, conn, d.Get(names.AttrName).(string)) - name := d.Get(names.AttrName) - var output []*kafkaconnect.ConnectorSummary + if err != nil { + return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Connect Connector", err)) + } - err := conn.ListConnectorsPagesWithContext(ctx, &kafkaconnect.ListConnectorsInput{}, func(page *kafkaconnect.ListConnectorsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + arn := aws.ToString(connector.ConnectorArn) + d.SetId(arn) + d.Set(names.AttrARN, arn) + d.Set(names.AttrDescription, connector.ConnectorDescription) + d.Set(names.AttrName, connector.ConnectorName) + d.Set(names.AttrVersion, connector.CurrentVersion) - for _, v := range page.Connectors { - if aws.StringValue(v.ConnectorName) == name { - output = append(output, v) - } - } + return diags +} - return !lastPage - }) +func findConnector(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListConnectorsInput, filter tfslices.Predicate[*awstypes.ConnectorSummary]) (*awstypes.ConnectorSummary, error) { + output, err := findConnectors(ctx, conn, input, filter) if err != nil { - return sdkdiag.AppendErrorf(diags, "listing MSK Connect Connectors: %s", err) + return nil, err } - if len(output) == 0 || output[0] == nil { - err = tfresource.NewEmptyResultError(name) - } else if count := len(output); count > 1 { - err = tfresource.NewTooManyResultsError(count, name) - } + return tfresource.AssertSingleValueResult(output) +} - if err != nil { - return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Connect Connector", err)) - } +func findConnectors(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListConnectorsInput, filter tfslices.Predicate[*awstypes.ConnectorSummary]) ([]awstypes.ConnectorSummary, error) { + var output []awstypes.ConnectorSummary - connector := output[0] + pages := 
kafkaconnect.NewListConnectorsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) - d.SetId(aws.StringValue(connector.ConnectorArn)) + if err != nil { + return nil, err + } - d.Set(names.AttrARN, connector.ConnectorArn) - d.Set(names.AttrDescription, connector.ConnectorDescription) - d.Set(names.AttrName, connector.ConnectorName) - d.Set(names.AttrVersion, connector.CurrentVersion) + for _, v := range page.Connectors { + if filter(&v) { + output = append(output, v) + } + } + } - return diags + return output, nil +} + +func findConnectorByName(ctx context.Context, conn *kafkaconnect.Client, name string) (*awstypes.ConnectorSummary, error) { + input := &kafkaconnect.ListConnectorsInput{} + + return findConnector(ctx, conn, input, func(v *awstypes.ConnectorSummary) bool { + return aws.ToString(v.ConnectorName) == name + }) } diff --git a/internal/service/kafkaconnect/connector_data_source_test.go b/internal/service/kafkaconnect/connector_data_source_test.go index 6c7abd35e50..7cbe376cdcf 100644 --- a/internal/service/kafkaconnect/connector_data_source_test.go +++ b/internal/service/kafkaconnect/connector_data_source_test.go @@ -22,7 +22,6 @@ func TestAccKafkaConnectConnectorDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -32,6 +31,7 @@ func TestAccKafkaConnectConnectorDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, names.AttrDescription, dataSourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrName), resource.TestCheckResourceAttrPair(resourceName, names.AttrVersion, dataSourceName, names.AttrVersion), + 
resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, dataSourceName, names.AttrTags), ), }, }, diff --git a/internal/service/kafkaconnect/connector_test.go b/internal/service/kafkaconnect/connector_test.go index 6ccfd01e66a..90a0d3197f5 100644 --- a/internal/service/kafkaconnect/connector_test.go +++ b/internal/service/kafkaconnect/connector_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/kafkaconnect" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -25,7 +24,7 @@ func TestAccKafkaConnectConnector_basic(t *testing.T) { resourceName := "aws_mskconnect_connector.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckConnectorDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -68,6 +67,8 @@ func TestAccKafkaConnectConnector_basic(t *testing.T) { "custom_plugin.#": acctest.Ct1, }), resource.TestCheckResourceAttrSet(resourceName, "service_execution_role_arn"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), resource.TestCheckResourceAttrSet(resourceName, names.AttrVersion), resource.TestCheckResourceAttr(resourceName, "worker_configuration.#", acctest.Ct0), ), @@ -87,7 +88,7 @@ func TestAccKafkaConnectConnector_disappears(t *testing.T) { resourceName := "aws_mskconnect_connector.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, 
kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckConnectorDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -110,7 +111,7 @@ func TestAccKafkaConnectConnector_update(t *testing.T) { resourceName := "aws_mskconnect_connector.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckConnectorDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -229,6 +230,51 @@ func TestAccKafkaConnectConnector_update(t *testing.T) { }) } +func TestAccKafkaConnectConnector_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_mskconnect_connector.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), + CheckDestroy: testAccCheckConnectorDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccConnectorConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, 
+ }, + { + Config: testAccConnectorConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccConnectorConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckConnectorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func testAccCheckConnectorExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -236,11 +282,7 @@ func testAccCheckConnectorExists(ctx context.Context, n string) resource.TestChe return fmt.Errorf("not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No MSK Connect Connector ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) _, err := tfkafkaconnect.FindConnectorByARN(ctx, conn, rs.Primary.ID) @@ -250,7 +292,7 @@ func testAccCheckConnectorExists(ctx context.Context, n string) resource.TestChe func testAccCheckConnectorDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_mskconnect_connector" { @@ -274,46 +316,8 @@ func 
testAccCheckConnectorDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccConnectorBaseConfig(rName string) string { - return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "10.10.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - vpc_id = aws_vpc.test.id - cidr_block = "10.10.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test2" { - vpc_id = aws_vpc.test.id - cidr_block = "10.10.2.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test3" { - vpc_id = aws_vpc.test.id - cidr_block = "10.10.3.0/24" - availability_zone = data.aws_availability_zones.available.names[2] - - tags = { - Name = %[1]q - } -} - +func testAccConnectorConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 3), fmt.Sprintf(` resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id name = %[1]q @@ -396,11 +400,11 @@ EOF resource "aws_msk_cluster" "test" { cluster_name = %[1]q - kafka_version = "2.2.1" + kafka_version = "2.7.1" number_of_broker_nodes = 3 broker_node_group_info { - client_subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + client_subnets = aws_subnet.test[*].id instance_type = "kafka.m5.large" security_groups = [aws_security_group.test.id] @@ -417,7 +421,7 @@ resource "aws_msk_cluster" "test" { func testAccConnectorConfig_basic(rName string) string { return acctest.ConfigCompose( testAccCustomPluginConfig_basic(rName), - testAccConnectorBaseConfig(rName), + testAccConnectorConfig_base(rName), fmt.Sprintf(` resource "aws_mskconnect_connector" "test" { name = %[1]q @@ -443,7 +447,7 @@ resource "aws_mskconnect_connector" "test" { vpc { security_groups = [aws_security_group.test.id] - subnets = [aws_subnet.test1.id, 
aws_subnet.test2.id, aws_subnet.test3.id] + subnets = aws_subnet.test[*].id } } } @@ -465,6 +469,10 @@ resource "aws_mskconnect_connector" "test" { service_execution_role_arn = aws_iam_role.test.arn + tags = { + key1 = "value1" + } + depends_on = [aws_iam_role_policy.test, aws_vpc_endpoint.test] } `, rName)) @@ -474,7 +482,7 @@ func testAccConnectorConfig_allAttributes(rName string) string { return acctest.ConfigCompose( testAccCustomPluginConfig_basic(rName), testAccWorkerConfigurationConfig_basic(rName), - testAccConnectorBaseConfig(rName), + testAccConnectorConfig_base(rName), fmt.Sprintf(` resource "aws_cloudwatch_log_group" "test" { name = %[1]q @@ -513,7 +521,7 @@ resource "aws_mskconnect_connector" "test" { vpc { security_groups = [aws_security_group.test.id] - subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + subnets = aws_subnet.test[*].id } } } @@ -566,7 +574,7 @@ func testAccConnectorConfig_allAttributesCapacityUpdated(rName string) string { return acctest.ConfigCompose( testAccCustomPluginConfig_basic(rName), testAccWorkerConfigurationConfig_basic(rName), - testAccConnectorBaseConfig(rName), + testAccConnectorConfig_base(rName), fmt.Sprintf(` resource "aws_cloudwatch_log_group" "test" { name = %[1]q @@ -595,7 +603,7 @@ resource "aws_mskconnect_connector" "test" { vpc { security_groups = [aws_security_group.test.id] - subnets = [aws_subnet.test1.id, aws_subnet.test2.id, aws_subnet.test3.id] + subnets = aws_subnet.test[*].id } } } @@ -643,3 +651,124 @@ resource "aws_mskconnect_connector" "test" { } `, rName)) } + +func testAccConnectorConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose( + testAccCustomPluginConfig_basic(rName), + testAccConnectorConfig_base(rName), + fmt.Sprintf(` +resource "aws_mskconnect_connector" "test" { + name = %[1]q + + kafkaconnect_version = "2.7.1" + + capacity { + autoscaling { + min_worker_count = 1 + max_worker_count = 2 + } + } + + connector_configuration = { + 
"connector.class" = "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector" + "tasks.max" = "1" + "topics" = "t1" + } + + kafka_cluster { + apache_kafka_cluster { + bootstrap_servers = aws_msk_cluster.test.bootstrap_brokers_tls + + vpc { + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + } + } + } + + kafka_cluster_client_authentication { + authentication_type = "NONE" + } + + kafka_cluster_encryption_in_transit { + encryption_type = "TLS" + } + + plugin { + custom_plugin { + arn = aws_mskconnect_custom_plugin.test.arn + revision = aws_mskconnect_custom_plugin.test.latest_revision + } + } + + service_execution_role_arn = aws_iam_role.test.arn + + tags = { + %[2]q = %[3]q + } + + depends_on = [aws_iam_role_policy.test, aws_vpc_endpoint.test] +} +`, rName, tagKey1, tagValue1)) +} + +func testAccConnectorConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose( + testAccCustomPluginConfig_basic(rName), + testAccConnectorConfig_base(rName), + fmt.Sprintf(` +resource "aws_mskconnect_connector" "test" { + name = %[1]q + + kafkaconnect_version = "2.7.1" + + capacity { + autoscaling { + min_worker_count = 1 + max_worker_count = 2 + } + } + + connector_configuration = { + "connector.class" = "com.github.jcustenborder.kafka.connect.simulator.SimulatorSinkConnector" + "tasks.max" = "1" + "topics" = "t1" + } + + kafka_cluster { + apache_kafka_cluster { + bootstrap_servers = aws_msk_cluster.test.bootstrap_brokers_tls + + vpc { + security_groups = [aws_security_group.test.id] + subnets = aws_subnet.test[*].id + } + } + } + + kafka_cluster_client_authentication { + authentication_type = "NONE" + } + + kafka_cluster_encryption_in_transit { + encryption_type = "TLS" + } + + plugin { + custom_plugin { + arn = aws_mskconnect_custom_plugin.test.arn + revision = aws_mskconnect_custom_plugin.test.latest_revision + } + } + + service_execution_role_arn = aws_iam_role.test.arn + + tags = { 
+ %[2]q = %[3]q + %[4]q = %[5]q + } + + depends_on = [aws_iam_role_policy.test, aws_vpc_endpoint.test] +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/kafkaconnect/custom_plugin.go b/internal/service/kafkaconnect/custom_plugin.go index 2ec5438e504..f86c357c382 100644 --- a/internal/service/kafkaconnect/custom_plugin.go +++ b/internal/service/kafkaconnect/custom_plugin.go @@ -5,27 +5,33 @@ package kafkaconnect import ( "context" + "fmt" "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_mskconnect_custom_plugin") -func ResourceCustomPlugin() *schema.Resource { +// @SDKResource("aws_mskconnect_custom_plugin", name="Custom Plugin") +// @Tags(identifierAttribute="arn") +func resourceCustomPlugin() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceCustomPluginCreate, ReadWithoutTimeout: resourceCustomPluginRead, + UpdateWithoutTimeout: resourceCustomPluginUpdate, DeleteWithoutTimeout: 
resourceCustomPluginDelete, Importer: &schema.ResourceImporter{ @@ -43,10 +49,10 @@ func ResourceCustomPlugin() *schema.Resource { Computed: true, }, names.AttrContentType: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice(kafkaconnect.CustomPluginContentType_Values(), false), + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[awstypes.CustomPluginContentType](), }, names.AttrDescription: { Type: schema.TypeString, @@ -102,38 +108,39 @@ func ResourceCustomPlugin() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), }, + + CustomizeDiff: verify.SetTagsDiff, } } func resourceCustomPluginCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) name := d.Get(names.AttrName).(string) input := &kafkaconnect.CreateCustomPluginInput{ - ContentType: aws.String(d.Get(names.AttrContentType).(string)), + ContentType: awstypes.CustomPluginContentType(d.Get(names.AttrContentType).(string)), Location: expandCustomPluginLocation(d.Get(names.AttrLocation).([]interface{})[0].(map[string]interface{})), Name: aws.String(name), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk(names.AttrDescription); ok { input.Description = aws.String(v.(string)) } - log.Printf("[DEBUG] Creating MSK Connect Custom Plugin: %s", input) - output, err := conn.CreateCustomPluginWithContext(ctx, input) + output, err := conn.CreateCustomPlugin(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK Connect Custom Plugin (%s): %s", name, err) } - d.SetId(aws.StringValue(output.CustomPluginArn)) + d.SetId(aws.ToString(output.CustomPluginArn)) - _, err = waitCustomPluginCreated(ctx, conn, d.Id(), 
d.Timeout(schema.TimeoutCreate)) - - if err != nil { + if _, err := waitCustomPluginCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Custom Plugin (%s) create: %s", d.Id(), err) } @@ -142,10 +149,9 @@ func resourceCustomPluginCreate(ctx context.Context, d *schema.ResourceData, met func resourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - plugin, err := FindCustomPluginByARN(ctx, conn, d.Id()) + plugin, err := findCustomPluginByARN(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] MSK Connect Custom Plugin (%s) not found, removing from state", d.Id()) @@ -181,17 +187,24 @@ func resourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, meta return diags } -func resourceCustomPluginDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func resourceCustomPluginUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + // This update function is for updating tags only - there is no update action for this resource. + + return append(diags, resourceCustomPluginRead(ctx, d, meta)...) 
+} + +func resourceCustomPluginDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) log.Printf("[DEBUG] Deleting MSK Connect Custom Plugin: %s", d.Id()) - _, err := conn.DeleteCustomPluginWithContext(ctx, &kafkaconnect.DeleteCustomPluginInput{ + _, err := conn.DeleteCustomPlugin(ctx, &kafkaconnect.DeleteCustomPluginInput{ CustomPluginArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { + if errs.IsA[*awstypes.NotFoundException](err) { return diags } @@ -199,21 +212,98 @@ func resourceCustomPluginDelete(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "deleting MSK Connect Custom Plugin (%s): %s", d.Id(), err) } - _, err = waitCustomPluginDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) - - if err != nil { + if _, err := waitCustomPluginDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Custom Plugin (%s) delete: %s", d.Id(), err) } return diags } -func expandCustomPluginLocation(tfMap map[string]interface{}) *kafkaconnect.CustomPluginLocation { +func findCustomPluginByARN(ctx context.Context, conn *kafkaconnect.Client, arn string) (*kafkaconnect.DescribeCustomPluginOutput, error) { + input := &kafkaconnect.DescribeCustomPluginInput{ + CustomPluginArn: aws.String(arn), + } + + output, err := conn.DescribeCustomPlugin(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusCustomPlugin(ctx context.Context, conn *kafkaconnect.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + 
output, err := findCustomPluginByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.CustomPluginState), nil + } +} + +func waitCustomPluginCreated(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.CustomPluginStateCreating), + Target: enum.Slice(awstypes.CustomPluginStateActive), + Refresh: statusCustomPlugin(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeCustomPluginOutput); ok { + if state, stateDescription := output.CustomPluginState, output.StateDescription; state == awstypes.CustomPluginStateCreateFailed && stateDescription != nil { + tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.ToString(stateDescription.Code), aws.ToString(stateDescription.Message))) + } + + return output, err + } + + return nil, err +} + +func waitCustomPluginDeleted(ctx context.Context, conn *kafkaconnect.Client, arn string, timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.CustomPluginStateDeleting), + Target: []string{}, + Refresh: statusCustomPlugin(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeCustomPluginOutput); ok { + return output, err + } + + return nil, err +} + +func expandCustomPluginLocation(tfMap map[string]interface{}) *awstypes.CustomPluginLocation { if tfMap == nil { return nil } - apiObject := &kafkaconnect.CustomPluginLocation{} + apiObject := &awstypes.CustomPluginLocation{} if v, ok := tfMap["s3"].([]interface{}); ok && len(v) > 0 { apiObject.S3Location = expandS3Location(v[0].(map[string]interface{})) 
@@ -222,12 +312,12 @@ func expandCustomPluginLocation(tfMap map[string]interface{}) *kafkaconnect.Cust return apiObject } -func expandS3Location(tfMap map[string]interface{}) *kafkaconnect.S3Location { +func expandS3Location(tfMap map[string]interface{}) *awstypes.S3Location { if tfMap == nil { return nil } - apiObject := &kafkaconnect.S3Location{} + apiObject := &awstypes.S3Location{} if v, ok := tfMap["bucket_arn"].(string); ok && v != "" { apiObject.BucketArn = aws.String(v) @@ -244,7 +334,7 @@ func expandS3Location(tfMap map[string]interface{}) *kafkaconnect.S3Location { return apiObject } -func flattenCustomPluginLocationDescription(apiObject *kafkaconnect.CustomPluginLocationDescription) map[string]interface{} { +func flattenCustomPluginLocationDescription(apiObject *awstypes.CustomPluginLocationDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -258,7 +348,7 @@ func flattenCustomPluginLocationDescription(apiObject *kafkaconnect.CustomPlugin return tfMap } -func flattenS3LocationDescription(apiObject *kafkaconnect.S3LocationDescription) map[string]interface{} { +func flattenS3LocationDescription(apiObject *awstypes.S3LocationDescription) map[string]interface{} { if apiObject == nil { return nil } @@ -266,15 +356,15 @@ func flattenS3LocationDescription(apiObject *kafkaconnect.S3LocationDescription) tfMap := map[string]interface{}{} if v := apiObject.BucketArn; v != nil { - tfMap["bucket_arn"] = aws.StringValue(v) + tfMap["bucket_arn"] = aws.ToString(v) } if v := apiObject.FileKey; v != nil { - tfMap["file_key"] = aws.StringValue(v) + tfMap["file_key"] = aws.ToString(v) } if v := apiObject.ObjectVersion; v != nil { - tfMap["object_version"] = aws.StringValue(v) + tfMap["object_version"] = aws.ToString(v) } return tfMap diff --git a/internal/service/kafkaconnect/custom_plugin_data_source.go b/internal/service/kafkaconnect/custom_plugin_data_source.go index 792bd560029..ccde1fd7734 100644 --- 
a/internal/service/kafkaconnect/custom_plugin_data_source.go +++ b/internal/service/kafkaconnect/custom_plugin_data_source.go @@ -6,18 +6,22 @@ package kafkaconnect import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_mskconnect_custom_plugin") -func DataSourceCustomPlugin() *schema.Resource { +// @SDKDataSource("aws_mskconnect_custom_plugin", name="Custom Plugin") +// @Tags(identifierAttribute="arn") +func dataSourceCustomPlugin() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceCustomPluginRead, @@ -42,51 +46,24 @@ func DataSourceCustomPlugin() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchemaComputed(), }, } } func dataSourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - name := d.Get(names.AttrName) - var output []*kafkaconnect.CustomPluginSummary - - err := conn.ListCustomPluginsPagesWithContext(ctx, &kafkaconnect.ListCustomPluginsInput{}, func(page *kafkaconnect.ListCustomPluginsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := 
range page.CustomPlugins { - if aws.StringValue(v.Name) == name { - output = append(output, v) - } - } - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing MSK Connect Custom Plugins: %s", err) - } - - if len(output) == 0 || output[0] == nil { - err = tfresource.NewEmptyResultError(name) - } else if count := len(output); count > 1 { - err = tfresource.NewTooManyResultsError(count, name) - } + plugin, err := findCustomPluginByName(ctx, conn, d.Get(names.AttrName).(string)) if err != nil { return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Connect Custom Plugin", err)) } - plugin := output[0] - - d.SetId(aws.StringValue(plugin.CustomPluginArn)) - - d.Set(names.AttrARN, plugin.CustomPluginArn) + arn := aws.ToString(plugin.CustomPluginArn) + d.SetId(arn) + d.Set(names.AttrARN, arn) d.Set(names.AttrDescription, plugin.Description) d.Set(names.AttrName, plugin.Name) d.Set(names.AttrState, plugin.CustomPluginState) @@ -99,3 +76,42 @@ func dataSourceCustomPluginRead(ctx context.Context, d *schema.ResourceData, met return diags } + +func findCustomPlugin(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListCustomPluginsInput, filter tfslices.Predicate[*awstypes.CustomPluginSummary]) (*awstypes.CustomPluginSummary, error) { + output, err := findCustomPlugins(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findCustomPlugins(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListCustomPluginsInput, filter tfslices.Predicate[*awstypes.CustomPluginSummary]) ([]awstypes.CustomPluginSummary, error) { + var output []awstypes.CustomPluginSummary + + pages := kafkaconnect.NewListCustomPluginsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.CustomPlugins { + if filter(&v) { + output = 
append(output, v) + } + } + } + + return output, nil +} + +func findCustomPluginByName(ctx context.Context, conn *kafkaconnect.Client, name string) (*awstypes.CustomPluginSummary, error) { + input := &kafkaconnect.ListCustomPluginsInput{} + + return findCustomPlugin(ctx, conn, input, func(v *awstypes.CustomPluginSummary) bool { + return aws.ToString(v.Name) == name + }) +} diff --git a/internal/service/kafkaconnect/custom_plugin_data_source_test.go b/internal/service/kafkaconnect/custom_plugin_data_source_test.go index b6ba398afc9..99e2112b156 100644 --- a/internal/service/kafkaconnect/custom_plugin_data_source_test.go +++ b/internal/service/kafkaconnect/custom_plugin_data_source_test.go @@ -23,7 +23,6 @@ func TestAccKafkaConnectCustomPluginDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -34,6 +33,7 @@ func TestAccKafkaConnectCustomPluginDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "latest_revision", dataSourceName, "latest_revision"), resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrName), resource.TestCheckResourceAttrPair(resourceName, names.AttrState, dataSourceName, names.AttrState), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, dataSourceName, names.AttrTags), ), }, }, @@ -52,6 +52,10 @@ resource "aws_mskconnect_custom_plugin" "test" { file_key = aws_s3_object.test.key } } + + tags = { + key1 = "value1" + } } data "aws_mskconnect_custom_plugin" "test" { diff --git a/internal/service/kafkaconnect/custom_plugin_test.go b/internal/service/kafkaconnect/custom_plugin_test.go index 86a6fc1ee85..dc54884b6b6 100644 --- 
a/internal/service/kafkaconnect/custom_plugin_test.go +++ b/internal/service/kafkaconnect/custom_plugin_test.go @@ -8,7 +8,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/kafkaconnect" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -25,7 +24,7 @@ func TestAccKafkaConnectCustomPlugin_basic(t *testing.T) { resourceName := "aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -62,7 +61,7 @@ func TestAccKafkaConnectCustomPlugin_disappears(t *testing.T) { resourceName := "aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -85,7 +84,7 @@ func TestAccKafkaConnectCustomPlugin_description(t *testing.T) { resourceName := "aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: 
acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -106,13 +105,58 @@ func TestAccKafkaConnectCustomPlugin_description(t *testing.T) { }) } +func TestAccKafkaConnectCustomPlugin_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_mskconnect_custom_plugin.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), + CheckDestroy: testAccCheckCustomPluginDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccCustomPluginConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPluginExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCustomPluginConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPluginExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccCustomPluginConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckCustomPluginExists(ctx, resourceName), + 
resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func TestAccKafkaConnectCustomPlugin_objectVersion(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_mskconnect_custom_plugin.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), CheckDestroy: testAccCheckCustomPluginDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -140,11 +184,7 @@ func testAccCheckCustomPluginExists(ctx context.Context, name string) resource.T return fmt.Errorf("Not found: %s", name) } - if rs.Primary.ID == "" { - return fmt.Errorf("No MSK Connect Custom Plugin ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) _, err := tfkafkaconnect.FindCustomPluginByARN(ctx, conn, rs.Primary.ID) @@ -154,7 +194,7 @@ func testAccCheckCustomPluginExists(ctx context.Context, name string) resource.T func testAccCheckCustomPluginDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_mskconnect_custom_plugin" { @@ -251,3 +291,44 @@ resource "aws_mskconnect_custom_plugin" "test" { } `, rName)) } + +func testAccCustomPluginConfig_tags1(rName, tagKey1, tagValue1 string) string { + return 
acctest.ConfigCompose(testAccCustomPluginBaseConfig(rName, false), fmt.Sprintf(` +resource "aws_mskconnect_custom_plugin" "test" { + name = %[1]q + content_type = "ZIP" + + location { + s3 { + bucket_arn = aws_s3_bucket.test.arn + file_key = aws_s3_object.test.key + } + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccCustomPluginConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccCustomPluginBaseConfig(rName, false), fmt.Sprintf(` +resource "aws_mskconnect_custom_plugin" "test" { + name = %[1]q + content_type = "ZIP" + + location { + s3 { + bucket_arn = aws_s3_bucket.test.arn + file_key = aws_s3_object.test.key + } + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/kafkaconnect/exports_test.go b/internal/service/kafkaconnect/exports_test.go new file mode 100644 index 00000000000..7b4d8ed191d --- /dev/null +++ b/internal/service/kafkaconnect/exports_test.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package kafkaconnect + +// Exports for use in tests only. +var ( + ResourceConnector = resourceConnector + ResourceCustomPlugin = resourceCustomPlugin + ResourceWorkerConfiguration = resourceWorkerConfiguration + + FindConnectorByARN = findConnectorByARN + FindCustomPluginByARN = findCustomPluginByARN + FindWorkerConfigurationByARN = findWorkerConfigurationByARN +) diff --git a/internal/service/kafkaconnect/find.go b/internal/service/kafkaconnect/find.go deleted file mode 100644 index 52cab9333a0..00000000000 --- a/internal/service/kafkaconnect/find.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package kafkaconnect - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func FindConnectorByARN(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) (*kafkaconnect.DescribeConnectorOutput, error) { - input := &kafkaconnect.DescribeConnectorInput{ - ConnectorArn: aws.String(arn), - } - - output, err := conn.DescribeConnectorWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - -func FindCustomPluginByARN(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) (*kafkaconnect.DescribeCustomPluginOutput, error) { - input := &kafkaconnect.DescribeCustomPluginInput{ - CustomPluginArn: aws.String(arn), - } - - output, err := conn.DescribeCustomPluginWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, kafkaconnect.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - -func FindWorkerConfigurationByARN(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { - input := &kafkaconnect.DescribeWorkerConfigurationInput{ - WorkerConfigurationArn: aws.String(arn), - } - - output, err := conn.DescribeWorkerConfigurationWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, 
kafkaconnect.ErrCodeNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} diff --git a/internal/service/kafkaconnect/generate.go b/internal/service/kafkaconnect/generate.go index aeb7bedb679..8d7866f815e 100644 --- a/internal/service/kafkaconnect/generate.go +++ b/internal/service/kafkaconnect/generate.go @@ -2,6 +2,7 @@ // SPDX-License-Identifier: MPL-2.0 //go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ServiceTagsMap -ListTags -UpdateTags -KVTValues -SkipTypesImp // ONLY generate directives and package declaration! Do not add anything else to this file. package kafkaconnect diff --git a/internal/service/kafkaconnect/service_endpoint_resolver_gen.go b/internal/service/kafkaconnect/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..ec9c3ea8ab5 --- /dev/null +++ b/internal/service/kafkaconnect/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package kafkaconnect + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ kafkaconnect_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver kafkaconnect_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: kafkaconnect_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params kafkaconnect_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up kafkaconnect endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*kafkaconnect_sdkv2.Options) { + return func(o *kafkaconnect_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/kafkaconnect/service_endpoints_gen_test.go b/internal/service/kafkaconnect/service_endpoints_gen_test.go index 724ce8ef02b..64f13b5747d 100644 --- a/internal/service/kafkaconnect/service_endpoints_gen_test.go +++ b/internal/service/kafkaconnect/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package kafkaconnect_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - kafkaconnect_sdkv1 "github.com/aws/aws-sdk-go/service/kafkaconnect" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": 
{ @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := kafkaconnect_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(kafkaconnect_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), kafkaconnect_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := kafkaconnect_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(kafkaconnect_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), kafkaconnect_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.KafkaConnectConn(ctx) - - req, _ := client.ListConnectorsRequest(&kafkaconnect_sdkv1.ListConnectorsInput{}) + client := meta.KafkaConnectClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListConnectors(ctx, 
&kafkaconnect_sdkv2.ListConnectorsInput{}, + func(opts *kafkaconnect_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( 
+ retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, 
middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/kafkaconnect/service_package_gen.go b/internal/service/kafkaconnect/service_package_gen.go index cec5bb0353f..5a4f586bc5b 100644 --- a/internal/service/kafkaconnect/service_package_gen.go +++ b/internal/service/kafkaconnect/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package kafkaconnect import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - kafkaconnect_sdkv1 "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + kafkaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -28,16 +25,28 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceConnector, + Factory: dataSourceConnector, TypeName: "aws_mskconnect_connector", + Name: "Connector", + Tags: 
&types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, { - Factory: DataSourceCustomPlugin, + Factory: dataSourceCustomPlugin, TypeName: "aws_mskconnect_custom_plugin", + Name: "Custom Plugin", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, { - Factory: DataSourceWorkerConfiguration, + Factory: dataSourceWorkerConfiguration, TypeName: "aws_mskconnect_worker_configuration", + Name: "Worker Configuration", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, } } @@ -45,16 +54,28 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceConnector, + Factory: resourceConnector, TypeName: "aws_mskconnect_connector", + Name: "Connector", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, { - Factory: ResourceCustomPlugin, + Factory: resourceCustomPlugin, TypeName: "aws_mskconnect_custom_plugin", + Name: "Custom Plugin", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, { - Factory: ResourceWorkerConfiguration, + Factory: resourceWorkerConfiguration, TypeName: "aws_mskconnect_worker_configuration", + Name: "Worker Configuration", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, }, } } @@ -63,25 +84,14 @@ func (p *servicePackage) ServicePackageName() string { return names.KafkaConnect } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*kafkaconnect_sdkv1.KafkaConnect, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*kafkaconnect_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return kafkaconnect_sdkv1.New(sess.Copy(&cfg)), nil + return kafkaconnect_sdkv2.NewFromConfig(cfg, + kafkaconnect_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/kafkaconnect/status.go b/internal/service/kafkaconnect/status.go deleted file mode 100644 index acb2f5e6596..00000000000 --- a/internal/service/kafkaconnect/status.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package kafkaconnect - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func statusConnectorState(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindConnectorByARN(ctx, conn, arn) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.ConnectorState), nil - } -} - -func statusCustomPluginState(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindCustomPluginByARN(ctx, conn, arn) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.CustomPluginState), nil - } -} diff --git a/internal/service/kafkaconnect/sweep.go b/internal/service/kafkaconnect/sweep.go index 37214bf32cc..8c957e6e321 100644 --- a/internal/service/kafkaconnect/sweep.go +++ b/internal/service/kafkaconnect/sweep.go @@ -7,11 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -27,6 +27,14 @@ func RegisterSweepers() { "aws_mskconnect_connector", }, }) + + resource.AddTestSweepers("aws_mskconnect_worker_configuration", &resource.Sweeper{ + 
Name: "aws_mskconnect_worker_configuration", + F: sweepWorkerConfigurations, + Dependencies: []string{ + "aws_mskconnect_connector", + }, + }) } func sweepConnectors(region string) error { @@ -35,33 +43,30 @@ func sweepConnectors(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.KafkaConnectConn(ctx) + conn := client.KafkaConnectClient(ctx) input := &kafkaconnect.ListConnectorsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListConnectorsPagesWithContext(ctx, input, func(page *kafkaconnect.ListConnectorsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := kafkaconnect.NewListConnectorsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping MSK Connect Connector sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing MSK Connect Connectors (%s): %w", region, err) } for _, v := range page.Connectors { - r := ResourceConnector() + r := resourceConnector() d := r.Data(nil) - d.SetId(aws.StringValue(v.ConnectorArn)) + d.SetId(aws.ToString(v.ConnectorArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping MSK Connect Connector sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing MSK Connect Connectors (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -79,39 +84,77 @@ func sweepCustomPlugins(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.KafkaConnectConn(ctx) + conn := client.KafkaConnectClient(ctx) input := &kafkaconnect.ListCustomPluginsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListCustomPluginsPagesWithContext(ctx, input, func(page 
*kafkaconnect.ListCustomPluginsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := kafkaconnect.NewListCustomPluginsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping MSK Connect Custom Plugin sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing MSK Connect Custom Plugins (%s): %w", region, err) } for _, v := range page.CustomPlugins { - r := ResourceCustomPlugin() + r := resourceCustomPlugin() d := r.Data(nil) - d.SetId(aws.StringValue(v.CustomPluginArn)) + d.SetId(aws.ToString(v.CustomPluginArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } + } - return !lastPage - }) + err = sweep.SweepOrchestrator(ctx, sweepResources) - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping MSK Connect Custom Plugin sweep for %s: %s", region, err) - return nil + if err != nil { + return fmt.Errorf("error sweeping MSK Connect Custom Plugins (%s): %w", region, err) } + return nil +} + +func sweepWorkerConfigurations(region string) error { + ctx := sweep.Context(region) + client, err := sweep.SharedRegionalSweepClient(ctx, region) if err != nil { - return fmt.Errorf("error listing MSK Connect Custom Plugins (%s): %w", region, err) + return fmt.Errorf("error getting client: %s", err) + } + conn := client.KafkaConnectClient(ctx) + input := &kafkaconnect.ListWorkerConfigurationsInput{} + sweepResources := make([]sweep.Sweepable, 0) + + pages := kafkaconnect.NewListWorkerConfigurationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping MSK Connect Worker Configuration sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing MSK Connect Worker Configurations (%s): %w", region, err) + } + + for _, v := range 
page.WorkerConfigurations { + r := resourceWorkerConfiguration() + d := r.Data(nil) + d.SetId(aws.ToString(v.WorkerConfigurationArn)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } } err = sweep.SweepOrchestrator(ctx, sweepResources) if err != nil { - return fmt.Errorf("error sweeping MSK Connect Custom Plugins (%s): %w", region, err) + return fmt.Errorf("error sweeping MSK Connect Worker Configurations (%s): %w", region, err) } return nil diff --git a/internal/service/kafkaconnect/tags_gen.go b/internal/service/kafkaconnect/tags_gen.go new file mode 100644 index 00000000000..27bd2906e04 --- /dev/null +++ b/internal/service/kafkaconnect/tags_gen.go @@ -0,0 +1,128 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package kafkaconnect + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists kafkaconnect service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func listTags(ctx context.Context, conn *kafkaconnect.Client, identifier string, optFns ...func(*kafkaconnect.Options)) (tftags.KeyValueTags, error) { + input := &kafkaconnect.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, input, optFns...) + + if err != nil { + return tftags.New(ctx, nil), err + } + + return KeyValueTags(ctx, output.Tags), nil +} + +// ListTags lists kafkaconnect service tags and set them in Context. 
+// It is called from outside this package. +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).KafkaConnectClient(ctx), identifier) + + if err != nil { + return err + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// map[string]string handling + +// Tags returns kafkaconnect service tags. +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() +} + +// KeyValueTags creates tftags.KeyValueTags from kafkaconnect service tags. +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { + return tftags.New(ctx, tags) +} + +// getTagsIn returns kafkaconnect service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) map[string]string { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets kafkaconnect service tags in Context. +func setTagsOut(ctx context.Context, tags map[string]string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) + } +} + +// updateTags updates kafkaconnect service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func updateTags(ctx context.Context, conn *kafkaconnect.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*kafkaconnect.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.KafkaConnect) + if len(removedTags) > 0 { + input := &kafkaconnect.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("untagging resource (%s): %w", identifier, err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.KafkaConnect) + if len(updatedTags) > 0 { + input := &kafkaconnect.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: Tags(updatedTags), + } + + _, err := conn.TagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// UpdateTags updates kafkaconnect service tags. +// It is called from outside this package. +func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).KafkaConnectClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/kafkaconnect/wait.go b/internal/service/kafkaconnect/wait.go deleted file mode 100644 index 944a30c5c44..00000000000 --- a/internal/service/kafkaconnect/wait.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package kafkaconnect - -import ( - "context" - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" -) - -func waitConnectorCreated(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.ConnectorStateCreating}, - Target: []string{kafkaconnect.ConnectorStateRunning}, - Refresh: statusConnectorState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { - if state, stateDescription := aws.StringValue(output.ConnectorState), output.StateDescription; state == kafkaconnect.ConnectorStateFailed && stateDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateDescription.Code), aws.StringValue(stateDescription.Message))) - } - - return output, err - } - - return nil, err -} - -func waitConnectorDeleted(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.ConnectorStateDeleting}, - Target: []string{}, - Refresh: statusConnectorState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { - if state, stateDescription := aws.StringValue(output.ConnectorState), output.StateDescription; state == kafkaconnect.ConnectorStateFailed && stateDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateDescription.Code), 
aws.StringValue(stateDescription.Message))) - } - - return output, err - } - - return nil, err -} - -func waitConnectorUpdated(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeConnectorOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.ConnectorStateUpdating}, - Target: []string{kafkaconnect.ConnectorStateRunning}, - Refresh: statusConnectorState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeConnectorOutput); ok { - if state, stateDescription := aws.StringValue(output.ConnectorState), output.StateDescription; state == kafkaconnect.ConnectorStateFailed && stateDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateDescription.Code), aws.StringValue(stateDescription.Message))) - } - - return output, err - } - - return nil, err -} - -func waitCustomPluginCreated(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.CustomPluginStateCreating}, - Target: []string{kafkaconnect.CustomPluginStateActive}, - Refresh: statusCustomPluginState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeCustomPluginOutput); ok { - if state, stateDescription := aws.StringValue(output.CustomPluginState), output.StateDescription; state == kafkaconnect.CustomPluginStateCreateFailed && stateDescription != nil { - tfresource.SetLastError(err, fmt.Errorf("%s: %s", aws.StringValue(stateDescription.Code), aws.StringValue(stateDescription.Message))) - } - - return output, err - } - - return nil, err -} - -func waitCustomPluginDeleted(ctx context.Context, conn *kafkaconnect.KafkaConnect, arn string, 
timeout time.Duration) (*kafkaconnect.DescribeCustomPluginOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{kafkaconnect.CustomPluginStateDeleting}, - Target: []string{}, - Refresh: statusCustomPluginState(ctx, conn, arn), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*kafkaconnect.DescribeCustomPluginOutput); ok { - return output, err - } - - return nil, err -} diff --git a/internal/service/kafkaconnect/worker_configuration.go b/internal/service/kafkaconnect/worker_configuration.go index ced642449ad..c7c0c25ddeb 100644 --- a/internal/service/kafkaconnect/worker_configuration.go +++ b/internal/service/kafkaconnect/worker_configuration.go @@ -6,30 +6,43 @@ package kafkaconnect import ( "context" "log" + "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" itypes "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_mskconnect_worker_configuration") -func ResourceWorkerConfiguration() *schema.Resource { +// 
@SDKResource("aws_mskconnect_worker_configuration", name="Worker Configuration") +// @Tags(identifierAttribute="arn") +func resourceWorkerConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceWorkerConfigurationCreate, ReadWithoutTimeout: resourceWorkerConfigurationRead, - DeleteWithoutTimeout: schema.NoopContext, + UpdateWithoutTimeout: resourceWorkerConfigurationUpdate, + DeleteWithoutTimeout: resourceWorkerConfigurationDelete, Importer: &schema.ResourceImporter{ StateContext: schema.ImportStatePassthroughContext, }, + Timeouts: &schema.ResourceTimeout{ + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -62,43 +75,45 @@ func ResourceWorkerConfiguration() *schema.Resource { } }, }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), }, + + CustomizeDiff: verify.SetTagsDiff, } } func resourceWorkerConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) name := d.Get(names.AttrName).(string) input := &kafkaconnect.CreateWorkerConfigurationInput{ Name: aws.String(name), PropertiesFileContent: flex.StringValueToBase64String(d.Get("properties_file_content").(string)), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk(names.AttrDescription); ok { input.Description = aws.String(v.(string)) } - log.Printf("[DEBUG] Creating MSK Connect Worker Configuration: %s", input) - output, err := conn.CreateWorkerConfigurationWithContext(ctx, input) + output, err := conn.CreateWorkerConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating MSK Connect Worker Configuration (%s): %s", name, err) } - d.SetId(aws.StringValue(output.WorkerConfigurationArn)) + d.SetId(aws.ToString(output.WorkerConfigurationArn)) return 
append(diags, resourceWorkerConfigurationRead(ctx, d, meta)...) } func resourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - config, err := FindWorkerConfigurationByARN(ctx, conn, d.Id()) + config, err := findWorkerConfigurationByARN(ctx, conn, d.Id()) if tfresource.NotFound(err) && !d.IsNewResource() { log.Printf("[WARN] MSK Connect Worker Configuration (%s) not found, removing from state", d.Id()) @@ -116,7 +131,7 @@ func resourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData if config.LatestRevision != nil { d.Set("latest_revision", config.LatestRevision.Revision) - d.Set("properties_file_content", decodePropertiesFileContent(aws.StringValue(config.LatestRevision.PropertiesFileContent))) + d.Set("properties_file_content", decodePropertiesFileContent(aws.ToString(config.LatestRevision.PropertiesFileContent))) } else { d.Set("latest_revision", nil) d.Set("properties_file_content", nil) @@ -125,6 +140,96 @@ func resourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData return diags } +func resourceWorkerConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + // This update function is for updating tags only - there is no update action for this resource. + + return append(diags, resourceWorkerConfigurationRead(ctx, d, meta)...) 
+} + +func resourceWorkerConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) + + log.Printf("[DEBUG] Deleting MSK Connect Worker Configuration: %s", d.Id()) + _, err := conn.DeleteWorkerConfiguration(ctx, &kafkaconnect.DeleteWorkerConfigurationInput{ + WorkerConfigurationArn: aws.String(d.Id()), + }) + + if errs.IsA[*awstypes.NotFoundException](err) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting MSK Connect Worker Configuration (%s): %s", d.Id(), err) + } + + if _, err := waitWorkerConfigurationDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for MSK Connect Worker Configuration (%s) delete: %s", d.Id(), err) + } + + return diags +} + +func findWorkerConfigurationByARN(ctx context.Context, conn *kafkaconnect.Client, arn string) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { + input := &kafkaconnect.DescribeWorkerConfigurationInput{ + WorkerConfigurationArn: aws.String(arn), + } + + output, err := conn.DescribeWorkerConfiguration(ctx, input) + + if errs.IsA[*awstypes.NotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusWorkerConfiguration(ctx context.Context, conn *kafkaconnect.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findWorkerConfigurationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.WorkerConfigurationState), nil + } +} + +func waitWorkerConfigurationDeleted(ctx context.Context, conn *kafkaconnect.Client, arn 
string, timeout time.Duration) (*kafkaconnect.DescribeWorkerConfigurationOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.WorkerConfigurationStateDeleting), + Target: []string{}, + Refresh: statusWorkerConfiguration(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*kafkaconnect.DescribeWorkerConfigurationOutput); ok { + return output, err + } + + return nil, err +} + func decodePropertiesFileContent(content string) string { v, err := itypes.Base64Decode(content) if err != nil { diff --git a/internal/service/kafkaconnect/worker_configuration_data_source.go b/internal/service/kafkaconnect/worker_configuration_data_source.go index 6f993a75512..0086f510966 100644 --- a/internal/service/kafkaconnect/worker_configuration_data_source.go +++ b/internal/service/kafkaconnect/worker_configuration_data_source.go @@ -6,18 +6,22 @@ package kafkaconnect import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kafkaconnect" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/kafkaconnect" + awstypes "github.com/aws/aws-sdk-go-v2/service/kafkaconnect/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_mskconnect_worker_configuration") -func DataSourceWorkerConfiguration() *schema.Resource { +// @SDKDataSource("aws_mskconnect_worker_configuration", name="Worker Configuration") +// @Tags(identifierAttribute="arn") +func 
dataSourceWorkerConfiguration() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceWorkerConfigurationRead, @@ -42,62 +46,37 @@ func DataSourceWorkerConfiguration() *schema.Resource { Type: schema.TypeString, Computed: true, }, + names.AttrTags: tftags.TagsSchemaComputed(), }, } } func dataSourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).KafkaConnectClient(ctx) - conn := meta.(*conns.AWSClient).KafkaConnectConn(ctx) - - name := d.Get(names.AttrName) - var output []*kafkaconnect.WorkerConfigurationSummary - - err := conn.ListWorkerConfigurationsPagesWithContext(ctx, &kafkaconnect.ListWorkerConfigurationsInput{}, func(page *kafkaconnect.ListWorkerConfigurationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.WorkerConfigurations { - if aws.StringValue(v.Name) == name { - output = append(output, v) - } - } - - return !lastPage - }) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "listing MSK Connect Worker Configurations: %s", err) - } - - if len(output) == 0 || output[0] == nil { - err = tfresource.NewEmptyResultError(name) - } else if count := len(output); count > 1 { - err = tfresource.NewTooManyResultsError(count, name) - } + output, err := findWorkerConfigurationByName(ctx, conn, d.Get(names.AttrName).(string)) if err != nil { return sdkdiag.AppendFromErr(diags, tfresource.SingularDataSourceFindError("MSK Connect Worker Configuration", err)) } - arn := aws.StringValue(output[0].WorkerConfigurationArn) - config, err := FindWorkerConfigurationByARN(ctx, conn, arn) + arn := aws.ToString(output.WorkerConfigurationArn) + config, err := findWorkerConfigurationByARN(ctx, conn, arn) if err != nil { return sdkdiag.AppendErrorf(diags, "reading MSK Connect Worker Configuration (%s): %s", arn, err) } - d.SetId(aws.StringValue(config.Name)) - + name := 
aws.ToString(config.Name) + d.SetId(name) d.Set(names.AttrARN, config.WorkerConfigurationArn) d.Set(names.AttrDescription, config.Description) - d.Set(names.AttrName, config.Name) + d.Set(names.AttrName, name) if config.LatestRevision != nil { d.Set("latest_revision", config.LatestRevision.Revision) - d.Set("properties_file_content", decodePropertiesFileContent(aws.StringValue(config.LatestRevision.PropertiesFileContent))) + d.Set("properties_file_content", decodePropertiesFileContent(aws.ToString(config.LatestRevision.PropertiesFileContent))) } else { d.Set("latest_revision", nil) d.Set("properties_file_content", nil) @@ -105,3 +84,42 @@ func dataSourceWorkerConfigurationRead(ctx context.Context, d *schema.ResourceDa return diags } + +func findWorkerConfiguration(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListWorkerConfigurationsInput, filter tfslices.Predicate[*awstypes.WorkerConfigurationSummary]) (*awstypes.WorkerConfigurationSummary, error) { + output, err := findWorkerConfigurations(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findWorkerConfigurations(ctx context.Context, conn *kafkaconnect.Client, input *kafkaconnect.ListWorkerConfigurationsInput, filter tfslices.Predicate[*awstypes.WorkerConfigurationSummary]) ([]awstypes.WorkerConfigurationSummary, error) { + var output []awstypes.WorkerConfigurationSummary + + pages := kafkaconnect.NewListWorkerConfigurationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if err != nil { + return nil, err + } + + for _, v := range page.WorkerConfigurations { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func findWorkerConfigurationByName(ctx context.Context, conn *kafkaconnect.Client, name string) (*awstypes.WorkerConfigurationSummary, error) { + input := &kafkaconnect.ListWorkerConfigurationsInput{} + + return 
findWorkerConfiguration(ctx, conn, input, func(v *awstypes.WorkerConfigurationSummary) bool { + return aws.ToString(v.Name) == name + }) +} diff --git a/internal/service/kafkaconnect/worker_configuration_data_source_test.go b/internal/service/kafkaconnect/worker_configuration_data_source_test.go index fbff029753e..d7308c38de6 100644 --- a/internal/service/kafkaconnect/worker_configuration_data_source_test.go +++ b/internal/service/kafkaconnect/worker_configuration_data_source_test.go @@ -23,7 +23,6 @@ func TestAccKafkaConnectWorkerConfigurationDataSource_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -34,6 +33,7 @@ func TestAccKafkaConnectWorkerConfigurationDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "latest_revision", dataSourceName, "latest_revision"), resource.TestCheckResourceAttrPair(resourceName, names.AttrName, dataSourceName, names.AttrName), resource.TestCheckResourceAttrPair(resourceName, "properties_file_content", dataSourceName, "properties_file_content"), + resource.TestCheckResourceAttrPair(resourceName, names.AttrTags, dataSourceName, names.AttrTags), ), }, }, @@ -49,6 +49,10 @@ resource "aws_mskconnect_worker_configuration" "test" { key.converter=org.apache.kafka.connect.storage.StringConverter value.converter=org.apache.kafka.connect.storage.StringConverter EOF + + tags = { + key1 = "value1" + } } data "aws_mskconnect_worker_configuration" "test" { diff --git a/internal/service/kafkaconnect/worker_configuration_test.go b/internal/service/kafkaconnect/worker_configuration_test.go index 10123831093..4681029e31c 100644 --- a/internal/service/kafkaconnect/worker_configuration_test.go +++ 
b/internal/service/kafkaconnect/worker_configuration_test.go @@ -8,13 +8,13 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/kafkaconnect" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfkafkaconnect "github.com/hashicorp/terraform-provider-aws/internal/service/kafkaconnect" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -24,9 +24,9 @@ func TestAccKafkaConnectWorkerConfiguration_basic(t *testing.T) { resourceName := "aws_mskconnect_worker_configuration.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, + CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -48,15 +48,38 @@ func TestAccKafkaConnectWorkerConfiguration_basic(t *testing.T) { }) } +func TestAccKafkaConnectWorkerConfiguration_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_mskconnect_worker_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), + CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccWorkerConfigurationConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkerConfigurationExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfkafkaconnect.ResourceWorkerConfiguration(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccKafkaConnectWorkerConfiguration_description(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_mskconnect_worker_configuration.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, kafkaconnect.EndpointsID) }, + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), - CheckDestroy: nil, + CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { @@ -75,6 +98,51 @@ func TestAccKafkaConnectWorkerConfiguration_description(t *testing.T) { }) } +func TestAccKafkaConnectWorkerConfiguration_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_mskconnect_worker_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckPartitionHasService(t, names.KafkaConnectEndpointID) }, + ErrorCheck: acctest.ErrorCheck(t, names.KafkaConnectServiceID), + CheckDestroy: testAccCheckWorkerConfigurationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccWorkerConfigurationConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeAggregateTestCheckFunc( + 
testAccCheckWorkerConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccWorkerConfigurationConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkerConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccWorkerConfigurationConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckWorkerConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + func testAccCheckWorkerConfigurationExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -82,11 +150,7 @@ func testAccCheckWorkerConfigurationExists(ctx context.Context, n string) resour return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No MSK Connect Worker Configuration ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) _, err := tfkafkaconnect.FindWorkerConfigurationByARN(ctx, conn, rs.Primary.ID) @@ -94,6 +158,32 @@ func testAccCheckWorkerConfigurationExists(ctx context.Context, n string) resour } } 
+func testAccCheckWorkerConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).KafkaConnectClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_mskconnect_worker_configuration" { + continue + } + + _, err := tfkafkaconnect.FindWorkerConfigurationByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("MSK Connect Worker Configuration %s still exists", rs.Primary.ID) + } + + return nil + } +} + func testAccWorkerConfigurationConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_mskconnect_worker_configuration" "test" { @@ -120,3 +210,38 @@ EOF } `, rName) } + +func testAccWorkerConfigurationConfig_tags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_mskconnect_worker_configuration" "test" { + name = %[1]q + + properties_file_content = <:'", d.Id()) + } + + policyName := parts[0] + policyType := parts[1] + + d.SetId(policyName) + d.Set("policy_type", policyType) + + return []*schema.ResourceData{d}, nil +} + +func findAccountPolicyByTwoPartKey(ctx context.Context, conn *cloudwatchlogs.Client, policyType types.PolicyType, policyName string) (*types.AccountPolicy, error) { + input := &cloudwatchlogs.DescribeAccountPoliciesInput{ + PolicyName: aws.String(policyName), + PolicyType: policyType, + } + + output, err := conn.DescribeAccountPolicies(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return tfresource.AssertSingleValueResult(output.AccountPolicies) +} diff --git a/internal/service/logs/account_policy_test.go b/internal/service/logs/account_policy_test.go new file mode 
100644 index 00000000000..fc1be4c7744 --- /dev/null +++ b/internal/service/logs/account_policy_test.go @@ -0,0 +1,362 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package logs_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tflogs "github.com/hashicorp/terraform-provider-aws/internal/service/logs" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccLogsAccountPolicy_basicSubscriptionFilter(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cloudwatch_log_account_policy.test" + var accountPolicy types.AccountPolicy + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LogsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAccountPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAccountPolicyConfig_basicSubscriptionFilter(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAccountPolicyExists(ctx, resourceName, &accountPolicy), + resource.TestCheckResourceAttr(resourceName, "policy_name", rName), + testAccCheckAccountHasSubscriptionFilterPolicy(resourceName, rName), + ), + }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccAccountPolicyImportStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccLogsAccountPolicy_basicDataProtection(t *testing.T) { + 
ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cloudwatch_log_account_policy.test" + var accountPolicy types.AccountPolicy + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LogsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAccountPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAccountPolicyConfig_basicDataProtection(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAccountPolicyExists(ctx, resourceName, &accountPolicy), + resource.TestCheckResourceAttr(resourceName, "policy_name", rName), + resource.TestCheckResourceAttr(resourceName, "policy_type", "DATA_PROTECTION_POLICY"), + acctest.CheckResourceAttrEquivalentJSON(resourceName, "policy_document", ` +{ + "Name": "Test", + "Version": "2021-06-01", + "Statement": [ + { + "Sid": "Audit", + "DataIdentifier": [ + "arn:aws:dataprotection::aws:data-identifier/EmailAddress" + ], + "Operation": { + "Audit": { + "FindingsDestination": {} + } + } + }, + { + "Sid": "Redact", + "DataIdentifier": [ + "arn:aws:dataprotection::aws:data-identifier/EmailAddress" + ], + "Operation": { + "Deidentify": { + "MaskConfig": {} + } + } + } + ] +} +`), //lintignore:AWSAT005 + ), + }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccAccountPolicyImportStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccLogsAccountPolicy_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_cloudwatch_log_account_policy.test" + var accountPolicy types.AccountPolicy + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LogsServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAccountPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAccountPolicyConfig_basicDataProtection(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAccountPolicyExists(ctx, resourceName, &accountPolicy), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tflogs.ResourceAccountPolicy(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccLogsAccountPolicy_selectionCriteria(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rSelectionCriteria := fmt.Sprintf("LogGroupName NOT IN [\"%s\"]", rName) + resourceName := "aws_cloudwatch_log_account_policy.test" + var accountPolicy types.AccountPolicy + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LogsServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAccountPolicyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAccountPolicyConfig_selectionCriteria(rName, rSelectionCriteria), + Check: resource.ComposeTestCheckFunc( + testAccCheckAccountPolicyExists(ctx, resourceName, &accountPolicy), + resource.TestCheckResourceAttr(resourceName, "policy_name", rName), + resource.TestCheckResourceAttr(resourceName, "selection_criteria", rSelectionCriteria), + ), + }, + { + ResourceName: resourceName, + ImportStateIdFunc: testAccAccountPolicyImportStateIDFunc(resourceName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAccountPolicyExists(ctx context.Context, n string, v *types.AccountPolicy) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).LogsClient(ctx) + + output, err := 
tflogs.FindAccountPolicyByTwoPartKey(ctx, conn, types.PolicyType(rs.Primary.Attributes["policy_type"]), rs.Primary.ID) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccAccountPolicyImportStateIDFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not found: %s", resourceName) + } + + policyName := rs.Primary.ID + policyType := rs.Primary.Attributes["policy_type"] + stateID := fmt.Sprintf("%s:%s", policyName, policyType) + + return stateID, nil + } +} + +func testAccCheckAccountPolicyDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).LogsClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_cloudwatch_log_account_policy" { + continue + } + + _, err := tflogs.FindAccountPolicyByTwoPartKey(ctx, conn, types.PolicyType(rs.Primary.Attributes["policy_type"]), rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("CloudWatch Logs Resource Policy still exists: %s", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckAccountHasSubscriptionFilterPolicy(resourceName string, rName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + expectedJSONTemplate := `{ + "DestinationArn": "arn:%s:lambda:%s:%s:function:%s", + "FilterPattern" : " ", + "Distribution" : "Random" + }` + expectedJSON := fmt.Sprintf(expectedJSONTemplate, acctest.Partition(), acctest.Region(), acctest.AccountID(), rName) + return acctest.CheckResourceAttrEquivalentJSON(resourceName, "policy_document", expectedJSON)(s) + } +} + +func testAccAccountPolicyConfig_lambdaBase(rName string) string { + return fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_cloudwatch_log_group" "test" { + name = 
%[1]q + retention_in_days = 1 +} + +resource "aws_iam_role" "test" { + name = %[1]q + + assume_role_policy = < 30720 || (len(value) == 0) { + errors = append(errors, fmt.Errorf("CloudWatch log account policy document must be between 1 and 30,720 characters.")) + } + if _, err := structure.NormalizeJsonString(v); err != nil { + errors = append(errors, fmt.Errorf("%q contains an invalid JSON: %s", k, err)) + } + return +} + func validLogGroupName(v interface{}, k string) (ws []string, errors []error) { value := v.(string) diff --git a/internal/service/lookoutmetrics/service_endpoint_resolver_gen.go b/internal/service/lookoutmetrics/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..4bba23d3d1d --- /dev/null +++ b/internal/service/lookoutmetrics/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package lookoutmetrics + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + lookoutmetrics_sdkv2 "github.com/aws/aws-sdk-go-v2/service/lookoutmetrics" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ lookoutmetrics_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver lookoutmetrics_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: lookoutmetrics_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params lookoutmetrics_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + 
tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up lookoutmetrics endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*lookoutmetrics_sdkv2.Options) { + return func(o *lookoutmetrics_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/lookoutmetrics/service_endpoints_gen_test.go b/internal/service/lookoutmetrics/service_endpoints_gen_test.go index c954b142066..668ef9f66d8 100644 --- a/internal/service/lookoutmetrics/service_endpoints_gen_test.go +++ b/internal/service/lookoutmetrics/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func 
TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := lookoutmetrics_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), lookoutmetrics_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := lookoutmetrics_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), lookoutmetrics_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func 
expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/lookoutmetrics/service_package_gen.go b/internal/service/lookoutmetrics/service_package_gen.go index 6dd911d0b22..589afbfc397 100644 --- a/internal/service/lookoutmetrics/service_package_gen.go +++ b/internal/service/lookoutmetrics/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package lookoutmetrics @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" lookoutmetrics_sdkv2 "github.com/aws/aws-sdk-go-v2/service/lookoutmetrics" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*lookoutmetrics_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return lookoutmetrics_sdkv2.NewFromConfig(cfg, func(o *lookoutmetrics_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return lookoutmetrics_sdkv2.NewFromConfig(cfg, + lookoutmetrics_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/m2/application.go b/internal/service/m2/application.go index 6012c186a80..221abb4dd55 100644 --- a/internal/service/m2/application.go +++ b/internal/service/m2/application.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" 
"github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" @@ -36,8 +37,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @FrameworkResource(name="Application") +// @FrameworkResource("aws_m2_application", name="Application") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/m2;m2.GetApplicationOutput") func newApplicationResource(context.Context) (resource.ResourceWithConfigure, error) { r := &applicationResource{} @@ -149,29 +151,18 @@ func (r *applicationResource) Create(ctx context.Context, request resource.Creat conn := r.Meta().M2Client(ctx) name := data.Name.ValueString() - input := &m2.CreateApplicationInput{} - response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + input := m2.CreateApplicationInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) if response.Diagnostics.HasError() { return } - // AutoFlEx doesn't yet handle union types. - if !data.Definition.IsNull() { - definitionData, diags := data.Definition.ToPtr(ctx) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return - } - - input.Definition = expandDefinition(definitionData) - } - // Additional fields. input.ClientToken = aws.String(sdkid.UniqueId()) input.Tags = getTagsIn(ctx) outputRaw, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.AccessDeniedException](ctx, propagationTimeout, func() (interface{}, error) { - return conn.CreateApplication(ctx, input) + return conn.CreateApplication(ctx, &input) }, "does not have proper Trust Policy for M2 service") if err != nil { @@ -279,15 +270,10 @@ func (r *applicationResource) Update(ctx context.Context, request resource.Updat } if !new.Definition.Equal(old.Definition) { - // AutoFlEx doesn't yet handle union types. 
- if !new.Definition.IsNull() { - definitionData, diags := new.Definition.ToPtr(ctx) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return - } - - input.Definition = expandDefinition(definitionData) + d := fwflex.Expand(ctx, new.Definition, &input.Definition) + response.Diagnostics.Append(d...) + if response.Diagnostics.HasError() { + return } } @@ -396,11 +382,11 @@ func stopApplicationIfRunning(ctx context.Context, conn *m2.Client, id string, f } func findApplicationByID(ctx context.Context, conn *m2.Client, id string) (*m2.GetApplicationOutput, error) { - input := &m2.GetApplicationInput{ + input := m2.GetApplicationInput{ ApplicationId: aws.String(id), } - output, err := conn.GetApplication(ctx, input) + output, err := conn.GetApplication(ctx, &input) if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ @@ -421,12 +407,12 @@ func findApplicationByID(ctx context.Context, conn *m2.Client, id string) (*m2.G } func findApplicationVersionByTwoPartKey(ctx context.Context, conn *m2.Client, id string, version int32) (*m2.GetApplicationVersionOutput, error) { - input := &m2.GetApplicationVersionInput{ + input := m2.GetApplicationVersionInput{ ApplicationId: aws.String(id), ApplicationVersion: aws.Int32(version), } - output, err := conn.GetApplicationVersion(ctx, input) + output, err := conn.GetApplicationVersion(ctx, &input) if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ @@ -625,18 +611,22 @@ type definitionModel struct { S3Location types.String `tfsdk:"s3_location"` } -func expandDefinition(definitionData *definitionModel) awstypes.Definition { - if !definitionData.Content.IsNull() { - return &awstypes.DefinitionMemberContent{ - Value: definitionData.Content.ValueString(), +var ( + _ fwflex.Expander = definitionModel{} +) + +func (m definitionModel) Expand(ctx context.Context) (result any, diags diag.Diagnostics) { + switch { + case !m.Content.IsNull(): + 
result = &awstypes.DefinitionMemberContent{ + Value: m.Content.ValueString(), } - } - if !definitionData.S3Location.IsNull() { - return &awstypes.DefinitionMemberS3Location{ - Value: definitionData.S3Location.ValueString(), + case !m.S3Location.IsNull(): + result = &awstypes.DefinitionMemberS3Location{ + Value: m.S3Location.ValueString(), } } - return nil + return result, diags } diff --git a/internal/service/m2/application_tags_gen_test.go b/internal/service/m2/application_tags_gen_test.go new file mode 100644 index 00000000000..bcba32b73d1 --- /dev/null +++ b/internal/service/m2/application_tags_gen_test.go @@ -0,0 +1,1800 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package m2_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/m2" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccM2Application_tags(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_null(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + }, + }) +} + +func TestAccM2Application_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + 
ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccM2Application_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + 
acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable("providervalue1updated"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact("providervalue1updated"), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact("providervalue1updated"), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable("providervalue1updated"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: 
config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, 
+ }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + 
ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Application/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Application_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetApplicationOutput + resourceName := "aws_m2_application.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckApplicationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckApplicationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Application/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/internal/service/m2/application_test.go b/internal/service/m2/application_test.go index be3ceefc1b0..ba0f2673f8c 100644 --- a/internal/service/m2/application_test.go +++ b/internal/service/m2/application_test.go @@ -12,7 +12,11 @@ import ( "github.com/aws/aws-sdk-go-v2/service/m2" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfm2 "github.com/hashicorp/terraform-provider-aws/internal/service/m2" @@ -20,7 +24,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccM2Application_basic(t *testing.T) { +func TestAccM2Application_basic_Content(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -40,7 +44,7 @@ func TestAccM2Application_basic(t *testing.T) { CheckDestroy: testAccCheckApplicationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccApplicationConfig_basic(rName, "bluage"), + Config: testAccApplicationConfig_basic_Content(rName, "bluage"), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckApplicationExists(ctx, resourceName, &application), resource.TestCheckResourceAttrSet(resourceName, names.AttrApplicationID), @@ -54,8 +58,10 @@ func TestAccM2Application_basic(t *testing.T) { resource.TestCheckNoResourceAttr(resourceName, names.AttrKMSKeyID), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), 
resource.TestCheckNoResourceAttr(resourceName, names.AttrRoleARN), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, }, { ResourceName: resourceName, @@ -65,8 +71,7 @@ func TestAccM2Application_basic(t *testing.T) { }, }) } - -func TestAccM2Application_disappears(t *testing.T) { +func TestAccM2Application_basic_S3Location(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -79,70 +84,82 @@ func TestAccM2Application_disappears(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, names.M2EndpointID) - testAccApplicationPreCheck(ctx, t) + testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckApplicationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccApplicationConfig_basic(rName, "bluage"), - Check: resource.ComposeTestCheckFunc( + Config: testAccApplicationConfig_basic_S3Location(rName, "bluage"), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckApplicationExists(ctx, resourceName, &application), - acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfm2.ResourceApplication, resourceName), + resource.TestCheckResourceAttrSet(resourceName, names.AttrApplicationID), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "m2", regexache.MustCompile(`app/.+`)), + resource.TestCheckResourceAttr(resourceName, "current_version", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "definition.#", acctest.Ct1), + resource.TestCheckNoResourceAttr(resourceName, "definition.0.content"), + resource.TestMatchResourceAttr(resourceName, "definition.0.s3_location", regexache.MustCompile(`s3://[-a-z0-9]+/definition.json`)), + 
resource.TestCheckNoResourceAttr(resourceName, names.AttrDescription), + resource.TestCheckResourceAttr(resourceName, "engine_type", "bluage"), + resource.TestCheckNoResourceAttr(resourceName, names.AttrKMSKeyID), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckNoResourceAttr(resourceName, names.AttrRoleARN), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectEmptyPlan(), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("definition").AtSliceIndex(0).AtMapKey(names.AttrContent), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New("definition").AtSliceIndex(0).AtMapKey("s3_location"), knownvalue.StringRegexp(regexache.MustCompile(`s3://[-a-z0-9]+/definition.json`))), + }, + }, ExpectNonEmptyPlan: true, }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"definition"}, + }, }, }) } -func TestAccM2Application_tags(t *testing.T) { +func TestAccM2Application_disappears(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") } + var application m2.GetApplicationOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_m2_application.test" - var application m2.GetApplicationOutput resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.M2), + PreCheck: func() { + acctest.PreCheck(ctx, t) + 
acctest.PreCheckPartitionHasService(t, names.M2EndpointID) + testAccApplicationPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: resource.ComposeAggregateTestCheckFunc( - testAccCheckApplicationDestroy(ctx), - ), + CheckDestroy: testAccCheckApplicationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccApplicationConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccApplicationConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckApplicationExists(ctx, resourceName, &application), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccApplicationConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Config: testAccApplicationConfig_basic_Content(rName, "bluage"), Check: resource.ComposeTestCheckFunc( testAccCheckApplicationExists(ctx, resourceName, &application), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfm2.ResourceApplication, resourceName), ), + ExpectNonEmptyPlan: true, }, }, }) @@ -180,8 +197,10 @@ func TestAccM2Application_full(t 
*testing.T) { resource.TestCheckResourceAttrSet(resourceName, names.AttrKMSKeyID), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrRoleARN), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, }, { ResourceName: resourceName, @@ -295,12 +314,22 @@ func testAccApplicationPreCheck(ctx context.Context, t *testing.T) { } } -func testAccApplicationConfig_basic(rName, engineType string) string { +func testAccApplicationConfig_basic_Content(rName, engineType string) string { return testAccApplicationConfig_versioned(rName, engineType, 1, 1) } func testAccApplicationConfig_versioned(rName, engineType string, version, versions int) string { return fmt.Sprintf(` +resource "aws_m2_application" "test" { + name = %[1]q + engine_type = %[2]q + definition { + content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = %[3]d }) + } + + depends_on = [aws_s3_object.test] +} + resource "aws_s3_bucket" "test" { bucket = %[1]q } @@ -312,31 +341,50 @@ resource "aws_s3_object" "test" { key = "v${count.index + 1}/PlanetsDemo-v${count.index + 1}.zip" source = "test-fixtures/PlanetsDemo-v1.zip" } +`, rName, engineType, version, versions) +} +func testAccApplicationConfig_basic_S3Location(rName, engineType string) string { + return testAccApplicationConfig_S3Location_versioned(rName, engineType, 1, 1) +} + +func testAccApplicationConfig_S3Location_versioned(rName, engineType string, version, versions int) string { + return fmt.Sprintf(` resource "aws_m2_application" "test" { name = %[1]q engine_type = %[2]q definition { - content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = %[3]d }) + s3_location = 
"s3://${aws_s3_object.definition.bucket}/${aws_s3_object.definition.key}" } - depends_on = [aws_s3_object.test] -} -`, rName, engineType, version, versions) + depends_on = [ + aws_s3_object.application, + aws_s3_object.definition, + ] } -func testAccApplicationConfig_full(rName string) string { - return fmt.Sprintf(` resource "aws_s3_bucket" "test" { bucket = %[1]q } -resource "aws_s3_object" "test" { +resource "aws_s3_object" "application" { + count = %[4]d + bucket = aws_s3_bucket.test.id - key = "v1/PlanetsDemo-v1.zip" + key = "v${count.index + 1}/PlanetsDemo-v${count.index + 1}.zip" source = "test-fixtures/PlanetsDemo-v1.zip" } +resource "aws_s3_object" "definition" { + bucket = aws_s3_bucket.test.id + key = "definition.json" + content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = %[3]d }) +} +`, rName, engineType, version, versions) +} + +func testAccApplicationConfig_full(rName string) string { + return fmt.Sprintf(` resource "aws_m2_application" "test" { name = %[1]q engine_type = "bluage" @@ -350,6 +398,16 @@ resource "aws_m2_application" "test" { depends_on = [aws_s3_object.test, aws_iam_role_policy.test] } +resource "aws_s3_bucket" "test" { + bucket = %[1]q +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "v1/PlanetsDemo-v1.zip" + source = "test-fixtures/PlanetsDemo-v1.zip" +} + resource "aws_kms_key" "test" { description = %[1]q } @@ -391,60 +449,3 @@ resource "aws_iam_role_policy" "test" { } `, rName) } - -func testAccApplicationConfig_tags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_s3_object" "test" { - bucket = aws_s3_bucket.test.id - key = "v1/PlanetsDemo-v1.zip" - source = "test-fixtures/PlanetsDemo-v1.zip" -} - -resource "aws_m2_application" "test" { - name = %[1]q - engine_type = "bluage" - definition { - content = 
templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = "v1" }) - } - - tags = { - %[2]q = %[3]q - } - - depends_on = [aws_s3_object.test] -} -`, rName, tagKey1, tagValue1) -} - -func testAccApplicationConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_s3_bucket" "test" { - bucket = %[1]q -} - -resource "aws_s3_object" "test" { - bucket = aws_s3_bucket.test.id - key = "v1/PlanetsDemo-v1.zip" - source = "test-fixtures/PlanetsDemo-v1.zip" -} - -resource "aws_m2_application" "test" { - name = %[1]q - engine_type = "bluage" - definition { - content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = "v1" }) - } - - tags = { - %[2]q = %[3]q - %[4]q = %[5]q - } - - depends_on = [aws_s3_object.test] -} -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) -} diff --git a/internal/service/m2/deployment.go b/internal/service/m2/deployment.go index a6abf897ebf..8eb29695ee6 100644 --- a/internal/service/m2/deployment.go +++ b/internal/service/m2/deployment.go @@ -30,7 +30,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @FrameworkResource(name="Deployment") +// @FrameworkResource("aws_m2_deployment", name="Deployment") func newDeploymentResource(context.Context) (resource.ResourceWithConfigure, error) { r := &deploymentResource{} diff --git a/internal/service/m2/environment.go b/internal/service/m2/environment.go index 79b640cc6f5..e0ab54314ad 100644 --- a/internal/service/m2/environment.go +++ b/internal/service/m2/environment.go @@ -42,8 +42,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @FrameworkResource(name="Environment") +// @FrameworkResource("aws_m2_environment", name="Environment") // @Tags(identifierAttribute="arn") +// @Testing(existsType="github.com/aws/aws-sdk-go-v2/service/m2;m2.GetEnvironmentOutput") func newEnvironmentResource(context.Context) 
(resource.ResourceWithConfigure, error) { r := &environmentResource{} @@ -278,34 +279,17 @@ func (r *environmentResource) Create(ctx context.Context, request resource.Creat conn := r.Meta().M2Client(ctx) name := data.Name.ValueString() - input := &m2.CreateEnvironmentInput{} - response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + input := m2.CreateEnvironmentInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, &input)...) if response.Diagnostics.HasError() { return } - // AutoFlEx doesn't yet handle union types. - if !data.StorageConfigurations.IsNull() { - storageConfigurationsData, diags := data.StorageConfigurations.ToSlice(ctx) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return - } - - storageConfigurations, diags := expandStorageConfigurations(ctx, storageConfigurationsData) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return - } - - input.StorageConfigurations = storageConfigurations - } - // Additional fields. 
input.ClientToken = aws.String(sdkid.UniqueId()) input.Tags = getTagsIn(ctx) - output, err := conn.CreateEnvironment(ctx, input) + output, err := conn.CreateEnvironment(ctx, &input) if err != nil { response.Diagnostics.AddError(fmt.Sprintf("creating Mainframe Modernization Environment (%s)", name), err.Error()) @@ -567,10 +551,12 @@ func waitEnvironmentUpdated(ctx context.Context, conn *m2.Client, id string, tim func waitEnvironmentDeleted(ctx context.Context, conn *m2.Client, id string, timeout time.Duration) (*m2.GetEnvironmentOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(awstypes.EnvironmentLifecycleAvailable, awstypes.EnvironmentLifecycleCreating, awstypes.EnvironmentLifecycleDeleting), - Target: []string{}, - Refresh: statusEnvironment(ctx, conn, id), - Timeout: timeout, + Pending: enum.Slice(awstypes.EnvironmentLifecycleAvailable, awstypes.EnvironmentLifecycleCreating, awstypes.EnvironmentLifecycleDeleting), + Target: []string{}, + Refresh: statusEnvironment(ctx, conn, id), + Timeout: timeout, + Delay: 4 * time.Minute, + MinTimeout: 10 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -623,6 +609,42 @@ type storageConfigurationModel struct { FSX fwtypes.ListNestedObjectValueOf[fsxStorageConfigurationModel] `tfsdk:"fsx"` } +func (m storageConfigurationModel) Expand(ctx context.Context) (result any, diags diag.Diagnostics) { + switch { + case !m.EFS.IsNull(): + efsStorageConfigurationData, d := m.EFS.ToPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return nil, diags + } + + var r awstypes.StorageConfigurationMemberEfs + diags.Append(fwflex.Expand(ctx, efsStorageConfigurationData, &r.Value)...) + if diags.HasError() { + return nil, diags + } + + return &r, diags + + case !m.FSX.IsNull(): + fsxStorageConfigurationData, d := m.FSX.ToPtr(ctx) + diags.Append(d...) 
+ if diags.HasError() { + return nil, diags + } + + var r awstypes.StorageConfigurationMemberFsx + diags.Append(fwflex.Expand(ctx, fsxStorageConfigurationData, &r.Value)...) + if diags.HasError() { + return nil, diags + } + + return &r, diags + } + + return nil, diags +} + type efsStorageConfigurationModel struct { FileSystemID types.String `tfsdk:"file_system_id"` MountPoint types.String `tfsdk:"mount_point"` @@ -637,46 +659,6 @@ type highAvailabilityConfigModel struct { DesiredCapacity types.Int64 `tfsdk:"desired_capacity"` } -func expandStorageConfigurations(ctx context.Context, storageConfigurationsData []*storageConfigurationModel) ([]awstypes.StorageConfiguration, diag.Diagnostics) { - var diags diag.Diagnostics - apiObjects := []awstypes.StorageConfiguration{} - - for _, item := range storageConfigurationsData { - if !item.EFS.IsNull() { - efsStorageConfigurationData, d := item.EFS.ToPtr(ctx) - diags.Append(d...) - if diags.HasError() { - return nil, diags - } - - apiObject := &awstypes.StorageConfigurationMemberEfs{} - diags.Append(fwflex.Expand(ctx, efsStorageConfigurationData, &apiObject.Value)...) - if diags.HasError() { - return nil, diags - } - - apiObjects = append(apiObjects, apiObject) - } - if !item.FSX.IsNull() { - fsxStorageConfigurationData, d := item.FSX.ToPtr(ctx) - diags.Append(d...) - if diags.HasError() { - return nil, diags - } - - apiObject := &awstypes.StorageConfigurationMemberFsx{} - diags.Append(fwflex.Expand(ctx, fsxStorageConfigurationData, &apiObject.Value)...) 
- if diags.HasError() { - return nil, diags - } - - apiObjects = append(apiObjects, apiObject) - } - } - - return apiObjects, diags -} - func flattenStorageConfigurations(ctx context.Context, apiObjects []awstypes.StorageConfiguration) ([]*storageConfigurationModel, diag.Diagnostics) { var diags diag.Diagnostics var storageConfigurationsData []*storageConfigurationModel diff --git a/internal/service/m2/environment_tags_gen_test.go b/internal/service/m2/environment_tags_gen_test.go new file mode 100644 index 00000000000..09404ebed43 --- /dev/null +++ b/internal/service/m2/environment_tags_gen_test.go @@ -0,0 +1,1800 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. + +package m2_test + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/service/m2" + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccM2Environment_tags(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: 
[]statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: 
config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_null(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + }, + }) +} + +func TestAccM2Environment_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + 
ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + 
ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccM2Environment_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + 
acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + 
acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: 
config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + 
ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable("providervalue1updated"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact("providervalue1updated"), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact("providervalue1updated"), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable("providervalue1updated"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: 
config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: 
knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, 
+ }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + t.Skip("Tags with null values are not correctly handled with the Plugin Framework") + + 
ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: 
config.StaticDirectory("testdata/Environment/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey("computedkey1")), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccM2Environment_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + var v m2.GetEnvironmentOutput + resourceName := "aws_m2_environment.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.M2ServiceID), + CheckDestroy: testAccCheckEnvironmentDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + 
plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEnvironmentExists(ctx, resourceName, &v), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags).AtMapKey(acctest.CtKey1)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Environment/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/internal/service/m2/environment_test.go b/internal/service/m2/environment_test.go index 1fc42f384b6..9012fc03a4b 100644 --- a/internal/service/m2/environment_test.go +++ b/internal/service/m2/environment_test.go @@ -12,7 +12,10 @@ import ( "github.com/aws/aws-sdk-go-v2/service/m2" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfm2 "github.com/hashicorp/terraform-provider-aws/internal/service/m2" @@ -58,10 +61,12 @@ func TestAccM2Environment_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, names.AttrPreferredMaintenanceWindow), resource.TestCheckResourceAttr(resourceName, names.AttrPubliclyAccessible, acctest.CtFalse), acctest.CheckResourceAttrGreaterThanValue(resourceName, "security_group_ids.#", 0), - resource.TestCheckResourceAttr(resourceName, "storage_configuration.#", acctest.Ct0), acctest.CheckResourceAttrGreaterThanValue(resourceName, "subnet_ids.#", 0), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("storage_configuration"), knownvalue.ListExact([]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, }, { ResourceName: resourceName, @@ -103,55 +108,6 @@ func TestAccM2Environment_disappears(t *testing.T) { }) } -func TestAccM2Environment_tags(t *testing.T) { - ctx := 
acctest.Context(t) - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_m2_environment.test" - var environment m2.GetEnvironmentOutput - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, names.M2), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckEnvironmentDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccEnvironmentConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), - Check: resource.ComposeTestCheckFunc( - testAccCheckEnvironmentExists(ctx, resourceName, &environment), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccEnvironmentConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckEnvironmentExists(ctx, resourceName, &environment), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - { - Config: testAccEnvironmentConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), - Check: resource.ComposeTestCheckFunc( - testAccCheckEnvironmentExists(ctx, resourceName, &environment), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), - ), - }, - }, - }) -} - func TestAccM2Environment_full(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -187,10 
+143,12 @@ func TestAccM2Environment_full(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, names.AttrPreferredMaintenanceWindow), resource.TestCheckResourceAttr(resourceName, names.AttrPubliclyAccessible, acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "storage_configuration.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "subnet_ids.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("storage_configuration"), knownvalue.ListExact([]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, }, { ResourceName: resourceName, @@ -267,11 +225,20 @@ func TestAccM2Environment_efs(t *testing.T) { Config: testAccEnvironmentConfig_efsComplete(rName), Check: resource.ComposeTestCheckFunc( testAccCheckEnvironmentExists(ctx, resourceName, &environment), - resource.TestCheckResourceAttr(resourceName, "storage_configuration.#", acctest.Ct1), - resource.TestCheckResourceAttrSet(resourceName, "storage_configuration.0.efs.0.file_system_id"), - resource.TestCheckResourceAttr(resourceName, "storage_configuration.0.efs.0.mount_point", "/m2/mount/efsexample"), - resource.TestCheckResourceAttr(resourceName, "storage_configuration.0.fsx.#", acctest.Ct0), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("storage_configuration"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "efs": knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrFileSystemID: knownvalue.NotNull(), // TODO: should be Pair, depends on https://github.com/hashicorp/terraform-plugin-testing/pull/330 + "mount_point": 
knownvalue.StringExact("/m2/mount/efsexample"), + }), + }), + "fsx": knownvalue.ListExact([]knownvalue.Check{}), + }), + })), + }, }, { ResourceName: resourceName, @@ -281,6 +248,7 @@ func TestAccM2Environment_efs(t *testing.T) { }, }) } + func TestAccM2Environment_fsx(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -304,11 +272,20 @@ func TestAccM2Environment_fsx(t *testing.T) { Config: testAccEnvironmentConfig_fsxComplete(rName), Check: resource.ComposeTestCheckFunc( testAccCheckEnvironmentExists(ctx, resourceName, &environment), - resource.TestCheckResourceAttr(resourceName, "storage_configuration.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "storage_configuration.0.efs.#", acctest.Ct0), - resource.TestCheckResourceAttrSet(resourceName, "storage_configuration.0.fsx.0.file_system_id"), - resource.TestCheckResourceAttr(resourceName, "storage_configuration.0.fsx.0.mount_point", "/m2/mount/fsxexample"), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("storage_configuration"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "efs": knownvalue.ListExact([]knownvalue.Check{}), + "fsx": knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + names.AttrFileSystemID: knownvalue.NotNull(), // TODO: should be Pair, depends on https://github.com/hashicorp/terraform-plugin-testing/pull/330 + "mount_point": knownvalue.StringExact("/m2/mount/fsxexample"), + }), + }), + }), + })), + }, }, { ResourceName: resourceName, @@ -484,6 +461,24 @@ resource "aws_m2_environment" "test" { func testAccEnvironmentConfig_efsComplete(rName string) string { return acctest.ConfigCompose(testAccEnvironmentConfig_base(rName), fmt.Sprintf(` +resource "aws_m2_environment" "test" { + name = %[1]q + engine_type = "bluage" + engine_version = "3.7.0" + instance_type = "M2.m5.large" + security_group_ids = 
[aws_security_group.test.id] + subnet_ids = aws_subnet.test[*].id + + storage_configuration { + efs { + file_system_id = aws_efs_file_system.test.id + mount_point = "/m2/mount/efsexample" + } + } + + depends_on = [aws_efs_mount_target.test] +} + resource "aws_efs_file_system" "test" { tags = { Name = %[1]q @@ -510,36 +505,11 @@ resource "aws_efs_mount_target" "test" { security_groups = [aws_security_group.test.id] } -resource "aws_m2_environment" "test" { - name = %[1]q - engine_type = "bluage" - engine_version = "3.7.0" - instance_type = "M2.m5.large" - security_group_ids = [aws_security_group.test.id] - subnet_ids = aws_subnet.test[*].id - - storage_configuration { - efs { - file_system_id = aws_efs_file_system.test.id - mount_point = "/m2/mount/efsexample" - } - } -} `, rName)) } func testAccEnvironmentConfig_fsxComplete(rName string) string { return acctest.ConfigCompose(testAccEnvironmentConfig_base(rName), fmt.Sprintf(` -resource "aws_fsx_lustre_file_system" "test" { - storage_capacity = 1200 - subnet_ids = [aws_subnet.test[0].id] - security_group_ids = [aws_security_group.test.id] - - tags = { - Name = %[1]q - } -} - resource "aws_m2_environment" "test" { name = %[1]q engine_type = "bluage" @@ -555,34 +525,15 @@ resource "aws_m2_environment" "test" { } } } -`, rName)) -} -func testAccEnvironmentConfig_tags1(rName, tagKey1, tagValue1 string) string { - return fmt.Sprintf(` -resource "aws_m2_environment" "test" { - engine_type = "microfocus" - instance_type = "M2.m5.large" - name = %[1]q - - tags = { - %[2]q = %[3]q - } -} -`, rName, tagKey1, tagValue1) -} - -func testAccEnvironmentConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return fmt.Sprintf(` -resource "aws_m2_environment" "test" { - engine_type = "microfocus" - instance_type = "M2.m5.large" - name = %[1]q +resource "aws_fsx_lustre_file_system" "test" { + storage_capacity = 1200 + subnet_ids = [aws_subnet.test[0].id] + security_group_ids = [aws_security_group.test.id] tags 
= { - %[2]q = %[3]q - %[4]q = %[5]q + Name = %[1]q } } -`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +`, rName)) } diff --git a/internal/service/m2/generate.go b/internal/service/m2/generate.go index e3b3812b734..e59337b86fc 100644 --- a/internal/service/m2/generate.go +++ b/internal/service/m2/generate.go @@ -3,6 +3,7 @@ //go:generate go run ../../generate/servicepackage/main.go //go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsMap -UpdateTags -KVTValues -AWSSDKVersion=2 -SkipTypesImp +//go:generate go run ../../generate/tagstests/main.go // ONLY generate directives and package declaration! Do not add anything else to this file package m2 diff --git a/internal/service/m2/service_endpoint_resolver_gen.go b/internal/service/m2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..b02504b1580 --- /dev/null +++ b/internal/service/m2/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package m2 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + m2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/m2" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ m2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver m2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: m2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params m2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up m2 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + 
+func withBaseEndpoint(endpoint string) func(*m2_sdkv2.Options) { + return func(o *m2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/m2/service_endpoints_gen_test.go b/internal/service/m2/service_endpoints_gen_test.go index fc2224ae028..3fe928c2ec9 100644 --- a/internal/service/m2/service_endpoints_gen_test.go +++ b/internal/service/m2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := m2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), m2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := m2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), m2_sdkv2.EndpointParameters{ @@ -266,14 
+268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/m2/service_package_gen.go b/internal/service/m2/service_package_gen.go index d69b95704e2..5d730add214 100644 --- a/internal/service/m2/service_package_gen.go +++ b/internal/service/m2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package m2 @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" m2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/m2" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -58,19 +57,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*m2_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return m2_sdkv2.NewFromConfig(cfg, func(o *m2_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return m2_sdkv2.NewFromConfig(cfg, + m2_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/m2/sweep.go b/internal/service/m2/sweep.go new file mode 100644 index 00000000000..a49ff5b959f --- /dev/null +++ b/internal/service/m2/sweep.go @@ -0,0 +1,77 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package m2 + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/m2" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/sweep" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/framework" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func RegisterSweepers() { + sweep.Register("aws_m2_application", sweepApplications) + + sweep.Register("aws_m2_environment", sweepEnvironments) +} + +func sweepApplications(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.M2Client(ctx) + + var sweepResources []sweep.Sweepable + + pages := m2.NewListApplicationsPaginator(conn, &m2.ListApplicationsInput{}) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if awsv2.SkipSweepError(err) { + tflog.Warn(ctx, "Skipping sweeper", map[string]any{ + "error": err.Error(), + }) + return nil, nil + } + if err != nil { + return nil, err + } + + for _, application := range page.Applications { + sweepResources = append(sweepResources, framework.NewSweepResource(newApplicationResource, client, + framework.NewAttribute(names.AttrID, aws.ToString(application.ApplicationId)))) + } + } + + return sweepResources, nil +} + +func sweepEnvironments(ctx context.Context, client *conns.AWSClient) ([]sweep.Sweepable, error) { + conn := client.M2Client(ctx) + + var sweepResources []sweep.Sweepable + + pages := m2.NewListEnvironmentsPaginator(conn, &m2.ListEnvironmentsInput{}) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + if awsv2.SkipSweepError(err) { + tflog.Warn(ctx, "Skipping sweeper", map[string]any{ + "error": err.Error(), + }) + return nil, nil + } + if err != nil { + return nil, err + } + + for _, environment := range 
page.Environments { + sweepResources = append(sweepResources, framework.NewSweepResource(newEnvironmentResource, client, + framework.NewAttribute(names.AttrID, aws.ToString(environment.EnvironmentId)))) + } + } + + return sweepResources, nil +} diff --git a/internal/service/m2/testdata/Application/tags/main_gen.tf b/internal/service/m2/testdata/Application/tags/main_gen.tf new file mode 100644 index 00000000000..cf2ce9d4da7 --- /dev/null +++ b/internal/service/m2/testdata/Application/tags/main_gen.tf @@ -0,0 +1,37 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_m2_application" "test" { + name = var.rName + engine_type = "bluage" + definition { + content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = "v1" }) + } + + tags = var.resource_tags + + depends_on = [aws_s3_object.test] +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "v1/PlanetsDemo-v1.zip" + source = "test-fixtures/PlanetsDemo-v1.zip" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/m2/testdata/Application/tagsComputed1/main_gen.tf b/internal/service/m2/testdata/Application/tagsComputed1/main_gen.tf new file mode 100644 index 00000000000..2fa4687d730 --- /dev/null +++ b/internal/service/m2/testdata/Application/tagsComputed1/main_gen.tf @@ -0,0 +1,41 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_m2_application" "test" { + name = var.rName + engine_type = "bluage" + definition { + content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = "v1" }) + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + } + + depends_on = [aws_s3_object.test] +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "v1/PlanetsDemo-v1.zip" + source = "test-fixtures/PlanetsDemo-v1.zip" +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/m2/testdata/Application/tagsComputed2/main_gen.tf b/internal/service/m2/testdata/Application/tagsComputed2/main_gen.tf new file mode 100644 index 00000000000..47016bbe88f --- /dev/null +++ b/internal/service/m2/testdata/Application/tagsComputed2/main_gen.tf @@ -0,0 +1,52 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_m2_application" "test" { + name = var.rName + engine_type = "bluage" + definition { + content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = "v1" }) + } + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } + + depends_on = [aws_s3_object.test] +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "v1/PlanetsDemo-v1.zip" + source = "test-fixtures/PlanetsDemo-v1.zip" +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/m2/testdata/Application/tags_defaults/main_gen.tf b/internal/service/m2/testdata/Application/tags_defaults/main_gen.tf new file mode 100644 index 00000000000..d4ca1c29a5e --- /dev/null +++ b/internal/service/m2/testdata/Application/tags_defaults/main_gen.tf @@ -0,0 +1,48 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_m2_application" "test" { + name = var.rName + engine_type = "bluage" + definition { + content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = "v1" }) + } + + tags = var.resource_tags + + depends_on = [aws_s3_object.test] +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "v1/PlanetsDemo-v1.zip" + source = "test-fixtures/PlanetsDemo-v1.zip" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/m2/testdata/Application/tags_ignore/main_gen.tf b/internal/service/m2/testdata/Application/tags_ignore/main_gen.tf new file mode 100644 index 00000000000..d1cc30d6079 --- /dev/null +++ b/internal/service/m2/testdata/Application/tags_ignore/main_gen.tf @@ -0,0 +1,57 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_m2_application" "test" { + name = var.rName + engine_type = "bluage" + definition { + content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = "v1" }) + } + + tags = var.resource_tags + + depends_on = [aws_s3_object.test] +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "v1/PlanetsDemo-v1.zip" + source = "test-fixtures/PlanetsDemo-v1.zip" +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/m2/testdata/Environment/tags/main_gen.tf b/internal/service/m2/testdata/Environment/tags/main_gen.tf new file mode 100644 index 00000000000..3448c0711df --- /dev/null +++ b/internal/service/m2/testdata/Environment/tags/main_gen.tf @@ -0,0 +1,23 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_m2_environment" "test" { + name = var.rName + engine_type = "microfocus" + instance_type = "M2.m5.large" + + tags = var.resource_tags +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. 
To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} diff --git a/internal/service/m2/testdata/Environment/tagsComputed1/main_gen.tf b/internal/service/m2/testdata/Environment/tagsComputed1/main_gen.tf new file mode 100644 index 00000000000..c4ed8477242 --- /dev/null +++ b/internal/service/m2/testdata/Environment/tagsComputed1/main_gen.tf @@ -0,0 +1,27 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_m2_environment" "test" { + name = var.rName + engine_type = "microfocus" + instance_type = "M2.m5.large" + + tags = { + (var.unknownTagKey) = null_resource.test.id + } +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} diff --git a/internal/service/m2/testdata/Environment/tagsComputed2/main_gen.tf b/internal/service/m2/testdata/Environment/tagsComputed2/main_gen.tf new file mode 100644 index 00000000000..3dd9e972bdb --- /dev/null +++ b/internal/service/m2/testdata/Environment/tagsComputed2/main_gen.tf @@ -0,0 +1,38 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "null" {} + +resource "aws_m2_environment" "test" { + name = var.rName + engine_type = "microfocus" + instance_type = "M2.m5.large" + + tags = { + (var.unknownTagKey) = null_resource.test.id + (var.knownTagKey) = var.knownTagValue + } +} + +resource "null_resource" "test" {} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "unknownTagKey" { + type = string + nullable = false +} + +variable "knownTagKey" { + type = string + nullable = false +} + +variable "knownTagValue" { + type = string + nullable = false +} diff --git a/internal/service/m2/testdata/Environment/tags_defaults/main_gen.tf b/internal/service/m2/testdata/Environment/tags_defaults/main_gen.tf new file mode 100644 index 00000000000..610e2af5c17 --- /dev/null +++ b/internal/service/m2/testdata/Environment/tags_defaults/main_gen.tf @@ -0,0 +1,34 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } +} + +resource "aws_m2_environment" "test" { + name = var.rName + engine_type = "microfocus" + instance_type = "M2.m5.large" + + tags = var.resource_tags +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = false +} diff --git a/internal/service/m2/testdata/Environment/tags_ignore/main_gen.tf b/internal/service/m2/testdata/Environment/tags_ignore/main_gen.tf new file mode 100644 index 00000000000..ca0d96359a3 --- /dev/null +++ b/internal/service/m2/testdata/Environment/tags_ignore/main_gen.tf @@ -0,0 +1,43 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +provider "aws" { + default_tags { + tags = var.provider_tags + } + ignore_tags { + keys = var.ignore_tag_keys + } +} + +resource "aws_m2_environment" "test" { + name = var.rName + engine_type = "microfocus" + instance_type = "M2.m5.large" + + tags = var.resource_tags +} + +variable "rName" { + description = "Name for resource" + type = string + nullable = false +} + +variable "resource_tags" { + description = "Tags to set on resource. To specify no tags, set to `null`" + # Not setting a default, so that this must explicitly be set to `null` to specify no tags + type = map(string) + nullable = true +} + +variable "provider_tags" { + type = map(string) + nullable = true + default = null +} + +variable "ignore_tag_keys" { + type = set(string) + nullable = false +} diff --git a/internal/service/m2/testdata/tmpl/application_tags.gtpl b/internal/service/m2/testdata/tmpl/application_tags.gtpl new file mode 100644 index 00000000000..9b517da3177 --- /dev/null +++ b/internal/service/m2/testdata/tmpl/application_tags.gtpl @@ -0,0 +1,20 @@ +resource "aws_m2_application" "test" { + name = var.rName + engine_type = "bluage" + definition { + content = templatefile("test-fixtures/application-definition.json", { s3_bucket = aws_s3_bucket.test.id, version = "v1" }) + } +{{- template "tags" . 
}} + + depends_on = [aws_s3_object.test] +} + +resource "aws_s3_bucket" "test" { + bucket = var.rName +} + +resource "aws_s3_object" "test" { + bucket = aws_s3_bucket.test.id + key = "v1/PlanetsDemo-v1.zip" + source = "test-fixtures/PlanetsDemo-v1.zip" +} diff --git a/internal/service/m2/testdata/tmpl/environment_tags.gtpl b/internal/service/m2/testdata/tmpl/environment_tags.gtpl new file mode 100644 index 00000000000..04085d30fbf --- /dev/null +++ b/internal/service/m2/testdata/tmpl/environment_tags.gtpl @@ -0,0 +1,6 @@ +resource "aws_m2_environment" "test" { + name = var.rName + engine_type = "microfocus" + instance_type = "M2.m5.large" +{{- template "tags" . }} +} diff --git a/internal/service/macie2/service_endpoint_resolver_gen.go b/internal/service/macie2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..951f8e931d2 --- /dev/null +++ b/internal/service/macie2/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package macie2 + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) 
+ + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up macie2 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/macie2/service_endpoints_gen_test.go b/internal/service/macie2/service_endpoints_gen_test.go index 044f0d68313..f2ad02f5d18 100644 --- a/internal/service/macie2/service_endpoints_gen_test.go +++ b/internal/service/macie2/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(macie2_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(macie2_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func 
defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving macie2 default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving macie2 FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up macie2 endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/macie2/service_package_gen.go b/internal/service/macie2/service_package_gen.go index c59dc5236ff..eae4bd32aee 100644 --- a/internal/service/macie2/service_package_gen.go +++ b/internal/service/macie2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package macie2 @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" macie2_sdkv1 "github.com/aws/aws-sdk-go/service/macie2" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -89,11 +88,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*m "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return macie2_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/mediaconnect/service_endpoint_resolver_gen.go b/internal/service/mediaconnect/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..edce80ae4b4 --- /dev/null +++ b/internal/service/mediaconnect/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package mediaconnect + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + mediaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediaconnect" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ mediaconnect_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver mediaconnect_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: mediaconnect_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params mediaconnect_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up mediaconnect endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*mediaconnect_sdkv2.Options) { + return func(o *mediaconnect_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/mediaconnect/service_endpoints_gen_test.go b/internal/service/mediaconnect/service_endpoints_gen_test.go index a1818cb2713..f78b29f92bf 100644 --- a/internal/service/mediaconnect/service_endpoints_gen_test.go +++ b/internal/service/mediaconnect/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := mediaconnect_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediaconnect_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := mediaconnect_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediaconnect_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving mediaconnect default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving mediaconnect FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up mediaconnect endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/mediaconnect/service_package_gen.go b/internal/service/mediaconnect/service_package_gen.go index d7ff980f452..20f3febc209 100644 --- a/internal/service/mediaconnect/service_package_gen.go +++ 
b/internal/service/mediaconnect/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package mediaconnect @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" mediaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediaconnect" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*mediaconnect_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return mediaconnect_sdkv2.NewFromConfig(cfg, func(o *mediaconnect_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return mediaconnect_sdkv2.NewFromConfig(cfg, + mediaconnect_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/mediaconvert/service_endpoint_resolver_gen.go b/internal/service/mediaconvert/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f18489f36d9 --- /dev/null +++ b/internal/service/mediaconvert/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by 
internal/generate/servicepackage/main.go; DO NOT EDIT. + +package mediaconvert + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + mediaconvert_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediaconvert" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ mediaconvert_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver mediaconvert_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: mediaconvert_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params mediaconvert_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up mediaconvert endpoint %q: 
%s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*mediaconvert_sdkv2.Options) { + return func(o *mediaconvert_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/mediaconvert/service_endpoints_gen_test.go b/internal/service/mediaconvert/service_endpoints_gen_test.go index 09f742a21f4..ea5b7f7e00c 100644 --- a/internal/service/mediaconvert/service_endpoints_gen_test.go +++ b/internal/service/mediaconvert/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := mediaconvert_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediaconvert_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region 
string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := mediaconvert_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediaconvert_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/mediaconvert/service_package_gen.go b/internal/service/mediaconvert/service_package_gen.go index 47233f70ce1..dbd9672aef7 100644 --- 
a/internal/service/mediaconvert/service_package_gen.go +++ b/internal/service/mediaconvert/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package mediaconvert @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" mediaconvert_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediaconvert" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -57,19 +56,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*mediaconvert_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return mediaconvert_sdkv2.NewFromConfig(cfg, func(o *mediaconvert_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return mediaconvert_sdkv2.NewFromConfig(cfg, + mediaconvert_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/medialive/service_endpoint_resolver_gen.go b/internal/service/medialive/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..fbaf32ec2b4 --- /dev/null +++ b/internal/service/medialive/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// 
Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package medialive + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + medialive_sdkv2 "github.com/aws/aws-sdk-go-v2/service/medialive" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ medialive_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver medialive_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: medialive_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params medialive_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up medialive endpoint %q: %s", 
hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*medialive_sdkv2.Options) { + return func(o *medialive_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/medialive/service_endpoints_gen_test.go b/internal/service/medialive/service_endpoints_gen_test.go index b39c210c172..b90ee39c6cd 100644 --- a/internal/service/medialive/service_endpoints_gen_test.go +++ b/internal/service/medialive/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := medialive_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), medialive_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := medialive_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), medialive_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/medialive/service_package_gen.go b/internal/service/medialive/service_package_gen.go index 477c0926706..5fdc269e9a5 100644 --- a/internal/service/medialive/service_package_gen.go +++ 
b/internal/service/medialive/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package medialive @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" medialive_sdkv2 "github.com/aws/aws-sdk-go-v2/service/medialive" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -81,19 +80,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*medialive_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return medialive_sdkv2.NewFromConfig(cfg, func(o *medialive_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return medialive_sdkv2.NewFromConfig(cfg, + medialive_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/mediapackage/service_endpoint_resolver_gen.go b/internal/service/mediapackage/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..10e45dc9536 --- /dev/null +++ b/internal/service/mediapackage/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package mediapackage + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + mediapackage_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediapackage" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ mediapackage_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver mediapackage_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: mediapackage_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params mediapackage_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up mediapackage endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*mediapackage_sdkv2.Options) { + return func(o *mediapackage_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/mediapackage/service_endpoints_gen_test.go b/internal/service/mediapackage/service_endpoints_gen_test.go index 36055f4e405..e0034ff4753 100644 --- a/internal/service/mediapackage/service_endpoints_gen_test.go +++ b/internal/service/mediapackage/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := mediapackage_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediapackage_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := mediapackage_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediapackage_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/mediapackage/service_package_gen.go b/internal/service/mediapackage/service_package_gen.go index 2fe02aba5d6..2ad78fcf337 100644 --- a/internal/service/mediapackage/service_package_gen.go +++ 
b/internal/service/mediapackage/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package mediapackage @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" mediapackage_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediapackage" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*mediapackage_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return mediapackage_sdkv2.NewFromConfig(cfg, func(o *mediapackage_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return mediapackage_sdkv2.NewFromConfig(cfg, + mediapackage_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/mediapackagev2/service_endpoint_resolver_gen.go b/internal/service/mediapackagev2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..1173787d817 --- /dev/null +++ b/internal/service/mediapackagev2/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by 
internal/generate/servicepackage/main.go; DO NOT EDIT. + +package mediapackagev2 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + mediapackagev2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediapackagev2" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ mediapackagev2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver mediapackagev2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: mediapackagev2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params mediapackagev2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up mediapackagev2 
endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*mediapackagev2_sdkv2.Options) { + return func(o *mediapackagev2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/mediapackagev2/service_endpoints_gen_test.go b/internal/service/mediapackagev2/service_endpoints_gen_test.go index f8290ca9eb0..04e98251e10 100644 --- a/internal/service/mediapackagev2/service_endpoints_gen_test.go +++ b/internal/service/mediapackagev2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := mediapackagev2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediapackagev2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } 
-func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := mediapackagev2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediapackagev2_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/mediapackagev2/service_package_gen.go b/internal/service/mediapackagev2/service_package_gen.go index c280eb568bc..1a5bf9e1f7a 
100644 --- a/internal/service/mediapackagev2/service_package_gen.go +++ b/internal/service/mediapackagev2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package mediapackagev2 @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" mediapackagev2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediapackagev2" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*mediapackagev2_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return mediapackagev2_sdkv2.NewFromConfig(cfg, func(o *mediapackagev2_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return mediapackagev2_sdkv2.NewFromConfig(cfg, + mediapackagev2_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/mediastore/service_endpoint_resolver_gen.go b/internal/service/mediastore/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..a3f770246ac --- /dev/null +++ 
b/internal/service/mediastore/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package mediastore + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + mediastore_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediastore" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ mediastore_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver mediastore_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: mediastore_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params mediastore_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = 
aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up mediastore endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*mediastore_sdkv2.Options) { + return func(o *mediastore_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/mediastore/service_endpoints_gen_test.go b/internal/service/mediastore/service_endpoints_gen_test.go index 3d78e79d276..a967b6552f8 100644 --- a/internal/service/mediastore/service_endpoints_gen_test.go +++ b/internal/service/mediastore/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := mediastore_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediastore_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return 
ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := mediastore_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mediastore_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/mediastore/service_package_gen.go b/internal/service/mediastore/service_package_gen.go index 
b011ea718d5..8b0c82c7788 100644 --- a/internal/service/mediastore/service_package_gen.go +++ b/internal/service/mediastore/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package mediastore @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" mediastore_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediastore" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -52,19 +51,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*mediastore_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return mediastore_sdkv2.NewFromConfig(cfg, func(o *mediastore_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return mediastore_sdkv2.NewFromConfig(cfg, + mediastore_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/memorydb/service_endpoint_resolver_gen.go b/internal/service/memorydb/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..185906c0b20 --- /dev/null +++ b/internal/service/memorydb/service_endpoint_resolver_gen.go @@ -0,0 
+1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package memorydb + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/memorydb/service_endpoints_gen_test.go b/internal/service/memorydb/service_endpoints_gen_test.go index 0f1ebc09422..1e19c62146f 100644 --- a/internal/service/memorydb/service_endpoints_gen_test.go +++ b/internal/service/memorydb/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(memorydb_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(memorydb_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func 
defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/memorydb/service_package_gen.go b/internal/service/memorydb/service_package_gen.go index b6ef402c04f..9b026311bb6 100644 --- a/internal/service/memorydb/service_package_gen.go +++ b/internal/service/memorydb/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package memorydb @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" memorydb_sdkv1 "github.com/aws/aws-sdk-go/service/memorydb" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -122,11 +121,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*m "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return memorydb_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/meta/service_package_gen.go b/internal/service/meta/service_package_gen.go index 8ae99fcad16..2abe76c7625 100644 --- a/internal/service/meta/service_package_gen.go +++ b/internal/service/meta/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package meta diff --git a/internal/service/mq/service_endpoint_resolver_gen.go b/internal/service/mq/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..b34425906f7 --- /dev/null +++ b/internal/service/mq/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package mq + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + mq_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mq" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ mq_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver mq_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: mq_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params mq_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up mq endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + 
+func withBaseEndpoint(endpoint string) func(*mq_sdkv2.Options) { + return func(o *mq_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/mq/service_endpoints_gen_test.go b/internal/service/mq/service_endpoints_gen_test.go index 641786b65cd..4321844e88e 100644 --- a/internal/service/mq/service_endpoints_gen_test.go +++ b/internal/service/mq/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := mq_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mq_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := mq_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), mq_sdkv2.EndpointParameters{ @@ -266,14 
+268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/mq/service_package_gen.go b/internal/service/mq/service_package_gen.go index d3338933cb3..e2f4dafde65 100644 --- a/internal/service/mq/service_package_gen.go +++ b/internal/service/mq/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package mq @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" mq_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mq" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -72,19 +71,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*mq_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return mq_sdkv2.NewFromConfig(cfg, func(o *mq_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return mq_sdkv2.NewFromConfig(cfg, + mq_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/mwaa/service_endpoint_resolver_gen.go b/internal/service/mwaa/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f2d52704cc6 --- /dev/null +++ b/internal/service/mwaa/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package mwaa + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + mwaa_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mwaa" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ mwaa_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver mwaa_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: mwaa_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params mwaa_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up mwaa endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*mwaa_sdkv2.Options) { + return func(o *mwaa_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/mwaa/service_package_gen.go b/internal/service/mwaa/service_package_gen.go index 557a1052ce3..544c283240a 100644 --- a/internal/service/mwaa/service_package_gen.go +++ b/internal/service/mwaa/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package mwaa @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" mwaa_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mwaa" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*mwaa_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return mwaa_sdkv2.NewFromConfig(cfg, func(o *mwaa_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return mwaa_sdkv2.NewFromConfig(cfg, + mwaa_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) 
conns.ServicePackage { diff --git a/internal/service/neptune/service_endpoint_resolver_gen.go b/internal/service/neptune/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..e6396b11bad --- /dev/null +++ b/internal/service/neptune/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package neptune + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/neptune/service_endpoints_gen_test.go b/internal/service/neptune/service_endpoints_gen_test.go index 23149d67360..34f31264714 100644 --- a/internal/service/neptune/service_endpoints_gen_test.go +++ b/internal/service/neptune/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t 
*testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(neptune_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(neptune_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/neptune/service_package_gen.go b/internal/service/neptune/service_package_gen.go index ae90ff3983e..fdcf2b44ea5 100644 --- a/internal/service/neptune/service_package_gen.go +++ b/internal/service/neptune/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package neptune @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" neptune_sdkv1 "github.com/aws/aws-sdk-go/service/neptune" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -122,11 +121,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*n "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return neptune_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/neptunegraph/service_endpoint_resolver_gen.go b/internal/service/neptunegraph/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..20805e472f0 --- /dev/null +++ b/internal/service/neptunegraph/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package neptunegraph + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + neptunegraph_sdkv2 "github.com/aws/aws-sdk-go-v2/service/neptunegraph" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ neptunegraph_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver neptunegraph_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: neptunegraph_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params neptunegraph_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up neptunegraph endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*neptunegraph_sdkv2.Options) { + return func(o *neptunegraph_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/neptunegraph/service_package_gen.go b/internal/service/neptunegraph/service_package_gen.go index bd5fe2c6b90..699cca71095 100644 --- a/internal/service/neptunegraph/service_package_gen.go +++ b/internal/service/neptunegraph/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package neptunegraph @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" neptunegraph_sdkv2 "github.com/aws/aws-sdk-go-v2/service/neptunegraph" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*neptunegraph_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return neptunegraph_sdkv2.NewFromConfig(cfg, func(o *neptunegraph_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return neptunegraph_sdkv2.NewFromConfig(cfg, + 
neptunegraph_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/networkfirewall/exports_test.go b/internal/service/networkfirewall/exports_test.go new file mode 100644 index 00000000000..ec40d5ef6b7 --- /dev/null +++ b/internal/service/networkfirewall/exports_test.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package networkfirewall + +// Exports for use in tests only. +var ( + ResourceFirewall = resourceFirewall + ResourceFirewallPolicy = resourceFirewallPolicy + ResourceLoggingConfiguration = resourceLoggingConfiguration + ResourceResourcePolicy = resourceResourcePolicy + ResourceRuleGroup = resourceRuleGroup + ResourceTLSInspectionConfiguration = newTLSInspectionConfigurationResource + + FindFirewallByARN = findFirewallByARN + FindFirewallPolicyByARN = findFirewallPolicyByARN + FindLoggingConfigurationByARN = findLoggingConfigurationByARN + FindResourcePolicyByARN = findResourcePolicyByARN + FindRuleGroupByARN = findRuleGroupByARN + FindTLSInspectionConfigurationByARN = findTLSInspectionConfigurationByARN +) diff --git a/internal/service/networkfirewall/find.go b/internal/service/networkfirewall/find.go deleted file mode 100644 index 16815140437..00000000000 --- a/internal/service/networkfirewall/find.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package networkfirewall - -import ( - "context" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" -) - -// FindLoggingConfiguration returns the LoggingConfigurationOutput from a call to DescribeLoggingConfigurationWithContext -// given the context and FindFirewall ARN. -// Returns nil if the FindLoggingConfiguration is not found. 
-func FindLoggingConfiguration(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) (*networkfirewall.DescribeLoggingConfigurationOutput, error) { - input := &networkfirewall.DescribeLoggingConfigurationInput{ - FirewallArn: aws.String(arn), - } - output, err := conn.DescribeLoggingConfigurationWithContext(ctx, input) - if err != nil { - return nil, err - } - return output, nil -} - -// FindFirewallPolicyByNameAndARN returns the FirewallPolicyOutput from a call to DescribeFirewallPolicyWithContext -// given the context and at least one of FirewallPolicyArn and FirewallPolicyName. -func FindFirewallPolicyByNameAndARN(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string, name string) (*networkfirewall.DescribeFirewallPolicyOutput, error) { - input := &networkfirewall.DescribeFirewallPolicyInput{} - if arn != "" { - input.FirewallPolicyArn = aws.String(arn) - } - if name != "" { - input.FirewallPolicyName = aws.String(name) - } - - output, err := conn.DescribeFirewallPolicyWithContext(ctx, input) - if err != nil { - return nil, err - } - return output, nil -} - -// FindResourcePolicy returns the Policy string from a call to DescribeResourcePolicyWithContext -// given the context and resource ARN. -// Returns nil if the FindResourcePolicy is not found. 
-func FindResourcePolicy(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) (*string, error) { - input := &networkfirewall.DescribeResourcePolicyInput{ - ResourceArn: aws.String(arn), - } - output, err := conn.DescribeResourcePolicyWithContext(ctx, input) - if err != nil { - return nil, err - } - if output == nil { - return nil, nil - } - return output.Policy, nil -} diff --git a/internal/service/networkfirewall/firewall.go b/internal/service/networkfirewall/firewall.go index 95dceedc33e..651e8e51001 100644 --- a/internal/service/networkfirewall/firewall.go +++ b/internal/service/networkfirewall/firewall.go @@ -4,23 +4,22 @@ package networkfirewall import ( - "bytes" "context" - "fmt" "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -29,7 +28,7 @@ import ( // 
@SDKResource("aws_networkfirewall_firewall", name="Firewall") // @Tags(identifierAttribute="id") -func ResourceFirewall() *schema.Resource { +func resourceFirewall() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceFirewallCreate, ReadWithoutTimeout: resourceFirewallRead, @@ -53,112 +52,113 @@ func ResourceFirewall() *schema.Resource { verify.SetTagsDiff, ), - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - "delete_protection": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - names.AttrDescription: { - Type: schema.TypeString, - Optional: true, - }, - names.AttrEncryptionConfiguration: encryptionConfigurationSchema(), - "firewall_policy_arn": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, - }, - "firewall_policy_change_protection": { - Type: schema.TypeBool, - Optional: true, - }, - "firewall_status": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "sync_states": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrAvailabilityZone: { - Type: schema.TypeString, - Computed: true, - }, - "attachment": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "endpoint_id": { - Type: schema.TypeString, - Computed: true, - }, - names.AttrSubnetID: { - Type: schema.TypeString, - Computed: true, + SchemaFunc: func() map[string]*schema.Schema { + return map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "delete_protection": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + names.AttrDescription: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrEncryptionConfiguration: encryptionConfigurationSchema(), + "firewall_policy_arn": { + Type: schema.TypeString, + Required: true, + 
ValidateFunc: verify.ValidARN, + }, + "firewall_policy_change_protection": { + Type: schema.TypeBool, + Optional: true, + }, + "firewall_status": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sync_states": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "attachment": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "endpoint_id": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrSubnetID: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, + names.AttrAvailabilityZone: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, }, }, }, - }, - names.AttrName: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "subnet_change_protection": { - Type: schema.TypeBool, - Optional: true, - }, - "subnet_mapping": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrIPAddressType: { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice(networkfirewall.IPAddressType_Values(), false), - }, - names.AttrSubnetID: { - Type: schema.TypeString, - Required: true, + names.AttrName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "subnet_change_protection": { + Type: schema.TypeBool, + Optional: true, + }, + "subnet_mapping": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrIPAddressType: { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateDiagFunc: enum.Validate[awstypes.IPAddressType](), + }, + names.AttrSubnetID: { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), - "update_token": { - Type: schema.TypeString, - Computed: 
true, - }, - names.AttrVPCID: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "update_token": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrVPCID: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + } }, } } func resourceFirewallCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) name := d.Get(names.AttrName).(string) input := &networkfirewall.CreateFirewallInput{ @@ -170,7 +170,7 @@ func resourceFirewallCreate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("delete_protection"); ok { - input.DeleteProtection = aws.Bool(v.(bool)) + input.DeleteProtection = v.(bool) } if v, ok := d.GetOk(names.AttrDescription); ok { @@ -182,20 +182,20 @@ func resourceFirewallCreate(ctx context.Context, d *schema.ResourceData, meta in } if v, ok := d.GetOk("firewall_policy_change_protection"); ok { - input.FirewallPolicyChangeProtection = aws.Bool(v.(bool)) + input.FirewallPolicyChangeProtection = v.(bool) } if v, ok := d.GetOk("subnet_change_protection"); ok { - input.SubnetChangeProtection = aws.Bool(v.(bool)) + input.SubnetChangeProtection = v.(bool) } - output, err := conn.CreateFirewallWithContext(ctx, input) + output, err := conn.CreateFirewall(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating NetworkFirewall Firewall (%s): %s", name, err) } - d.SetId(aws.StringValue(output.Firewall.FirewallArn)) + d.SetId(aws.ToString(output.Firewall.FirewallArn)) if _, err := waitFirewallCreated(ctx, conn, d.Timeout(schema.TimeoutCreate), d.Id()); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) create: %s", d.Id(), err) @@ -206,10 +206,9 @@ func 
resourceFirewallCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceFirewallRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - - output, err := FindFirewallByARN(ctx, conn, d.Id()) + output, err := findFirewallByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] NetworkFirewall Firewall (%s) not found, removing from state", d.Id()) @@ -248,24 +247,24 @@ func resourceFirewallRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceFirewallUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) updateToken := d.Get("update_token").(string) if d.HasChange("delete_protection") { input := &networkfirewall.UpdateFirewallDeleteProtectionInput{ - DeleteProtection: aws.Bool(d.Get("delete_protection").(bool)), + DeleteProtection: d.Get("delete_protection").(bool), FirewallArn: aws.String(d.Id()), UpdateToken: aws.String(updateToken), } - output, err := conn.UpdateFirewallDeleteProtectionWithContext(ctx, input) + output, err := conn.UpdateFirewallDeleteProtection(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall (%s) delete protection: %s", d.Id(), err) } - updateToken = aws.StringValue(output.UpdateToken) + updateToken = aws.ToString(output.UpdateToken) } if d.HasChange(names.AttrDescription) { @@ -275,13 +274,13 @@ func resourceFirewallUpdate(ctx context.Context, d *schema.ResourceData, meta in UpdateToken: aws.String(updateToken), } - output, err := conn.UpdateFirewallDescriptionWithContext(ctx, input) + output, err := conn.UpdateFirewallDescription(ctx, input) if err != nil { return 
sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall (%s) description: %s", d.Id(), err) } - updateToken = aws.StringValue(output.UpdateToken) + updateToken = aws.ToString(output.UpdateToken) } if d.HasChange(names.AttrEncryptionConfiguration) { @@ -291,32 +290,32 @@ func resourceFirewallUpdate(ctx context.Context, d *schema.ResourceData, meta in UpdateToken: aws.String(updateToken), } - output, err := conn.UpdateFirewallEncryptionConfigurationWithContext(ctx, input) + output, err := conn.UpdateFirewallEncryptionConfiguration(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall (%s) encryption configuration: %s", d.Id(), err) } - updateToken = aws.StringValue(output.UpdateToken) + updateToken = aws.ToString(output.UpdateToken) } // Note: The *_change_protection fields below are handled before their respective fields - // to account for disabling and subsequent changes + // to account for disabling and subsequent changes. if d.HasChange("firewall_policy_change_protection") { input := &networkfirewall.UpdateFirewallPolicyChangeProtectionInput{ FirewallArn: aws.String(d.Id()), - FirewallPolicyChangeProtection: aws.Bool(d.Get("firewall_policy_change_protection").(bool)), + FirewallPolicyChangeProtection: d.Get("firewall_policy_change_protection").(bool), UpdateToken: aws.String(updateToken), } - output, err := conn.UpdateFirewallPolicyChangeProtectionWithContext(ctx, input) + output, err := conn.UpdateFirewallPolicyChangeProtection(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall (%s) firewall policy change protection: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall (%s) policy change protection: %s", d.Id(), err) } - updateToken = aws.StringValue(output.UpdateToken) + updateToken = aws.ToString(output.UpdateToken) } if d.HasChange("firewall_policy_arn") { @@ -326,29 +325,29 @@ func resourceFirewallUpdate(ctx 
context.Context, d *schema.ResourceData, meta in UpdateToken: aws.String(updateToken), } - output, err := conn.AssociateFirewallPolicyWithContext(ctx, input) + output, err := conn.AssociateFirewallPolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall (%s) firewall policy ARN: %s", d.Id(), err) } - updateToken = aws.StringValue(output.UpdateToken) + updateToken = aws.ToString(output.UpdateToken) } if d.HasChange("subnet_change_protection") { input := &networkfirewall.UpdateSubnetChangeProtectionInput{ FirewallArn: aws.String(d.Id()), - SubnetChangeProtection: aws.Bool(d.Get("subnet_change_protection").(bool)), + SubnetChangeProtection: d.Get("subnet_change_protection").(bool), UpdateToken: aws.String(updateToken), } - output, err := conn.UpdateSubnetChangeProtectionWithContext(ctx, input) + output, err := conn.UpdateSubnetChangeProtection(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall (%s) subnet change protection: %s", d.Id(), err) } - updateToken = aws.StringValue(output.UpdateToken) + updateToken = aws.ToString(output.UpdateToken) } if d.HasChange("subnet_mapping") { @@ -362,35 +361,39 @@ func resourceFirewallUpdate(ctx context.Context, d *schema.ResourceData, meta in UpdateToken: aws.String(updateToken), } - _, err := conn.AssociateSubnetsWithContext(ctx, input) + _, err := conn.AssociateSubnets(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "associating NetworkFirewall Firewall (%s) subnets: %s", d.Id(), err) } - updateToken, err = waitFirewallUpdated(ctx, conn, d.Timeout(schema.TimeoutUpdate), d.Id()) + output, err := waitFirewallUpdated(ctx, conn, d.Timeout(schema.TimeoutUpdate), d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) update: %s", d.Id(), err) } + + updateToken = aws.ToString(output.UpdateToken) } if len(subnetsToRemove) > 0 { input := &networkfirewall.DisassociateSubnetsInput{ 
FirewallArn: aws.String(d.Id()), - SubnetIds: aws.StringSlice(subnetsToRemove), + SubnetIds: subnetsToRemove, UpdateToken: aws.String(updateToken), } - _, err := conn.DisassociateSubnetsWithContext(ctx, input) + _, err := conn.DisassociateSubnets(ctx, input) if err == nil { - /*updateToken*/ _, err = waitFirewallUpdated(ctx, conn, d.Timeout(schema.TimeoutUpdate), d.Id()) + /*output*/ _, err := waitFirewallUpdated(ctx, conn, d.Timeout(schema.TimeoutUpdate), d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) update: %s", d.Id(), err) } - } else if !tfawserr.ErrMessageContains(err, networkfirewall.ErrCodeInvalidRequestException, "inaccessible") { + + // updateToken = aws.ToString(output.UpdateToken) + } else if !errs.IsAErrorMessageContains[*awstypes.InvalidRequestException](err, "inaccessible") { return sdkdiag.AppendErrorf(diags, "disassociating NetworkFirewall Firewall (%s) subnets: %s", d.Id(), err) } } @@ -401,15 +404,14 @@ func resourceFirewallUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceFirewallDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) log.Printf("[DEBUG] Deleting NetworkFirewall Firewall: %s", d.Id()) - _, err := conn.DeleteFirewallWithContext(ctx, &networkfirewall.DeleteFirewallInput{ + _, err := conn.DeleteFirewall(ctx, &networkfirewall.DeleteFirewallInput{ FirewallArn: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -424,14 +426,10 @@ func resourceFirewallDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } -func FindFirewallByARN(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) 
(*networkfirewall.DescribeFirewallOutput, error) { - input := &networkfirewall.DescribeFirewallInput{ - FirewallArn: aws.String(arn), - } +func findFirewall(ctx context.Context, conn *networkfirewall.Client, input *networkfirewall.DescribeFirewallInput) (*networkfirewall.DescribeFirewallOutput, error) { + output, err := conn.DescribeFirewall(ctx, input) - output, err := conn.DescribeFirewallWithContext(ctx, input) - - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -449,9 +447,17 @@ func FindFirewallByARN(ctx context.Context, conn *networkfirewall.NetworkFirewal return output, nil } -func statusFirewall(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) retry.StateRefreshFunc { +func findFirewallByARN(ctx context.Context, conn *networkfirewall.Client, arn string) (*networkfirewall.DescribeFirewallOutput, error) { + input := &networkfirewall.DescribeFirewallInput{ + FirewallArn: aws.String(arn), + } + + return findFirewall(ctx, conn, input) +} + +func statusFirewall(ctx context.Context, conn *networkfirewall.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindFirewallByARN(ctx, conn, arn) + output, err := findFirewallByARN(ctx, conn, arn) if tfresource.NotFound(err) { return nil, "", nil @@ -461,14 +467,14 @@ func statusFirewall(ctx context.Context, conn *networkfirewall.NetworkFirewall, return nil, "", err } - return output, aws.StringValue(output.FirewallStatus.Status), nil + return output, string(output.FirewallStatus.Status), nil } } -func waitFirewallCreated(ctx context.Context, conn *networkfirewall.NetworkFirewall, timeout time.Duration, arn string) (*networkfirewall.Firewall, error) { +func waitFirewallCreated(ctx context.Context, conn *networkfirewall.Client, timeout time.Duration, arn string) 
(*networkfirewall.DescribeFirewallOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{networkfirewall.FirewallStatusValueProvisioning}, - Target: []string{networkfirewall.FirewallStatusValueReady}, + Pending: enum.Slice(awstypes.FirewallStatusValueProvisioning), + Target: enum.Slice(awstypes.FirewallStatusValueReady), Refresh: statusFirewall(ctx, conn, arn), Timeout: timeout, } @@ -476,36 +482,36 @@ func waitFirewallCreated(ctx context.Context, conn *networkfirewall.NetworkFirew outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*networkfirewall.DescribeFirewallOutput); ok { - return output.Firewall, err + return output, err } return nil, err } -func waitFirewallUpdated(ctx context.Context, conn *networkfirewall.NetworkFirewall, timeout time.Duration, arn string) (string, error) { +func waitFirewallUpdated(ctx context.Context, conn *networkfirewall.Client, timeout time.Duration, arn string) (*networkfirewall.DescribeFirewallOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{networkfirewall.FirewallStatusValueProvisioning}, - Target: []string{networkfirewall.FirewallStatusValueReady}, + Pending: enum.Slice(awstypes.FirewallStatusValueProvisioning), + Target: enum.Slice(awstypes.FirewallStatusValueReady), Refresh: statusFirewall(ctx, conn, arn), Timeout: timeout, // Delay added to account for Associate/DisassociateSubnet calls that return // a READY status immediately after the method is called instead of immediately - // returning PROVISIONING + // returning PROVISIONING. 
Delay: 30 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*networkfirewall.DescribeFirewallOutput); ok { - return aws.StringValue(output.UpdateToken), err + return output, err } - return "", err + return nil, err } -func waitFirewallDeleted(ctx context.Context, conn *networkfirewall.NetworkFirewall, timeout time.Duration, arn string) (*networkfirewall.Firewall, error) { +func waitFirewallDeleted(ctx context.Context, conn *networkfirewall.Client, timeout time.Duration, arn string) (*networkfirewall.DescribeFirewallOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{networkfirewall.FirewallStatusValueDeleting}, + Pending: enum.Slice(awstypes.FirewallStatusValueDeleting), Target: []string{}, Refresh: statusFirewall(ctx, conn, arn), Timeout: timeout, @@ -514,38 +520,44 @@ func waitFirewallDeleted(ctx context.Context, conn *networkfirewall.NetworkFirew outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*networkfirewall.DescribeFirewallOutput); ok { - return output.Firewall, err + return output, err } return nil, err } -func expandSubnetMappings(l []interface{}) []*networkfirewall.SubnetMapping { - mappings := make([]*networkfirewall.SubnetMapping, 0, len(l)) - for _, tfMapRaw := range l { +func expandSubnetMappings(tfList []interface{}) []awstypes.SubnetMapping { + apiObjects := make([]awstypes.SubnetMapping, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - mapping := &networkfirewall.SubnetMapping{ + + apiObject := awstypes.SubnetMapping{ SubnetId: aws.String(tfMap[names.AttrSubnetID].(string)), } + if v, ok := tfMap[names.AttrIPAddressType].(string); ok && v != "" { - mapping.IPAddressType = aws.String(v) + apiObject.IPAddressType = awstypes.IPAddressType(v) } - mappings = append(mappings, mapping) + + apiObjects = append(apiObjects, apiObject) } - return mappings + return apiObjects } -func 
expandSubnetMappingIDs(l []interface{}) []string { +func expandSubnetMappingIDs(tfList []interface{}) []string { var ids []string - for _, tfMapRaw := range l { + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } + if id, ok := tfMap[names.AttrSubnetID].(string); ok && id != "" { ids = append(ids, id) } @@ -554,79 +566,66 @@ func expandSubnetMappingIDs(l []interface{}) []string { return ids } -func flattenFirewallStatus(status *networkfirewall.FirewallStatus) []interface{} { - if status == nil { +func flattenFirewallStatus(apiObject *awstypes.FirewallStatus) []interface{} { + if apiObject == nil { return nil } - m := map[string]interface{}{ - "sync_states": flattenSyncStates(status.SyncStates), + tfMap := map[string]interface{}{ + "sync_states": flattenSyncStates(apiObject.SyncStates), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenSyncStates(s map[string]*networkfirewall.SyncState) []interface{} { - if s == nil { +func flattenSyncStates(apiObject map[string]awstypes.SyncState) []interface{} { + if apiObject == nil { return nil } - syncStates := make([]interface{}, 0, len(s)) - for k, v := range s { - m := map[string]interface{}{ + tfList := make([]interface{}, 0, len(apiObject)) + + for k, v := range apiObject { + tfMap := map[string]interface{}{ + "attachment": flattenAttachment(v.Attachment), names.AttrAvailabilityZone: k, - "attachment": flattenSyncStateAttachment(v.Attachment), } - syncStates = append(syncStates, m) + + tfList = append(tfList, tfMap) } - return syncStates + return tfList } -func flattenSyncStateAttachment(a *networkfirewall.Attachment) []interface{} { - if a == nil { +func flattenAttachment(apiObject *awstypes.Attachment) []interface{} { + if apiObject == nil { return nil } - m := map[string]interface{}{ - "endpoint_id": aws.StringValue(a.EndpointId), - names.AttrSubnetID: aws.StringValue(a.SubnetId), + tfMap := map[string]interface{}{ + "endpoint_id": 
aws.ToString(apiObject.EndpointId), + names.AttrSubnetID: aws.ToString(apiObject.SubnetId), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenSubnetMappings(sm []*networkfirewall.SubnetMapping) []interface{} { - mappings := make([]interface{}, 0, len(sm)) - for _, s := range sm { - m := map[string]interface{}{ - names.AttrSubnetID: aws.StringValue(s.SubnetId), - names.AttrIPAddressType: aws.StringValue(s.IPAddressType), - } - mappings = append(mappings, m) - } +func flattenSubnetMappings(apiObjects []awstypes.SubnetMapping) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) - return mappings -} - -func subnetMappingsHash(v interface{}) int { - var buf bytes.Buffer + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + names.AttrIPAddressType: apiObject.IPAddressType, + names.AttrSubnetID: aws.ToString(apiObject.SubnetId), + } - tfMap, ok := v.(map[string]interface{}) - if !ok { - return 0 - } - if id, ok := tfMap[names.AttrSubnetID].(string); ok { - buf.WriteString(fmt.Sprintf("%s-", id)) - } - if id, ok := tfMap[names.AttrIPAddressType].(string); ok { - buf.WriteString(fmt.Sprintf("%s-", id)) + tfList = append(tfList, tfMap) } - return create.StringHashcode(buf.String()) + return tfList } -func subnetMappingsDiff(old, new *schema.Set) ([]string, []*networkfirewall.SubnetMapping) { +func subnetMappingsDiff(old, new *schema.Set) ([]string, []awstypes.SubnetMapping) { if old.Len() == 0 { return nil, expandSubnetMappings(new.List()) } @@ -634,6 +633,7 @@ func subnetMappingsDiff(old, new *schema.Set) ([]string, []*networkfirewall.Subn return expandSubnetMappingIDs(old.List()), nil } + subnetMappingsHash := sdkv2.SimpleSchemaSetFunc(names.AttrIPAddressType, names.AttrSubnetID) oldHashedSet := schema.NewSet(subnetMappingsHash, old.List()) newHashedSet := schema.NewSet(subnetMappingsHash, new.List()) diff --git a/internal/service/networkfirewall/firewall_data_source.go 
b/internal/service/networkfirewall/firewall_data_source.go index 7c8c4de6f6c..580f53449ba 100644 --- a/internal/service/networkfirewall/firewall_data_source.go +++ b/internal/service/networkfirewall/firewall_data_source.go @@ -5,12 +5,11 @@ package networkfirewall import ( "context" - "log" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -21,10 +20,12 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_networkfirewall_firewall") -func DataSourceFirewall() *schema.Resource { +// @SDKDataSource("aws_networkfirewall_firewall", name="Firewall") +// @Tags +func dataSourceFirewall() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceFirewallResourceRead, + Schema: map[string]*schema.Schema{ names.AttrARN: { Type: schema.TypeString, @@ -119,10 +120,6 @@ func DataSourceFirewall() *schema.Resource { Computed: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - names.AttrAvailabilityZone: { - Type: schema.TypeString, - Computed: true, - }, "attachment": { Type: schema.TypeList, Computed: true, @@ -143,6 +140,10 @@ func DataSourceFirewall() *schema.Resource { }, }, }, + names.AttrAvailabilityZone: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, @@ -172,7 +173,7 @@ func DataSourceFirewall() *schema.Resource { }, }, }, - names.AttrTags: tftags.TagsSchema(), + names.AttrTags: tftags.TagsSchemaComputed(), "update_token": { Type: schema.TypeString, Computed: true, @@ -187,181 +188,166 @@ func DataSourceFirewall() 
*schema.Resource { func dataSourceFirewallResourceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) input := &networkfirewall.DescribeFirewallInput{} - if v, ok := d.GetOk(names.AttrARN); ok { input.FirewallArn = aws.String(v.(string)) } - if v, ok := d.GetOk(names.AttrName); ok { input.FirewallName = aws.String(v.(string)) } - if input.FirewallArn == nil && input.FirewallName == nil { - return sdkdiag.AppendErrorf(diags, "must specify either arn, name, or both") - } - - output, err := conn.DescribeFirewallWithContext(ctx, input) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { - log.Printf("[WARN] NetworkFirewall Firewall (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } + output, err := findFirewall(ctx, conn, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Firewall (%s): %s", d.Id(), err) - } - - if output == nil || output.Firewall == nil { - return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Firewall (%s): empty output", d.Id()) + return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Firewall: %s", err) } firewall := output.Firewall - + d.SetId(aws.ToString(firewall.FirewallArn)) d.Set(names.AttrARN, firewall.FirewallArn) d.Set("delete_protection", firewall.DeleteProtection) d.Set(names.AttrDescription, firewall.Description) - d.Set(names.AttrName, firewall.FirewallName) - d.Set(names.AttrEncryptionConfiguration, flattenDataSourceEncryptionConfiguration(firewall.EncryptionConfiguration)) + if err := d.Set(names.AttrEncryptionConfiguration, flattenDataSourceEncryptionConfiguration(firewall.EncryptionConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting 
encryption_configuration: %s", err) + } d.Set("firewall_policy_arn", firewall.FirewallPolicyArn) d.Set("firewall_policy_change_protection", firewall.FirewallPolicyChangeProtection) - d.Set("firewall_status", flattenDataSourceFirewallStatus(output.FirewallStatus)) + if err := d.Set("firewall_status", flattenDataSourceFirewallStatus(output.FirewallStatus)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting firewall_status: %s", err) + } + d.Set(names.AttrName, firewall.FirewallName) d.Set("subnet_change_protection", firewall.SubnetChangeProtection) - d.Set("update_token", output.UpdateToken) - d.Set(names.AttrVPCID, firewall.VpcId) - if err := d.Set("subnet_mapping", flattenDataSourceSubnetMappings(firewall.SubnetMappings)); err != nil { return sdkdiag.AppendErrorf(diags, "setting subnet_mappings: %s", err) } + d.Set("update_token", output.UpdateToken) + d.Set(names.AttrVPCID, firewall.VpcId) - if err := d.Set(names.AttrTags, KeyValueTags(ctx, firewall.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } - - d.SetId(aws.StringValue(firewall.FirewallArn)) + setTagsOut(ctx, firewall.Tags) return diags } -func flattenDataSourceFirewallStatus(status *networkfirewall.FirewallStatus) []interface{} { - if status == nil { +func flattenDataSourceFirewallStatus(apiObject *awstypes.FirewallStatus) []interface{} { + if apiObject == nil { return nil } - m := map[string]interface{}{} - if status.CapacityUsageSummary != nil { - m["capacity_usage_summary"] = flattenDataSourceCapacityUsageSummary(status.CapacityUsageSummary) - } - if status.ConfigurationSyncStateSummary != nil { - m["configuration_sync_state_summary"] = aws.StringValue(status.ConfigurationSyncStateSummary) + + tfMap := map[string]interface{}{ + "configuration_sync_state_summary": apiObject.ConfigurationSyncStateSummary, + names.AttrStatus: apiObject.Status, } - if status.Status != nil { - m[names.AttrStatus] = 
aws.StringValue(status.Status) + + if apiObject.CapacityUsageSummary != nil { + tfMap["capacity_usage_summary"] = flattenDataSourceCapacityUsageSummary(apiObject.CapacityUsageSummary) } - if status.SyncStates != nil { - m["sync_states"] = flattenDataSourceSyncStates(status.SyncStates) + if apiObject.SyncStates != nil { + tfMap["sync_states"] = flattenDataSourceSyncStates(apiObject.SyncStates) } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenDataSourceCapacityUsageSummary(state *networkfirewall.CapacityUsageSummary) []interface{} { - if state == nil { +func flattenDataSourceCapacityUsageSummary(apiObject *awstypes.CapacityUsageSummary) []interface{} { + if apiObject == nil { return nil } - m := map[string]interface{}{ - "cidrs": flattenDataSourceCIDRSummary(state.CIDRs), + tfMap := map[string]interface{}{ + "cidrs": flattenDataSourceCIDRSummary(apiObject.CIDRs), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenDataSourceCIDRSummary(state *networkfirewall.CIDRSummary) []interface{} { - if state == nil { +func flattenDataSourceCIDRSummary(apiObject *awstypes.CIDRSummary) []interface{} { + if apiObject == nil { return nil } - m := map[string]interface{}{ - "available_cidr_count": int(aws.Int64Value(state.AvailableCIDRCount)), - "ip_set_references": flattenDataSourceIPSetReferences(state.IPSetReferences), - "utilized_cidr_count": int(aws.Int64Value(state.UtilizedCIDRCount)), + tfMap := map[string]interface{}{ + "available_cidr_count": aws.ToInt32(apiObject.AvailableCIDRCount), + "ip_set_references": flattenDataSourceIPSetReferences(apiObject.IPSetReferences), + "utilized_cidr_count": aws.ToInt32(apiObject.UtilizedCIDRCount), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenDataSourceIPSetReferences(state map[string]*networkfirewall.IPSetMetadata) []interface{} { - if state == nil { +func flattenDataSourceIPSetReferences(apiObject map[string]awstypes.IPSetMetadata) []interface{} { + if apiObject == 
nil { return nil } - ipSetReferences := make([]interface{}, 0, len(state)) - for _, v := range state { - m := map[string]interface{}{ - "resolved_cidr_count": int(aws.Int64Value(v.ResolvedCIDRCount)), - } - ipSetReferences = append(ipSetReferences, m) + tfList := make([]interface{}, 0, len(apiObject)) + + for _, v := range apiObject { + tfList = append(tfList, map[string]interface{}{ + "resolved_cidr_count": aws.ToInt32(v.ResolvedCIDRCount), + }) } - return ipSetReferences + return tfList } -func flattenDataSourceSyncStates(state map[string]*networkfirewall.SyncState) []interface{} { - if state == nil { +func flattenDataSourceSyncStates(apiObject map[string]awstypes.SyncState) []interface{} { + if apiObject == nil { return nil } - syncStates := make([]interface{}, 0, len(state)) - for k, v := range state { - m := map[string]interface{}{ + tfList := make([]interface{}, 0, len(apiObject)) + + for k, v := range apiObject { + tfMap := map[string]interface{}{ + "attachment": flattenDataSourceAttachment(v.Attachment), names.AttrAvailabilityZone: k, - "attachment": flattenDataSourceSyncStateAttachment(v.Attachment), } - syncStates = append(syncStates, m) + + tfList = append(tfList, tfMap) } - return syncStates + return tfList } -func flattenDataSourceSyncStateAttachment(attach *networkfirewall.Attachment) []interface{} { - if attach == nil { +func flattenDataSourceAttachment(apiObject *awstypes.Attachment) []interface{} { + if apiObject == nil { return nil } - m := map[string]interface{}{ - "endpoint_id": aws.StringValue(attach.EndpointId), - names.AttrStatus: aws.StringValue(attach.Status), - names.AttrSubnetID: aws.StringValue(attach.SubnetId), + tfMap := map[string]interface{}{ + "endpoint_id": aws.ToString(apiObject.EndpointId), + names.AttrStatus: apiObject.Status, + names.AttrSubnetID: aws.ToString(apiObject.SubnetId), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenDataSourceSubnetMappings(subnet []*networkfirewall.SubnetMapping) 
[]interface{} { - mappings := make([]interface{}, 0, len(subnet)) - for _, s := range subnet { - m := map[string]interface{}{ - names.AttrSubnetID: aws.StringValue(s.SubnetId), +func flattenDataSourceSubnetMappings(apiObjects []awstypes.SubnetMapping) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, s := range apiObjects { + tfMap := map[string]interface{}{ + names.AttrSubnetID: aws.ToString(s.SubnetId), } - mappings = append(mappings, m) + + tfList = append(tfList, tfMap) } - return mappings + return tfList } -func flattenDataSourceEncryptionConfiguration(encrypt *networkfirewall.EncryptionConfiguration) []interface{} { - if encrypt == nil { +func flattenDataSourceEncryptionConfiguration(apiObject *awstypes.EncryptionConfiguration) []interface{} { + if apiObject == nil { return nil } - m := map[string]interface{}{ - names.AttrKeyID: aws.StringValue(encrypt.KeyId), - names.AttrType: aws.StringValue(encrypt.Type), + tfMap := map[string]interface{}{ + names.AttrKeyID: aws.ToString(apiObject.KeyId), + names.AttrType: apiObject.Type, } - return []interface{}{m} + return []interface{}{tfMap} } diff --git a/internal/service/networkfirewall/firewall_policy.go b/internal/service/networkfirewall/firewall_policy.go index 20add02b7ff..3b05e33701a 100644 --- a/internal/service/networkfirewall/firewall_policy.go +++ b/internal/service/networkfirewall/firewall_policy.go @@ -9,15 +9,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -28,7 +30,7 @@ import ( // @SDKResource("aws_networkfirewall_firewall_policy", name="Firewall Policy") // @Tags(identifierAttribute="id") -func ResourceFirewallPolicy() *schema.Resource { +func resourceFirewallPolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceFirewallPolicyCreate, ReadWithoutTimeout: resourceFirewallPolicyRead, @@ -39,166 +41,168 @@ func ResourceFirewallPolicy() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - names.AttrDescription: { - Type: schema.TypeString, - Optional: true, - }, - names.AttrEncryptionConfiguration: encryptionConfigurationSchema(), - "firewall_policy": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "policy_variables": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "rule_variables": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 32), - validation.StringMatch(regexache.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_]+$`), "must contain only 
alphanumeric and underscore characters"), - ), - }, - "ip_set": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "definition": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, + SchemaFunc: func() map[string]*schema.Schema { + return map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + names.AttrDescription: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrEncryptionConfiguration: encryptionConfigurationSchema(), + "firewall_policy": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "policy_variables": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_variables": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_set": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "definition": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, }, }, + names.AttrKey: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexache.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), + validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_]+$`), "must contain only alphanumeric and underscore characters"), + ), + }, }, }, }, }, }, }, - }, - "stateful_default_actions": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "stateful_engine_options": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "rule_order": { - Type: schema.TypeString, - 
Optional: true, - ValidateFunc: validation.StringInSlice(networkfirewall.RuleOrder_Values(), false), - }, - "stream_exception_policy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(networkfirewall.StreamExceptionPolicy_Values(), false), + "stateful_default_actions": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "stateful_engine_options": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_order": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.RuleOrder](), + }, + "stream_exception_policy": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.StreamExceptionPolicy](), + }, }, }, }, - }, - "stateful_rule_group_reference": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "override": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrAction: { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(networkfirewall.OverrideAction_Values(), false), + "stateful_rule_group_reference": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "override": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrAction: { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.OverrideAction](), + }, }, }, }, - }, - names.AttrPriority: { - Type: schema.TypeInt, - Optional: true, - ValidateFunc: validation.IntAtLeast(1), - }, - names.AttrResourceARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + names.AttrPriority: { + Type: schema.TypeInt, + Optional: true, + 
ValidateFunc: validation.IntAtLeast(1), + }, + names.AttrResourceARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "stateless_custom_action": customActionSchema(), - "stateless_default_actions": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "stateless_fragment_default_actions": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "stateless_rule_group_reference": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrPriority: { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntAtLeast(1), - }, - names.AttrResourceARN: { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + "stateless_custom_action": customActionSchema(), + "stateless_default_actions": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "stateless_fragment_default_actions": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "stateless_rule_group_reference": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrPriority: { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(1), + }, + names.AttrResourceARN: { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - "tls_inspection_configuration_arn": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: verify.ValidARN, + "tls_inspection_configuration_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - names.AttrName: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), - "update_token": { - 
Type: schema.TypeString, - Computed: true, - }, + names.AttrName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + "update_token": { + Type: schema.TypeString, + Computed: true, + }, + } }, CustomizeDiff: customdiff.Sequence( @@ -214,8 +218,7 @@ func ResourceFirewallPolicy() *schema.Resource { func resourceFirewallPolicyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) name := d.Get(names.AttrName).(string) input := &networkfirewall.CreateFirewallPolicyInput{ @@ -227,27 +230,27 @@ func resourceFirewallPolicyCreate(ctx context.Context, d *schema.ResourceData, m if v, ok := d.GetOk(names.AttrDescription); ok { input.Description = aws.String(v.(string)) } + if v, ok := d.GetOk(names.AttrEncryptionConfiguration); ok { input.EncryptionConfiguration = expandEncryptionConfiguration(v.([]interface{})) } - output, err := conn.CreateFirewallPolicyWithContext(ctx, input) + output, err := conn.CreateFirewallPolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating NetworkFirewall Firewall Policy (%s): %s", name, err) } - d.SetId(aws.StringValue(output.FirewallPolicyResponse.FirewallPolicyArn)) + d.SetId(aws.ToString(output.FirewallPolicyResponse.FirewallPolicyArn)) return append(diags, resourceFirewallPolicyRead(ctx, d, meta)...) 
} func resourceFirewallPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - - output, err := FindFirewallPolicyByARN(ctx, conn, d.Id()) + output, err := findFirewallPolicyByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] NetworkFirewall Firewall Policy (%s) not found, removing from state", d.Id()) @@ -262,7 +265,9 @@ func resourceFirewallPolicyRead(ctx context.Context, d *schema.ResourceData, met response := output.FirewallPolicyResponse d.Set(names.AttrARN, response.FirewallPolicyArn) d.Set(names.AttrDescription, response.Description) - d.Set(names.AttrEncryptionConfiguration, flattenEncryptionConfiguration(response.EncryptionConfiguration)) + if err := d.Set(names.AttrEncryptionConfiguration, flattenEncryptionConfiguration(response.EncryptionConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting encryption_configuration: %s", err) + } if err := d.Set("firewall_policy", flattenFirewallPolicy(output.FirewallPolicy)); err != nil { return sdkdiag.AppendErrorf(diags, "setting firewall_policy: %s", err) } @@ -276,8 +281,7 @@ func resourceFirewallPolicyRead(ctx context.Context, d *schema.ResourceData, met func resourceFirewallPolicyUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) if d.HasChanges(names.AttrDescription, names.AttrEncryptionConfiguration, "firewall_policy") { input := &networkfirewall.UpdateFirewallPolicyInput{ @@ -287,12 +291,12 @@ func resourceFirewallPolicyUpdate(ctx context.Context, d *schema.ResourceData, m UpdateToken: aws.String(d.Get("update_token").(string)), } - // Only pass non-empty description values, else 
API request returns an InternalServiceError + // Only pass non-empty description values, else API request returns an InternalServiceError. if v, ok := d.GetOk(names.AttrDescription); ok { input.Description = aws.String(v.(string)) } - _, err := conn.UpdateFirewallPolicyWithContext(ctx, input) + _, err := conn.UpdateFirewallPolicy(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Firewall Policy (%s): %s", d.Id(), err) @@ -304,20 +308,19 @@ func resourceFirewallPolicyUpdate(ctx context.Context, d *schema.ResourceData, m func resourceFirewallPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) + log.Printf("[DEBUG] Deleting NetworkFirewall Firewall Policy: %s", d.Id()) const ( timeout = 10 * time.Minute ) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - - log.Printf("[DEBUG] Deleting NetworkFirewall Firewall Policy: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func() (interface{}, error) { - return conn.DeleteFirewallPolicyWithContext(ctx, &networkfirewall.DeleteFirewallPolicyInput{ + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidOperationException](ctx, timeout, func() (interface{}, error) { + return conn.DeleteFirewallPolicy(ctx, &networkfirewall.DeleteFirewallPolicyInput{ FirewallPolicyArn: aws.String(d.Id()), }) - }, networkfirewall.ErrCodeInvalidOperationException, "Unable to delete the object because it is still in use") + }, "Unable to delete the object because it is still in use") - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -332,14 +335,10 @@ func resourceFirewallPolicyDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func FindFirewallPolicyByARN(ctx context.Context, conn 
*networkfirewall.NetworkFirewall, arn string) (*networkfirewall.DescribeFirewallPolicyOutput, error) { - input := &networkfirewall.DescribeFirewallPolicyInput{ - FirewallPolicyArn: aws.String(arn), - } - - output, err := conn.DescribeFirewallPolicyWithContext(ctx, input) +func findFirewallPolicy(ctx context.Context, conn *networkfirewall.Client, input *networkfirewall.DescribeFirewallPolicyInput) (*networkfirewall.DescribeFirewallPolicyOutput, error) { + output, err := conn.DescribeFirewallPolicy(ctx, input) - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -357,9 +356,17 @@ func FindFirewallPolicyByARN(ctx context.Context, conn *networkfirewall.NetworkF return output, nil } -func statusFirewallPolicy(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) retry.StateRefreshFunc { +func findFirewallPolicyByARN(ctx context.Context, conn *networkfirewall.Client, arn string) (*networkfirewall.DescribeFirewallPolicyOutput, error) { + input := &networkfirewall.DescribeFirewallPolicyInput{ + FirewallPolicyArn: aws.String(arn), + } + + return findFirewallPolicy(ctx, conn, input) +} + +func statusFirewallPolicy(ctx context.Context, conn *networkfirewall.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindFirewallPolicyByARN(ctx, conn, arn) + output, err := findFirewallPolicyByARN(ctx, conn, arn) if tfresource.NotFound(err) { return nil, "", nil @@ -369,13 +376,13 @@ func statusFirewallPolicy(ctx context.Context, conn *networkfirewall.NetworkFire return nil, "", err } - return output, aws.StringValue(output.FirewallPolicyResponse.FirewallPolicyStatus), nil + return output, string(output.FirewallPolicyResponse.FirewallPolicyStatus), nil } } -func waitFirewallPolicyDeleted(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn 
string, timeout time.Duration) (*networkfirewall.DescribeFirewallPolicyOutput, error) { +func waitFirewallPolicyDeleted(ctx context.Context, conn *networkfirewall.Client, arn string, timeout time.Duration) (*networkfirewall.DescribeFirewallPolicyOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{networkfirewall.ResourceStatusDeleting}, + Pending: enum.Slice(awstypes.ResourceStatusDeleting), Target: []string{}, Refresh: statusFirewallPolicy(ctx, conn, arn), Timeout: timeout, @@ -390,248 +397,262 @@ func waitFirewallPolicyDeleted(ctx context.Context, conn *networkfirewall.Networ return nil, err } -func expandPolicyVariables(tfMap map[string]interface{}) *networkfirewall.PolicyVariables { +func expandPolicyVariables(tfMap map[string]interface{}) *awstypes.PolicyVariables { if tfMap == nil { return nil } - policyVariables := &networkfirewall.PolicyVariables{} + apiObject := &awstypes.PolicyVariables{} - if rvMap, ok := tfMap["rule_variables"].(*schema.Set); ok && rvMap.Len() > 0 { - policyVariables.RuleVariables = expandIPSets(rvMap.List()) + if v, ok := tfMap["rule_variables"].(*schema.Set); ok && v.Len() > 0 { + apiObject.RuleVariables = expandIPSets(v.List()) } - return policyVariables + return apiObject } -func expandStatefulEngineOptions(l []interface{}) *networkfirewall.StatefulEngineOptions { - if len(l) == 0 || l[0] == nil { +func expandStatefulEngineOptions(tfList []interface{}) *awstypes.StatefulEngineOptions { + if len(tfList) == 0 || tfList[0] == nil { return nil } - options := &networkfirewall.StatefulEngineOptions{} + apiObject := &awstypes.StatefulEngineOptions{} - m := l[0].(map[string]interface{}) - if v, ok := m["rule_order"].(string); ok && v != "" { - options.RuleOrder = aws.String(v) + tfMap := tfList[0].(map[string]interface{}) + + if v, ok := tfMap["rule_order"].(string); ok && v != "" { + apiObject.RuleOrder = awstypes.RuleOrder(v) } - if v, ok := m["stream_exception_policy"].(string); ok && v != "" { - 
options.StreamExceptionPolicy = aws.String(v) + if v, ok := tfMap["stream_exception_policy"].(string); ok && v != "" { + apiObject.StreamExceptionPolicy = awstypes.StreamExceptionPolicy(v) } - return options + return apiObject } -func expandStatefulRuleGroupOverride(l []interface{}) *networkfirewall.StatefulRuleGroupOverride { - if len(l) == 0 || l[0] == nil { +func expandStatefulRuleGroupOverride(tfList []interface{}) *awstypes.StatefulRuleGroupOverride { + if len(tfList) == 0 || tfList[0] == nil { return nil } - lRaw := l[0].(map[string]interface{}) - override := &networkfirewall.StatefulRuleGroupOverride{} + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.StatefulRuleGroupOverride{} - if v, ok := lRaw[names.AttrAction].(string); ok && v != "" { - override.SetAction(v) + if v, ok := tfMap[names.AttrAction].(string); ok && v != "" { + apiObject.Action = awstypes.OverrideAction(v) } - return override + return apiObject } -func expandStatefulRuleGroupReferences(l []interface{}) []*networkfirewall.StatefulRuleGroupReference { - if len(l) == 0 || l[0] == nil { +func expandStatefulRuleGroupReferences(tfList []interface{}) []awstypes.StatefulRuleGroupReference { + if len(tfList) == 0 || tfList[0] == nil { return nil } - references := make([]*networkfirewall.StatefulRuleGroupReference, 0, len(l)) - for _, tfMapRaw := range l { + + apiObjects := make([]awstypes.StatefulRuleGroupReference, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - reference := &networkfirewall.StatefulRuleGroupReference{} + apiObject := awstypes.StatefulRuleGroupReference{} + + if v, ok := tfMap["override"].([]interface{}); ok && len(v) > 0 { + apiObject.Override = expandStatefulRuleGroupOverride(v) + } if v, ok := tfMap[names.AttrPriority].(int); ok && v > 0 { - reference.Priority = aws.Int64(int64(v)) + apiObject.Priority = aws.Int32(int32(v)) } if v, ok := tfMap[names.AttrResourceARN].(string); ok && v 
!= "" { - reference.ResourceArn = aws.String(v) + apiObject.ResourceArn = aws.String(v) } - if v, ok := tfMap["override"].([]interface{}); ok && len(v) > 0 { - reference.Override = expandStatefulRuleGroupOverride(v) - } - - references = append(references, reference) + apiObjects = append(apiObjects, apiObject) } - return references + return apiObjects } -func expandStatelessRuleGroupReferences(l []interface{}) []*networkfirewall.StatelessRuleGroupReference { - if len(l) == 0 || l[0] == nil { +func expandStatelessRuleGroupReferences(tfList []interface{}) []awstypes.StatelessRuleGroupReference { + if len(tfList) == 0 || tfList[0] == nil { return nil } - references := make([]*networkfirewall.StatelessRuleGroupReference, 0, len(l)) - for _, tfMapRaw := range l { + + apiObjects := make([]awstypes.StatelessRuleGroupReference, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - reference := &networkfirewall.StatelessRuleGroupReference{} + + apiObject := awstypes.StatelessRuleGroupReference{} + if v, ok := tfMap[names.AttrPriority].(int); ok && v > 0 { - reference.Priority = aws.Int64(int64(v)) + apiObject.Priority = aws.Int32(int32(v)) } if v, ok := tfMap[names.AttrResourceARN].(string); ok && v != "" { - reference.ResourceArn = aws.String(v) + apiObject.ResourceArn = aws.String(v) } - references = append(references, reference) + + apiObjects = append(apiObjects, apiObject) } - return references + + return apiObjects } -func expandFirewallPolicy(l []interface{}) *networkfirewall.FirewallPolicy { - if len(l) == 0 || l[0] == nil { +func expandFirewallPolicy(tfList []interface{}) *awstypes.FirewallPolicy { + if len(tfList) == 0 || tfList[0] == nil { return nil } - lRaw := l[0].(map[string]interface{}) - policy := &networkfirewall.FirewallPolicy{ - StatelessDefaultActions: flex.ExpandStringSet(lRaw["stateless_default_actions"].(*schema.Set)), - StatelessFragmentDefaultActions: 
flex.ExpandStringSet(lRaw["stateless_fragment_default_actions"].(*schema.Set)), + + tfMap := tfList[0].(map[string]interface{}) + apiObject := &awstypes.FirewallPolicy{ + StatelessDefaultActions: flex.ExpandStringValueSet(tfMap["stateless_default_actions"].(*schema.Set)), + StatelessFragmentDefaultActions: flex.ExpandStringValueSet(tfMap["stateless_fragment_default_actions"].(*schema.Set)), } - if v, ok := lRaw["policy_variables"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - policy.PolicyVariables = expandPolicyVariables(v.([]interface{})[0].(map[string]interface{})) + if v, ok := tfMap["policy_variables"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + apiObject.PolicyVariables = expandPolicyVariables(v.([]interface{})[0].(map[string]interface{})) } - if v, ok := lRaw["stateful_default_actions"].(*schema.Set); ok && v.Len() > 0 { - policy.StatefulDefaultActions = flex.ExpandStringSet(v) + if v, ok := tfMap["stateful_default_actions"].(*schema.Set); ok && v.Len() > 0 { + apiObject.StatefulDefaultActions = flex.ExpandStringValueSet(v) } - if v, ok := lRaw["stateful_engine_options"].([]interface{}); ok && len(v) > 0 { - policy.StatefulEngineOptions = expandStatefulEngineOptions(v) + if v, ok := tfMap["stateful_engine_options"].([]interface{}); ok && len(v) > 0 { + apiObject.StatefulEngineOptions = expandStatefulEngineOptions(v) } - if v, ok := lRaw["stateful_rule_group_reference"].(*schema.Set); ok && v.Len() > 0 { - policy.StatefulRuleGroupReferences = expandStatefulRuleGroupReferences(v.List()) + if v, ok := tfMap["stateful_rule_group_reference"].(*schema.Set); ok && v.Len() > 0 { + apiObject.StatefulRuleGroupReferences = expandStatefulRuleGroupReferences(v.List()) } - if v, ok := lRaw["stateless_custom_action"].(*schema.Set); ok && v.Len() > 0 { - policy.StatelessCustomActions = expandCustomActions(v.List()) + if v, ok := tfMap["stateless_custom_action"].(*schema.Set); ok && v.Len() > 0 { + 
apiObject.StatelessCustomActions = expandCustomActions(v.List()) } - if v, ok := lRaw["stateless_rule_group_reference"].(*schema.Set); ok && v.Len() > 0 { - policy.StatelessRuleGroupReferences = expandStatelessRuleGroupReferences(v.List()) + if v, ok := tfMap["stateless_rule_group_reference"].(*schema.Set); ok && v.Len() > 0 { + apiObject.StatelessRuleGroupReferences = expandStatelessRuleGroupReferences(v.List()) } - if v, ok := lRaw["tls_inspection_configuration_arn"].(string); ok && v != "" { - policy.TLSInspectionConfigurationArn = aws.String(v) + if v, ok := tfMap["tls_inspection_configuration_arn"].(string); ok && v != "" { + apiObject.TLSInspectionConfigurationArn = aws.String(v) } - return policy + return apiObject } -func flattenFirewallPolicy(policy *networkfirewall.FirewallPolicy) []interface{} { - if policy == nil { +func flattenFirewallPolicy(apiObject *awstypes.FirewallPolicy) []interface{} { + if apiObject == nil { return []interface{}{} } - p := map[string]interface{}{} - if policy.PolicyVariables != nil { - p["policy_variables"] = flattenPolicyVariables(policy.PolicyVariables) + + tfMap := map[string]interface{}{} + + if apiObject.PolicyVariables != nil { + tfMap["policy_variables"] = flattenPolicyVariables(apiObject.PolicyVariables) } - if policy.StatefulDefaultActions != nil { - p["stateful_default_actions"] = flex.FlattenStringSet(policy.StatefulDefaultActions) + if apiObject.StatefulDefaultActions != nil { + tfMap["stateful_default_actions"] = apiObject.StatefulDefaultActions } - if policy.StatefulEngineOptions != nil { - p["stateful_engine_options"] = flattenStatefulEngineOptions(policy.StatefulEngineOptions) + if apiObject.StatefulEngineOptions != nil { + tfMap["stateful_engine_options"] = flattenStatefulEngineOptions(apiObject.StatefulEngineOptions) } - if policy.StatefulRuleGroupReferences != nil { - p["stateful_rule_group_reference"] = flattenPolicyStatefulRuleGroupReference(policy.StatefulRuleGroupReferences) + if 
apiObject.StatefulRuleGroupReferences != nil { + tfMap["stateful_rule_group_reference"] = flattenPolicyStatefulRuleGroupReferences(apiObject.StatefulRuleGroupReferences) } - if policy.StatelessCustomActions != nil { - p["stateless_custom_action"] = flattenCustomActions(policy.StatelessCustomActions) + if apiObject.StatelessCustomActions != nil { + tfMap["stateless_custom_action"] = flattenCustomActions(apiObject.StatelessCustomActions) } - if policy.StatelessDefaultActions != nil { - p["stateless_default_actions"] = flex.FlattenStringSet(policy.StatelessDefaultActions) + if apiObject.StatelessDefaultActions != nil { + tfMap["stateless_default_actions"] = apiObject.StatelessDefaultActions } - if policy.StatelessFragmentDefaultActions != nil { - p["stateless_fragment_default_actions"] = flex.FlattenStringSet(policy.StatelessFragmentDefaultActions) + if apiObject.StatelessFragmentDefaultActions != nil { + tfMap["stateless_fragment_default_actions"] = apiObject.StatelessFragmentDefaultActions } - if policy.StatelessRuleGroupReferences != nil { - p["stateless_rule_group_reference"] = flattenPolicyStatelessRuleGroupReference(policy.StatelessRuleGroupReferences) + if apiObject.StatelessRuleGroupReferences != nil { + tfMap["stateless_rule_group_reference"] = flattenPolicyStatelessRuleGroupReferences(apiObject.StatelessRuleGroupReferences) } - if policy.TLSInspectionConfigurationArn != nil { - p["tls_inspection_configuration_arn"] = aws.StringValue(policy.TLSInspectionConfigurationArn) + if apiObject.TLSInspectionConfigurationArn != nil { + tfMap["tls_inspection_configuration_arn"] = aws.ToString(apiObject.TLSInspectionConfigurationArn) } - return []interface{}{p} + return []interface{}{tfMap} } -func flattenPolicyVariables(variables *networkfirewall.PolicyVariables) []interface{} { - if variables == nil { +func flattenPolicyVariables(apiObject *awstypes.PolicyVariables) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - 
"rule_variables": flattenIPSets(variables.RuleVariables), + tfMap := map[string]interface{}{ + "rule_variables": flattenIPSets(apiObject.RuleVariables), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenStatefulEngineOptions(options *networkfirewall.StatefulEngineOptions) []interface{} { - if options == nil { +func flattenStatefulEngineOptions(apiObject *awstypes.StatefulEngineOptions) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{} - if options.RuleOrder != nil { - m["rule_order"] = aws.StringValue(options.RuleOrder) - } - if options.StreamExceptionPolicy != nil { - m["stream_exception_policy"] = aws.StringValue(options.StreamExceptionPolicy) + tfMap := map[string]interface{}{ + "rule_order": apiObject.RuleOrder, + "stream_exception_policy": apiObject.StreamExceptionPolicy, } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenStatefulRuleGroupOverride(override *networkfirewall.StatefulRuleGroupOverride) []interface{} { - if override == nil { +func flattenStatefulRuleGroupOverride(apiObject *awstypes.StatefulRuleGroupOverride) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - names.AttrAction: aws.StringValue(override.Action), + tfMap := map[string]interface{}{ + names.AttrAction: apiObject.Action, } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenPolicyStatefulRuleGroupReference(l []*networkfirewall.StatefulRuleGroupReference) []interface{} { - references := make([]interface{}, 0, len(l)) - for _, ref := range l { - reference := map[string]interface{}{ - names.AttrResourceARN: aws.StringValue(ref.ResourceArn), +func flattenPolicyStatefulRuleGroupReferences(apiObjects []awstypes.StatefulRuleGroupReference) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + names.AttrResourceARN: 
aws.ToString(apiObject.ResourceArn), } - if ref.Priority != nil { - reference[names.AttrPriority] = int(aws.Int64Value(ref.Priority)) + + if apiObject.Override != nil { + tfMap["override"] = flattenStatefulRuleGroupOverride(apiObject.Override) } - if ref.Override != nil { - reference["override"] = flattenStatefulRuleGroupOverride(ref.Override) + if apiObject.Priority != nil { + tfMap[names.AttrPriority] = aws.ToInt32(apiObject.Priority) } - references = append(references, reference) + tfList = append(tfList, tfMap) } - return references + return tfList } -func flattenPolicyStatelessRuleGroupReference(l []*networkfirewall.StatelessRuleGroupReference) []interface{} { - references := make([]interface{}, 0, len(l)) - for _, ref := range l { - reference := map[string]interface{}{ - names.AttrPriority: int(aws.Int64Value(ref.Priority)), - names.AttrResourceARN: aws.StringValue(ref.ResourceArn), +func flattenPolicyStatelessRuleGroupReferences(apiObjects []awstypes.StatelessRuleGroupReference) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + names.AttrPriority: aws.ToInt32(apiObject.Priority), + names.AttrResourceARN: aws.ToString(apiObject.ResourceArn), } - references = append(references, reference) + + tfList = append(tfList, tfMap) } - return references + + return tfList } diff --git a/internal/service/networkfirewall/firewall_policy_data_source.go b/internal/service/networkfirewall/firewall_policy_data_source.go index a53040a7ca0..fcabb20d3fa 100644 --- a/internal/service/networkfirewall/firewall_policy_data_source.go +++ b/internal/service/networkfirewall/firewall_policy_data_source.go @@ -5,181 +5,173 @@ package networkfirewall import ( "context" - "log" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/sdkv2" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_networkfirewall_firewall_policy") -func DataSourceFirewallPolicy() *schema.Resource { +// @SDKDataSource("aws_networkfirewall_firewall_policy", name="Firewall Policy") +// @Tags +func dataSourceFirewallPolicy() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceFirewallPolicyRead, - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - AtLeastOneOf: []string{names.AttrARN, names.AttrName}, - Optional: true, - ValidateFunc: verify.ValidARN, - }, - names.AttrDescription: { - Type: schema.TypeString, - Computed: true, - }, - "firewall_policy": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "stateful_default_actions": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "stateful_engine_options": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "rule_order": { - Type: schema.TypeString, - Computed: true, - }, - "stream_exception_policy": { - Type: schema.TypeString, - Computed: true, + + SchemaFunc: func() map[string]*schema.Schema { + return map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + AtLeastOneOf: []string{names.AttrARN, names.AttrName}, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + names.AttrDescription: { + Type: schema.TypeString, + Computed: true, + }, + "firewall_policy": { + Type: 
schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "stateful_default_actions": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "stateful_engine_options": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_order": { + Type: schema.TypeString, + Computed: true, + }, + "stream_exception_policy": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "stateful_rule_group_reference": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "override": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrAction: { - Type: schema.TypeString, - Optional: true, + "stateful_rule_group_reference": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "override": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrAction: { + Type: schema.TypeString, + Optional: true, + }, }, }, }, - }, - names.AttrPriority: { - Type: schema.TypeInt, - Computed: true, - }, - names.AttrResourceARN: { - Type: schema.TypeString, - Computed: true, + names.AttrPriority: { + Type: schema.TypeInt, + Computed: true, + }, + names.AttrResourceARN: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "stateless_custom_action": customActionSchemaDataSource(), - "stateless_default_actions": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "stateless_fragment_default_actions": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "stateless_rule_group_reference": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - 
names.AttrPriority: { - Type: schema.TypeInt, - Computed: true, - }, - names.AttrResourceARN: { - Type: schema.TypeString, - Computed: true, + "stateless_custom_action": sdkv2.DataSourcePropertyFromResourceProperty(customActionSchema()), + "stateless_default_actions": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "stateless_fragment_default_actions": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "stateless_rule_group_reference": { + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrPriority: { + Type: schema.TypeInt, + Computed: true, + }, + names.AttrResourceARN: { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - "tls_inspection_configuration_arn": { - Type: schema.TypeString, - Computed: true, + "tls_inspection_configuration_arn": { + Type: schema.TypeString, + Computed: true, + }, }, }, }, - }, - names.AttrName: { - Type: schema.TypeString, - Optional: true, - AtLeastOneOf: []string{names.AttrARN, names.AttrName}, - ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z-]{1,128}$`), "Must have 1-128 valid characters: a-z, A-Z, 0-9 and -(hyphen)"), - }, - names.AttrTags: tftags.TagsSchemaComputed(), - "update_token": { - Type: schema.TypeString, - Computed: true, - }, + names.AttrName: { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{names.AttrARN, names.AttrName}, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z-]{1,128}$`), "Must have 1-128 valid characters: a-z, A-Z, 0-9 and -(hyphen)"), + }, + names.AttrTags: tftags.TagsSchemaComputed(), + "update_token": { + Type: schema.TypeString, + Computed: true, + }, + } }, } } func dataSourceFirewallPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := 
meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - - arn := d.Get(names.AttrARN).(string) - name := d.Get(names.AttrName).(string) - - log.Printf("[DEBUG] Reading NetworkFirewall Firewall Policy %s %s", arn, name) + input := &networkfirewall.DescribeFirewallPolicyInput{} + if v := d.Get(names.AttrARN).(string); v != "" { + input.FirewallPolicyArn = aws.String(v) + } + if v := d.Get(names.AttrName).(string); v != "" { + input.FirewallPolicyName = aws.String(v) + } - output, err := FindFirewallPolicyByNameAndARN(ctx, conn, arn, name) + output, err := findFirewallPolicy(ctx, conn, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Firewall Policy (%s, %s): %s", arn, name, err) - } - - if output == nil { - return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Firewall Policy (%s, %s): empty output", arn, name) - } - if output.FirewallPolicyResponse == nil { - return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Firewall Policy (%s, %s): empty output.FirewallPolicyResponse", arn, name) + return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Firewall Policy: %s", err) } resp := output.FirewallPolicyResponse - policy := output.FirewallPolicy - - d.SetId(aws.StringValue(resp.FirewallPolicyArn)) + d.SetId(aws.ToString(resp.FirewallPolicyArn)) d.Set(names.AttrARN, resp.FirewallPolicyArn) d.Set(names.AttrDescription, resp.Description) - d.Set(names.AttrName, resp.FirewallPolicyName) - d.Set("update_token", output.UpdateToken) - - if err := d.Set("firewall_policy", flattenFirewallPolicy(policy)); err != nil { + if err := d.Set("firewall_policy", flattenFirewallPolicy(output.FirewallPolicy)); err != nil { return sdkdiag.AppendErrorf(diags, "setting firewall_policy: %s", err) } + d.Set(names.AttrName, resp.FirewallPolicyName) + d.Set("update_token", output.UpdateToken) - tags := KeyValueTags(ctx, 
resp.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - - if err := d.Set(names.AttrTags, tags.Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) - } + setTagsOut(ctx, resp.Tags) return diags } diff --git a/internal/service/networkfirewall/firewall_policy_data_source_test.go b/internal/service/networkfirewall/firewall_policy_data_source_test.go index 72d06b0e6e5..cb0f80fb7bc 100644 --- a/internal/service/networkfirewall/firewall_policy_data_source_test.go +++ b/internal/service/networkfirewall/firewall_policy_data_source_test.go @@ -7,7 +7,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/networkfirewall" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" @@ -16,7 +15,6 @@ import ( func TestAccNetworkFirewallFirewallPolicyDataSource_arn(t *testing.T) { ctx := acctest.Context(t) - var firewallPolicy networkfirewall.DescribeFirewallPolicyOutput rName := sdkacctest.RandomWithPrefix("resource-test-terraform") resourceName := "aws_networkfirewall_firewall_policy.test" datasourceName := "data.aws_networkfirewall_firewall_policy.test" @@ -29,7 +27,6 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_arn(t *testing.T) { { Config: testAccFirewallPolicyDataSourceConfig_arn(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckFirewallPolicyExists(ctx, resourceName, &firewallPolicy), resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.#", resourceName, "firewall_policy.#"), @@ -48,7 +45,6 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_arn(t *testing.T) { func TestAccNetworkFirewallFirewallPolicyDataSource_name(t *testing.T) { ctx := 
acctest.Context(t) - var firewallPolicy networkfirewall.DescribeFirewallPolicyOutput rName := sdkacctest.RandomWithPrefix("resource-test-terraform") resourceName := "aws_networkfirewall_firewall_policy.test" datasourceName := "data.aws_networkfirewall_firewall_policy.test" @@ -61,7 +57,6 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_name(t *testing.T) { { Config: testAccFirewallPolicyDataSourceConfig_name(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckFirewallPolicyExists(ctx, resourceName, &firewallPolicy), resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.#", resourceName, "firewall_policy.#"), @@ -80,7 +75,6 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_name(t *testing.T) { func TestAccNetworkFirewallFirewallPolicyDataSource_nameAndARN(t *testing.T) { ctx := acctest.Context(t) - var firewallPolicy networkfirewall.DescribeFirewallPolicyOutput rName := sdkacctest.RandomWithPrefix("resource-test-terraform") resourceName := "aws_networkfirewall_firewall_policy.test" datasourceName := "data.aws_networkfirewall_firewall_policy.test" @@ -93,7 +87,6 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_nameAndARN(t *testing.T) { { Config: testAccFirewallPolicyDataSourceConfig_nameAndARN(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckFirewallPolicyExists(ctx, resourceName, &firewallPolicy), resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.#", resourceName, "firewall_policy.#"), @@ -112,7 +105,6 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_nameAndARN(t 
*testing.T) { func TestAccNetworkFirewallFirewallPolicyDataSource_withOverriddenManagedRuleGroup(t *testing.T) { ctx := acctest.Context(t) - var firewallPolicy networkfirewall.DescribeFirewallPolicyOutput rName := sdkacctest.RandomWithPrefix("resource-test-terraform") resourceName := "aws_networkfirewall_firewall_policy.test" datasourceName := "data.aws_networkfirewall_firewall_policy.test" @@ -125,7 +117,6 @@ func TestAccNetworkFirewallFirewallPolicyDataSource_withOverriddenManagedRuleGro { Config: testAccFirewallPolicyDataSourceConfig_withOverriddenManagedRuleGroup(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckFirewallPolicyExists(ctx, resourceName, &firewallPolicy), resource.TestCheckResourceAttrPair(datasourceName, names.AttrARN, resourceName, names.AttrARN), resource.TestCheckResourceAttrPair(datasourceName, names.AttrDescription, resourceName, names.AttrDescription), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.#", resourceName, "firewall_policy.#"), resource.TestCheckResourceAttrPair(datasourceName, "firewall_policy.0.stateless_fragment_default_actions.#", resourceName, "firewall_policy.0.stateless_fragment_default_actions.#"), diff --git a/internal/service/networkfirewall/firewall_policy_test.go b/internal/service/networkfirewall/firewall_policy_test.go index 625e151adbf..0b84c4dc2f7 100644 --- a/internal/service/networkfirewall/firewall_policy_test.go +++ b/internal/service/networkfirewall/firewall_policy_test.go @@ -8,8 +8,9 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -225,8 +226,8 
@@ func TestAccNetworkFirewallFirewallPolicy_statefulEngineOption(t *testing.T) { testAccCheckFirewallPolicyExists(ctx, resourceName, &firewallPolicy), resource.TestCheckResourceAttr(resourceName, "firewall_policy.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", networkfirewall.RuleOrderStrictOrder), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", networkfirewall.StreamExceptionPolicyDrop), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", string(awstypes.RuleOrderStrictOrder)), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", string(awstypes.StreamExceptionPolicyDrop)), ), }, { @@ -256,8 +257,8 @@ func TestAccNetworkFirewallFirewallPolicy_updateStatefulEngineOption(t *testing. 
testAccCheckFirewallPolicyExists(ctx, resourceName, &firewallPolicy1), resource.TestCheckResourceAttr(resourceName, "firewall_policy.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", networkfirewall.RuleOrderDefaultActionOrder), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", networkfirewall.StreamExceptionPolicyContinue), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", string(awstypes.RuleOrderDefaultActionOrder)), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", string(awstypes.StreamExceptionPolicyContinue)), ), }, { @@ -275,8 +276,8 @@ func TestAccNetworkFirewallFirewallPolicy_updateStatefulEngineOption(t *testing. 
testAccCheckFirewallPolicyRecreated(&firewallPolicy2, &firewallPolicy3), resource.TestCheckResourceAttr(resourceName, "firewall_policy.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", networkfirewall.RuleOrderStrictOrder), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", networkfirewall.StreamExceptionPolicyReject), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", string(awstypes.RuleOrderStrictOrder)), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", string(awstypes.StreamExceptionPolicyReject)), ), }, { @@ -306,7 +307,7 @@ func TestAccNetworkFirewallFirewallPolicy_statefulEngineOptionsSingle(t *testing testAccCheckFirewallPolicyExists(ctx, resourceName, &firewallPolicy), resource.TestCheckResourceAttr(resourceName, "firewall_policy.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", networkfirewall.RuleOrderDefaultActionOrder), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", string(awstypes.RuleOrderDefaultActionOrder)), resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", ""), ), }, @@ -317,7 +318,7 @@ func TestAccNetworkFirewallFirewallPolicy_statefulEngineOptionsSingle(t *testing resource.TestCheckResourceAttr(resourceName, "firewall_policy.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.#", acctest.Ct1), 
resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.rule_order", ""), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", networkfirewall.StreamExceptionPolicyReject), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_engine_options.0.stream_exception_policy", string(awstypes.StreamExceptionPolicyReject)), ), }, { @@ -354,9 +355,10 @@ func TestAccNetworkFirewallFirewallPolicy_statefulRuleGroupReference(t *testing. ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"firewall_policy.0.stateful_rule_group_reference.0.priority"}, }, }, }) @@ -381,9 +383,10 @@ func TestAccNetworkFirewallFirewallPolicy_statefulRuleGroupReferenceManaged(t *t ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"firewall_policy.0.stateful_rule_group_reference.0.priority"}, }, }, }) @@ -466,9 +469,10 @@ func TestAccNetworkFirewallFirewallPolicy_multipleStatefulRuleGroupReferences(t ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"firewall_policy.0.stateful_rule_group_reference.0.priority"}, }, }, }) @@ -511,7 +515,7 @@ func TestAccNetworkFirewallFirewallPolicy_statefulRuleGroupOverrideActionReferen var firewallPolicy networkfirewall.DescribeFirewallPolicyOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_networkfirewall_firewall_policy.test" - override_action := networkfirewall.OverrideActionDropToAlert + overrideAction := string(awstypes.OverrideActionDropToAlert) 
resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, @@ -520,16 +524,17 @@ func TestAccNetworkFirewallFirewallPolicy_statefulRuleGroupOverrideActionReferen CheckDestroy: testAccCheckFirewallPolicyDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccFirewallPolicyConfig_statefulRuleGroupReferenceManagedOverrideAction(rName, override_action), + Config: testAccFirewallPolicyConfig_statefulRuleGroupReferenceManagedOverrideAction(rName, overrideAction), Check: resource.ComposeTestCheckFunc( testAccCheckFirewallPolicyExists(ctx, resourceName, &firewallPolicy), - resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_rule_group_reference.0.override.0.action", override_action), + resource.TestCheckResourceAttr(resourceName, "firewall_policy.0.stateful_rule_group_reference.0.override.0.action", overrideAction), ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"firewall_policy.0.stateful_rule_group_reference.0.priority"}, }, }, }) @@ -915,9 +920,10 @@ func TestAccNetworkFirewallFirewallPolicy_statefulRuleGroupReferenceAndCustomAct ), }, { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"firewall_policy.0.stateful_rule_group_reference.0.priority"}, }, }, }) @@ -1039,7 +1045,7 @@ func testAccCheckFirewallPolicyDestroy(ctx context.Context) resource.TestCheckFu continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) _, err := tfnetworkfirewall.FindFirewallPolicyByARN(ctx, conn, rs.Primary.ID) @@ -1065,11 +1071,7 @@ func testAccCheckFirewallPolicyExists(ctx context.Context, n string, v 
*networkf return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No NetworkFirewall Firewall Policy ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) output, err := tfnetworkfirewall.FindFirewallPolicyByARN(ctx, conn, rs.Primary.ID) @@ -1085,7 +1087,7 @@ func testAccCheckFirewallPolicyExists(ctx context.Context, n string, v *networkf func testAccCheckFirewallPolicyNotRecreated(i, j *networkfirewall.DescribeFirewallPolicyOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - if before, after := aws.StringValue(i.FirewallPolicyResponse.FirewallPolicyId), aws.StringValue(j.FirewallPolicyResponse.FirewallPolicyId); before != after { + if before, after := aws.ToString(i.FirewallPolicyResponse.FirewallPolicyId), aws.ToString(j.FirewallPolicyResponse.FirewallPolicyId); before != after { return fmt.Errorf("NetworkFirewall Firewall Policy was recreated. 
got: %s, expected: %s", after, before) } return nil @@ -1094,7 +1096,7 @@ func testAccCheckFirewallPolicyNotRecreated(i, j *networkfirewall.DescribeFirewa func testAccCheckFirewallPolicyRecreated(i, j *networkfirewall.DescribeFirewallPolicyOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - if before, after := aws.StringValue(i.FirewallPolicyResponse.FirewallPolicyId), aws.StringValue(j.FirewallPolicyResponse.FirewallPolicyId); before == after { + if before, after := aws.ToString(i.FirewallPolicyResponse.FirewallPolicyId), aws.ToString(j.FirewallPolicyResponse.FirewallPolicyId); before == after { return fmt.Errorf("NetworkFirewall Firewall Policy (%s) was not recreated", before) } return nil diff --git a/internal/service/networkfirewall/firewall_test.go b/internal/service/networkfirewall/firewall_test.go index 1df937ad479..5d74130d105 100644 --- a/internal/service/networkfirewall/firewall_test.go +++ b/internal/service/networkfirewall/firewall_test.go @@ -10,7 +10,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/networkfirewall" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -55,7 +56,7 @@ func TestAccNetworkFirewallFirewall_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "subnet_mapping.#", acctest.Ct1), resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_mapping.*.subnet_id", subnetResourceName, names.AttrID), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "subnet_mapping.*", map[string]string{ - names.AttrIPAddressType: networkfirewall.IPAddressTypeIpv4, + names.AttrIPAddressType: string(awstypes.IPAddressTypeIpv4), }), resource.TestCheckResourceAttr(resourceName, 
acctest.CtTagsPercent, acctest.Ct0), resource.TestCheckResourceAttrSet(resourceName, "update_token"), @@ -104,7 +105,7 @@ func TestAccNetworkFirewallFirewall_dualstackSubnet(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "subnet_mapping.#", acctest.Ct1), resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_mapping.*.subnet_id", subnetResourceName, names.AttrID), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "subnet_mapping.*", map[string]string{ - names.AttrIPAddressType: networkfirewall.IPAddressTypeDualstack, + names.AttrIPAddressType: string(awstypes.IPAddressTypeDualstack), }), resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), resource.TestCheckResourceAttrSet(resourceName, "update_token"), @@ -433,7 +434,7 @@ func testAccCheckFirewallDestroy(ctx context.Context) resource.TestCheckFunc { continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) _, err := tfnetworkfirewall.FindFirewallByARN(ctx, conn, rs.Primary.ID) @@ -459,11 +460,7 @@ func testAccCheckFirewallExists(ctx context.Context, n string) resource.TestChec return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No NetworkFirewall Firewall ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) _, err := tfnetworkfirewall.FindFirewallByARN(ctx, conn, rs.Primary.ID) @@ -472,11 +469,11 @@ func testAccCheckFirewallExists(ctx context.Context, n string) resource.TestChec } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) input := &networkfirewall.ListFirewallsInput{} - _, err := conn.ListFirewallsWithContext(ctx, input) + _, 
err := conn.ListFirewalls(ctx, input) if acctest.PreCheckSkipError(err) { t.Skipf("skipping acceptance testing: %s", err) diff --git a/internal/service/networkfirewall/generate.go b/internal/service/networkfirewall/generate.go index 98d7418946b..eab93db6667 100644 --- a/internal/service/networkfirewall/generate.go +++ b/internal/service/networkfirewall/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -ListTags -ServiceTagsSlice -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/networkfirewall/helpers.go b/internal/service/networkfirewall/helpers.go index 9d0d4ab8242..cd3b4f11752 100644 --- a/internal/service/networkfirewall/helpers.go +++ b/internal/service/networkfirewall/helpers.go @@ -5,10 +5,12 @@ package networkfirewall import ( "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -24,44 +26,49 @@ func encryptionConfigurationSchema() *schema.Schema { Optional: true, }, names.AttrType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.EncryptionType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.EncryptionType](), }, }, }, } } -func 
expandEncryptionConfiguration(tfList []interface{}) *networkfirewall.EncryptionConfiguration { - ec := &networkfirewall.EncryptionConfiguration{Type: aws.String(networkfirewall.EncryptionTypeAwsOwnedKmsKey)} +func expandEncryptionConfiguration(tfList []interface{}) *awstypes.EncryptionConfiguration { + apiObject := &awstypes.EncryptionConfiguration{ + Type: awstypes.EncryptionTypeAwsOwnedKmsKey, + } + if len(tfList) == 1 && tfList[0] != nil { tfMap := tfList[0].(map[string]interface{}) + if v, ok := tfMap[names.AttrKeyID].(string); ok { - ec.KeyId = aws.String(v) + apiObject.KeyId = aws.String(v) } if v, ok := tfMap[names.AttrType].(string); ok { - ec.Type = aws.String(v) + apiObject.Type = awstypes.EncryptionType(v) } } - return ec + return apiObject } -func flattenEncryptionConfiguration(apiObject *networkfirewall.EncryptionConfiguration) []interface{} { - if apiObject == nil || apiObject.Type == nil { +func flattenEncryptionConfiguration(apiObject *awstypes.EncryptionConfiguration) []interface{} { + if apiObject == nil || apiObject.Type == "" { return nil } - if aws.StringValue(apiObject.Type) == networkfirewall.EncryptionTypeAwsOwnedKmsKey { + + if apiObject.Type == awstypes.EncryptionTypeAwsOwnedKmsKey { return nil } - m := map[string]interface{}{ - names.AttrKeyID: aws.StringValue(apiObject.KeyId), - names.AttrType: aws.StringValue(apiObject.Type), + tfMap := map[string]interface{}{ + names.AttrKeyID: aws.ToString(apiObject.KeyId), + names.AttrType: apiObject.Type, } - return []interface{}{m} + return []interface{}{tfMap} } func customActionSchema() *schema.Schema { @@ -111,178 +118,212 @@ func customActionSchema() *schema.Schema { } } -func expandCustomActions(l []interface{}) []*networkfirewall.CustomAction { - if len(l) == 0 || l[0] == nil { +func expandCustomActions(tfList []interface{}) []awstypes.CustomAction { + if len(tfList) == 0 || tfList[0] == nil { return nil } - customActions := make([]*networkfirewall.CustomAction, 0, len(l)) - for _, tfMapRaw 
:= range l { - customAction := &networkfirewall.CustomAction{} + apiObjects := make([]awstypes.CustomAction, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } + + apiObject := awstypes.CustomAction{} + if v, ok := tfMap["action_definition"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - customAction.ActionDefinition = expandActionDefinition(v) + apiObject.ActionDefinition = expandActionDefinition(v) } if v, ok := tfMap["action_name"].(string); ok && v != "" { - customAction.ActionName = aws.String(v) + apiObject.ActionName = aws.String(v) } - customActions = append(customActions, customAction) + + apiObjects = append(apiObjects, apiObject) } - return customActions + return apiObjects } -func expandActionDefinition(l []interface{}) *networkfirewall.ActionDefinition { - if l == nil || l[0] == nil { +func expandActionDefinition(tfList []interface{}) *awstypes.ActionDefinition { + if tfList == nil || tfList[0] == nil { return nil } - tfMap, ok := l[0].(map[string]interface{}) + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - customAction := &networkfirewall.ActionDefinition{} + + apiObject := &awstypes.ActionDefinition{} if v, ok := tfMap["publish_metric_action"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - customAction.PublishMetricAction = expandCustomActionPublishMetricAction(v) + apiObject.PublishMetricAction = expandPublishMetricAction(v) } - return customAction + return apiObject } -func expandCustomActionPublishMetricAction(l []interface{}) *networkfirewall.PublishMetricAction { - if len(l) == 0 || l[0] == nil { +func expandPublishMetricAction(tfList []interface{}) *awstypes.PublishMetricAction { + if len(tfList) == 0 || tfList[0] == nil { return nil } - tfMap, ok := l[0].(map[string]interface{}) + + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - action := &networkfirewall.PublishMetricAction{} - if tfSet, ok := 
tfMap["dimension"].(*schema.Set); ok && tfSet.Len() > 0 { - tfList := tfSet.List() - dimensions := make([]*networkfirewall.Dimension, 0, len(tfList)) + + apiObject := &awstypes.PublishMetricAction{} + + if v, ok := tfMap["dimension"].(*schema.Set); ok && v.Len() > 0 { + tfList := v.List() + dimensions := make([]awstypes.Dimension, 0, len(tfList)) + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - dimension := &networkfirewall.Dimension{ + + dimensions = append(dimensions, awstypes.Dimension{ Value: aws.String(tfMap[names.AttrValue].(string)), - } - dimensions = append(dimensions, dimension) + }) } - action.Dimensions = dimensions + + apiObject.Dimensions = dimensions } - return action + + return apiObject } -func flattenCustomActions(c []*networkfirewall.CustomAction) []interface{} { - if c == nil { +func flattenCustomActions(apiObjects []awstypes.CustomAction) []interface{} { + if apiObjects == nil { return []interface{}{} } - customActions := make([]interface{}, 0, len(c)) - for _, elem := range c { - m := map[string]interface{}{ - "action_definition": flattenActionDefinition(elem.ActionDefinition), - "action_name": aws.StringValue(elem.ActionName), + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + "action_definition": flattenActionDefinition(apiObject.ActionDefinition), + "action_name": aws.ToString(apiObject.ActionName), } - customActions = append(customActions, m) + + tfList = append(tfList, tfMap) } - return customActions + return tfList } -func flattenActionDefinition(v *networkfirewall.ActionDefinition) []interface{} { - if v == nil { +func flattenActionDefinition(apiObject *awstypes.ActionDefinition) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "publish_metric_action": flattenPublishMetricAction(v.PublishMetricAction), + + tfMap := map[string]interface{}{ + 
"publish_metric_action": flattenPublishMetricAction(apiObject.PublishMetricAction), } - return []interface{}{m} + + return []interface{}{tfMap} } -func flattenPublishMetricAction(m *networkfirewall.PublishMetricAction) []interface{} { - if m == nil { +func flattenPublishMetricAction(apiObject *awstypes.PublishMetricAction) []interface{} { + if apiObject == nil { return []interface{}{} } - metrics := map[string]interface{}{ - "dimension": flattenDimensions(m.Dimensions), + tfMap := map[string]interface{}{ + "dimension": flattenDimensions(apiObject.Dimensions), } - return []interface{}{metrics} + return []interface{}{tfMap} } -func flattenDimensions(d []*networkfirewall.Dimension) []interface{} { - dimensions := make([]interface{}, 0, len(d)) - for _, v := range d { - dimension := map[string]interface{}{ - names.AttrValue: aws.StringValue(v.Value), - } - dimensions = append(dimensions, dimension) +func flattenDimensions(apiObjects []awstypes.Dimension) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfList = append(tfList, map[string]interface{}{ + names.AttrValue: aws.ToString(apiObject.Value), + }) } - return dimensions + return tfList } func forceNewIfNotRuleOrderDefault(key string, d *schema.ResourceDiff) error { if d.Id() != "" && d.HasChange(key) { old, new := d.GetChange(key) - defaultRuleOrderOld := old == nil || old.(string) == "" || old.(string) == networkfirewall.RuleOrderDefaultActionOrder - defaultRuleOrderNew := new == nil || new.(string) == "" || new.(string) == networkfirewall.RuleOrderDefaultActionOrder + defaultRuleOrderOld := old == nil || old.(string) == "" || old.(string) == string(awstypes.RuleOrderDefaultActionOrder) + defaultRuleOrderNew := new == nil || new.(string) == "" || new.(string) == string(awstypes.RuleOrderDefaultActionOrder) if (defaultRuleOrderOld && !defaultRuleOrderNew) || (defaultRuleOrderNew && !defaultRuleOrderOld) { return d.ForceNew(key) } } + return nil } 
-func customActionSchemaDataSource() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "action_definition": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "publish_metric_action": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "dimension": { - Type: schema.TypeSet, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrValue: { - Type: schema.TypeString, - Computed: true, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - "action_name": { - Type: schema.TypeString, - Computed: true, - }, - }, - }, +func expandIPSets(tfList []interface{}) map[string]awstypes.IPSet { + if len(tfList) == 0 || tfList[0] == nil { + return nil } + + apiObject := make(map[string]awstypes.IPSet) + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + if k, ok := tfMap[names.AttrKey].(string); ok && k != "" { + if tfList, ok := tfMap["ip_set"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { + tfMap, ok := tfList[0].(map[string]interface{}) + if !ok { + continue + } + + if v, ok := tfMap["definition"].(*schema.Set); ok && v.Len() > 0 { + apiObject[k] = awstypes.IPSet{ + Definition: flex.ExpandStringValueSet(v), + } + } + } + } + } + + return apiObject +} + +func flattenIPSets(tfMap map[string]awstypes.IPSet) []interface{} { + if tfMap == nil { + return []interface{}{} + } + + tfList := make([]interface{}, 0, len(tfMap)) + + for k, v := range tfMap { + tfList = append(tfList, map[string]interface{}{ + names.AttrKey: k, + "ip_set": flattenIPSet(&v), + }) + } + + return tfList +} + +func flattenIPSet(apiObject *awstypes.IPSet) []interface{} { + if apiObject == nil { + return []interface{}{} + } + + tfMap := map[string]interface{}{ + "definition": 
apiObject.Definition, + } + + return []interface{}{tfMap} } diff --git a/internal/service/networkfirewall/logging_configuration.go b/internal/service/networkfirewall/logging_configuration.go index 5844aa053ff..904612e4226 100644 --- a/internal/service/networkfirewall/logging_configuration.go +++ b/internal/service/networkfirewall/logging_configuration.go @@ -9,19 +9,24 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_networkfirewall_logging_configuration") -func ResourceLoggingConfiguration() *schema.Resource { +// @SDKResource("aws_networkfirewall_logging_configuration", name="Logging Configuration") +func resourceLoggingConfiguration() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceLoggingConfigurationCreate, ReadWithoutTimeout: resourceLoggingConfigurationRead, @@ -34,9 +39,10 @@ func ResourceLoggingConfiguration() *schema.Resource { Schema: map[string]*schema.Schema{ "firewall_arn": { - Type: 
schema.TypeString, - Required: true, - ForceNew: true, + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, }, names.AttrLoggingConfiguration: { Type: schema.TypeList, @@ -58,14 +64,14 @@ func ResourceLoggingConfiguration() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, "log_destination_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.LogDestinationType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogDestinationType](), }, "log_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.LogType_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.LogType](), }, }, }, @@ -79,48 +85,36 @@ func ResourceLoggingConfiguration() *schema.Resource { func resourceLoggingConfigurationCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - firewallArn := d.Get("firewall_arn").(string) - - log.Printf("[DEBUG] Adding Logging Configuration to NetworkFirewall Firewall: %s", firewallArn) - - loggingConfigs := expandLoggingConfiguration(d.Get(names.AttrLoggingConfiguration).([]interface{})) - // cumulatively add the configured "log_destination_config" in "logging_configuration" - err := putLoggingConfiguration(ctx, conn, firewallArn, loggingConfigs) - if err != nil { + firewallARN := d.Get("firewall_arn").(string) + loggingConfigs := expandLoggingConfigurations(d.Get(names.AttrLoggingConfiguration).([]interface{})) + if err := addLoggingConfigurations(ctx, conn, firewallARN, loggingConfigs); err != nil { return sdkdiag.AppendFromErr(diags, err) } - d.SetId(firewallArn) + d.SetId(firewallARN) return append(diags, 
resourceLoggingConfigurationRead(ctx, d, meta)...) } func resourceLoggingConfigurationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + output, err := findLoggingConfigurationByARN(ctx, conn, d.Id()) - log.Printf("[DEBUG] Reading Logging Configuration for NetworkFirewall Firewall: %s", d.Id()) - - output, err := FindLoggingConfiguration(ctx, conn, d.Id()) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { - log.Printf("[WARN] Logging Configuration for NetworkFirewall Firewall (%s) not found, removing from state", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] NetworkFirewall Logging Configuration (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Logging Configuration for NetworkFirewall Firewall: %s: %s", d.Id(), err) - } - - if output == nil { - return sdkdiag.AppendErrorf(diags, "reading Logging Configuration for NetworkFirewall Firewall: %s: empty output", d.Id()) + return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Logging Configuration (%s): %s", d.Id(), err) } d.Set("firewall_arn", output.FirewallArn) - if err := d.Set(names.AttrLoggingConfiguration, flattenLoggingConfiguration(output.LoggingConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting logging_configuration: %s", err) } @@ -130,28 +124,23 @@ func resourceLoggingConfigurationRead(ctx context.Context, d *schema.ResourceDat func resourceLoggingConfigurationUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - - log.Printf("[DEBUG] Updating Logging Configuration for NetworkFirewall Firewall: %s", 
d.Id()) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) o, n := d.GetChange(names.AttrLoggingConfiguration) - // Remove destination configs one by one, if any + + // Remove destination configs one by one, if any. if oldConfig := o.([]interface{}); len(oldConfig) != 0 && oldConfig[0] != nil { - loggingConfig := expandLoggingConfigurationOnUpdate(oldConfig) - if loggingConfig != nil { - err := removeLoggingConfiguration(ctx, conn, d.Id(), loggingConfig) - if err != nil { + if loggingConfig := expandLoggingConfigurationOnUpdate(oldConfig); loggingConfig != nil { + if err := removeLoggingConfiguration(ctx, conn, d.Id(), loggingConfig); err != nil { return sdkdiag.AppendFromErr(diags, err) } } } - // Only send new LoggingConfiguration with content + + // Only send new LoggingConfiguration with content. if newConfig := n.([]interface{}); len(newConfig) != 0 && newConfig[0] != nil { - loggingConfigs := expandLoggingConfiguration(d.Get(names.AttrLoggingConfiguration).([]interface{})) - // cumulatively add the configured "log_destination_config" in "logging_configuration" - err := putLoggingConfiguration(ctx, conn, d.Id(), loggingConfigs) - if err != nil { + loggingConfigs := expandLoggingConfigurations(d.Get(names.AttrLoggingConfiguration).([]interface{})) + if err := addLoggingConfigurations(ctx, conn, d.Id(), loggingConfigs); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -161,21 +150,21 @@ func resourceLoggingConfigurationUpdate(ctx context.Context, d *schema.ResourceD func resourceLoggingConfigurationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + output, err := findLoggingConfigurationByARN(ctx, conn, d.Id()) - log.Printf("[DEBUG] Deleting Logging Configuration for NetworkFirewall Firewall: %s", d.Id()) - output, err := FindLoggingConfiguration(ctx, conn, 
d.Id()) - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + if tfresource.NotFound(err) { return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Logging Configuration for NetworkFirewall Firewall: %s: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Logging Configuration (%s): %s", d.Id(), err) } if output != nil && output.LoggingConfiguration != nil { - err := removeLoggingConfiguration(ctx, conn, aws.StringValue(output.FirewallArn), output.LoggingConfiguration) + log.Printf("[DEBUG] Deleting NetworkFirewall Logging Configuration: %s", d.Id()) + err := removeLoggingConfiguration(ctx, conn, d.Id(), output.LoggingConfiguration) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -184,166 +173,204 @@ func resourceLoggingConfigurationDelete(ctx context.Context, d *schema.ResourceD return diags } -func putLoggingConfiguration(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string, l []*networkfirewall.LoggingConfiguration) error { +func addLoggingConfigurations(ctx context.Context, conn *networkfirewall.Client, arn string, loggingConfigs []*awstypes.LoggingConfiguration) error { var errs []error - for _, config := range l { + for _, loggingConfig := range loggingConfigs { input := &networkfirewall.UpdateLoggingConfigurationInput{ FirewallArn: aws.String(arn), - LoggingConfiguration: config, + LoggingConfiguration: loggingConfig, } - _, err := conn.UpdateLoggingConfigurationWithContext(ctx, input) + _, err := conn.UpdateLoggingConfiguration(ctx, input) if err != nil { - errs = append(errs, fmt.Errorf("adding Logging Configuration to NetworkFirewall Firewall (%s): %w", arn, err)) + errs = append(errs, fmt.Errorf("adding NetworkFirewall Logging Configuration (%s): %w", arn, err)) } } return errors.Join(errs...) 
} -func removeLoggingConfiguration(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string, l *networkfirewall.LoggingConfiguration) error { - if l == nil { +func removeLoggingConfiguration(ctx context.Context, conn *networkfirewall.Client, arn string, loggingConfig *awstypes.LoggingConfiguration) error { + if loggingConfig == nil { return nil } var errs []error - // Must delete destination configs one at a time - for i, config := range l.LogDestinationConfigs { + // Must delete destination configs one at a time. + for i, logDestinationConfig := range loggingConfig.LogDestinationConfigs { input := &networkfirewall.UpdateLoggingConfigurationInput{ FirewallArn: aws.String(arn), } - if i == 0 && len(l.LogDestinationConfigs) == 2 { - loggingConfig := &networkfirewall.LoggingConfiguration{ - LogDestinationConfigs: []*networkfirewall.LogDestinationConfig{config}, + if i == 0 && len(loggingConfig.LogDestinationConfigs) == 2 { + loggingConfig := &awstypes.LoggingConfiguration{ + LogDestinationConfigs: []awstypes.LogDestinationConfig{logDestinationConfig}, } input.LoggingConfiguration = loggingConfig } - _, err := conn.UpdateLoggingConfigurationWithContext(ctx, input) + _, err := conn.UpdateLoggingConfiguration(ctx, input) if err != nil { - errs = append(errs, fmt.Errorf("removing Logging Configuration LogDestinationConfig (%v) from NetworkFirewall Firewall: %s: %w", config, arn, err)) + errs = append(errs, fmt.Errorf("removing NetworkFirewall Logging Configuration (%s): %w", arn, err)) } } return errors.Join(errs...) 
} -func expandLoggingConfiguration(l []interface{}) []*networkfirewall.LoggingConfiguration { - if len(l) == 0 || l[0] == nil { +func findLoggingConfigurationByARN(ctx context.Context, conn *networkfirewall.Client, arn string) (*networkfirewall.DescribeLoggingConfigurationOutput, error) { + input := &networkfirewall.DescribeLoggingConfigurationInput{ + FirewallArn: aws.String(arn), + } + + output, err := conn.DescribeLoggingConfiguration(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.LoggingConfiguration == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func expandLoggingConfigurations(tfList []interface{}) []*awstypes.LoggingConfiguration { + if len(tfList) == 0 || tfList[0] == nil { return nil } - tfMap, ok := l[0].(map[string]interface{}) + + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - loggingConfigs := make([]*networkfirewall.LoggingConfiguration, 0) - if tfSet, ok := tfMap["log_destination_config"].(*schema.Set); ok && tfSet.Len() > 0 { - tfList := tfSet.List() - for _, tfMapRaw := range tfList { + apiObjects := make([]*awstypes.LoggingConfiguration, 0) + + if v, ok := tfMap["log_destination_config"].(*schema.Set); ok && v.Len() > 0 { + for _, tfMapRaw := range v.List() { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - config := &networkfirewall.LogDestinationConfig{} + + logDestinationConfig := awstypes.LogDestinationConfig{} + if v, ok := tfMap["log_destination"].(map[string]interface{}); ok && len(v) > 0 { - config.LogDestination = aws.StringMap(expandLogDestinationConfigLogDestination(v)) + logDestinationConfig.LogDestination = flex.ExpandStringValueMap(v) } if v, ok := tfMap["log_destination_type"].(string); ok && v != "" { - config.LogDestinationType = aws.String(v) + 
logDestinationConfig.LogDestinationType = awstypes.LogDestinationType(v) } if v, ok := tfMap["log_type"].(string); ok && v != "" { - config.LogType = aws.String(v) + logDestinationConfig.LogType = awstypes.LogType(v) } - // exclude empty LogDestinationConfig due to TypeMap in TypeSet behavior - // Related: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 - if config.LogDestination == nil && config.LogDestinationType == nil && config.LogType == nil { + + // Exclude empty LogDestinationConfig due to TypeMap in TypeSet behavior. + // Related: https://github.com/hashicorp/terraform-plugin-sdk/issues/588. + if logDestinationConfig.LogDestination == nil && logDestinationConfig.LogDestinationType == "" && logDestinationConfig.LogType == "" { continue } - loggingConfig := &networkfirewall.LoggingConfiguration{} - // include all (max 2) "log_destination_config" i.e. prepend the already-expanded loggingConfig - if len(loggingConfigs) == 1 && len(loggingConfigs[0].LogDestinationConfigs) == 1 { - loggingConfig.LogDestinationConfigs = append(loggingConfig.LogDestinationConfigs, loggingConfigs[0].LogDestinationConfigs[0]) + + apiObject := &awstypes.LoggingConfiguration{} + // Include all (max 2) "log_destination_config" i.e. prepend the already-expanded loggingConfig. 
+ if len(apiObjects) == 1 && len(apiObjects[0].LogDestinationConfigs) == 1 { + apiObject.LogDestinationConfigs = append(apiObject.LogDestinationConfigs, apiObjects[0].LogDestinationConfigs[0]) } - loggingConfig.LogDestinationConfigs = append(loggingConfig.LogDestinationConfigs, config) - loggingConfigs = append(loggingConfigs, loggingConfig) + apiObject.LogDestinationConfigs = append(apiObject.LogDestinationConfigs, logDestinationConfig) + + apiObjects = append(apiObjects, apiObject) } } - return loggingConfigs + + return apiObjects } -func expandLoggingConfigurationOnUpdate(l []interface{}) *networkfirewall.LoggingConfiguration { - if len(l) == 0 || l[0] == nil { +func expandLoggingConfigurationOnUpdate(tfList []interface{}) *awstypes.LoggingConfiguration { + if len(tfList) == 0 || tfList[0] == nil { return nil } - tfMap, ok := l[0].(map[string]interface{}) + + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - loggingConfig := &networkfirewall.LoggingConfiguration{} - if tfSet, ok := tfMap["log_destination_config"].(*schema.Set); ok && tfSet.Len() > 0 { - tfList := tfSet.List() - destConfigs := make([]*networkfirewall.LogDestinationConfig, 0, len(tfList)) + apiObject := &awstypes.LoggingConfiguration{} + + if v, ok := tfMap["log_destination_config"].(*schema.Set); ok && v.Len() > 0 { + tfList := v.List() + logDestinationConfigs := make([]awstypes.LogDestinationConfig, 0, len(tfList)) + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - config := &networkfirewall.LogDestinationConfig{} + + logDestinationConfig := awstypes.LogDestinationConfig{} + if v, ok := tfMap["log_destination"].(map[string]interface{}); ok && len(v) > 0 { - config.LogDestination = aws.StringMap(expandLogDestinationConfigLogDestination(v)) + logDestinationConfig.LogDestination = flex.ExpandStringValueMap(v) } if v, ok := tfMap["log_destination_type"].(string); ok && v != "" { - config.LogDestinationType = aws.String(v) + 
logDestinationConfig.LogDestinationType = awstypes.LogDestinationType(v) } if v, ok := tfMap["log_type"].(string); ok && v != "" { - config.LogType = aws.String(v) + logDestinationConfig.LogType = awstypes.LogType(v) } - // exclude empty LogDestinationConfig due to TypeMap in TypeSet behavior - // Related: https://github.com/hashicorp/terraform-plugin-sdk/issues/588 - if config.LogDestination == nil && config.LogDestinationType == nil && config.LogType == nil { + + // Exclude empty LogDestinationConfig due to TypeMap in TypeSet behavior. + // Related: https://github.com/hashicorp/terraform-plugin-sdk/issues/588. + if logDestinationConfig.LogDestination == nil && logDestinationConfig.LogDestinationType == "" && logDestinationConfig.LogType == "" { continue } - destConfigs = append(destConfigs, config) + + logDestinationConfigs = append(logDestinationConfigs, logDestinationConfig) } - loggingConfig.LogDestinationConfigs = destConfigs - } - return loggingConfig -} -func expandLogDestinationConfigLogDestination(dst map[string]interface{}) map[string]string { - m := map[string]string{} - for k, v := range dst { - m[k] = v.(string) + apiObject.LogDestinationConfigs = logDestinationConfigs } - return m + + return apiObject } -func flattenLoggingConfiguration(lc *networkfirewall.LoggingConfiguration) []interface{} { - if lc == nil || lc.LogDestinationConfigs == nil { +func flattenLoggingConfiguration(apiObject *awstypes.LoggingConfiguration) []interface{} { + if apiObject == nil || apiObject.LogDestinationConfigs == nil { return []interface{}{} } - m := map[string]interface{}{ - "log_destination_config": flattenLoggingConfigurationLogDestinationConfigs(lc.LogDestinationConfigs), + + tfMap := map[string]interface{}{ + "log_destination_config": flattenLoggingConfigurationLogDestinationConfigs(apiObject.LogDestinationConfigs), } - return []interface{}{m} + + return []interface{}{tfMap} } -func flattenLoggingConfigurationLogDestinationConfigs(configs 
[]*networkfirewall.LogDestinationConfig) []interface{} { - l := make([]interface{}, 0, len(configs)) - for _, config := range configs { - m := map[string]interface{}{ - "log_destination": aws.StringValueMap(config.LogDestination), - "log_destination_type": aws.StringValue(config.LogDestinationType), - "log_type": aws.StringValue(config.LogType), +func flattenLoggingConfigurationLogDestinationConfigs(apiObjects []awstypes.LogDestinationConfig) []interface{} { + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + "log_destination": apiObject.LogDestination, + "log_destination_type": apiObject.LogDestinationType, + "log_type": apiObject.LogType, } - l = append(l, m) + + tfList = append(tfList, tfMap) } - return l + + return tfList } diff --git a/internal/service/networkfirewall/logging_configuration_test.go b/internal/service/networkfirewall/logging_configuration_test.go index f16bbe82199..bd804e4838c 100644 --- a/internal/service/networkfirewall/logging_configuration_test.go +++ b/internal/service/networkfirewall/logging_configuration_test.go @@ -8,14 +8,14 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfnetworkfirewall "github.com/hashicorp/terraform-provider-aws/internal/service/networkfirewall" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -33,26 +33,26 @@ func 
TestAccNetworkFirewallLoggingConfiguration_CloudWatchLogDestination_logGrou CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, networkfirewall.LogDestinationTypeCloudWatchLogs, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.logGroup": logGroupName, - "log_destination_type": networkfirewall.LogDestinationTypeCloudWatchLogs, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), }), ), }, { - Config: testAccLoggingConfigurationConfig_cloudWatch(updatedLogGroupName, rName, networkfirewall.LogDestinationTypeCloudWatchLogs, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_cloudWatch(updatedLogGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.logGroup": updatedLogGroupName, - "log_destination_type": networkfirewall.LogDestinationTypeCloudWatchLogs, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), }), ), }, @@ -78,22 +78,22 @@ func 
TestAccNetworkFirewallLoggingConfiguration_CloudWatchLogDestination_logType CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, networkfirewall.LogDestinationTypeCloudWatchLogs, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ - "log_type": networkfirewall.LogTypeFlow, + "log_type": string(awstypes.LogTypeFlow), }), ), }, { - Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, networkfirewall.LogDestinationTypeCloudWatchLogs, networkfirewall.LogTypeAlert), + Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeAlert)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ - "log_type": networkfirewall.LogTypeAlert, + "log_type": string(awstypes.LogTypeAlert), }), ), }, @@ -120,26 +120,26 @@ func TestAccNetworkFirewallLoggingConfiguration_KinesisLogDestination_deliverySt CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_kinesis(streamName, rName, networkfirewall.LogDestinationTypeKinesisDataFirehose, networkfirewall.LogTypeFlow), + Config: 
testAccLoggingConfigurationConfig_kinesis(streamName, rName, string(awstypes.LogDestinationTypeKinesisDataFirehose), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.deliveryStream": streamName, - "log_destination_type": networkfirewall.LogDestinationTypeKinesisDataFirehose, + "log_destination_type": string(awstypes.LogDestinationTypeKinesisDataFirehose), }), ), }, { - Config: testAccLoggingConfigurationConfig_kinesis(updatedStreamName, rName, networkfirewall.LogDestinationTypeKinesisDataFirehose, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_kinesis(updatedStreamName, rName, string(awstypes.LogDestinationTypeKinesisDataFirehose), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.deliveryStream": updatedStreamName, - "log_destination_type": networkfirewall.LogDestinationTypeKinesisDataFirehose, + "log_destination_type": string(awstypes.LogDestinationTypeKinesisDataFirehose), }), ), }, @@ -165,22 +165,22 @@ func TestAccNetworkFirewallLoggingConfiguration_KinesisLogDestination_logType(t CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_kinesis(streamName, rName, networkfirewall.LogDestinationTypeKinesisDataFirehose, networkfirewall.LogTypeFlow), + Config: 
testAccLoggingConfigurationConfig_kinesis(streamName, rName, string(awstypes.LogDestinationTypeKinesisDataFirehose), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ - "log_type": networkfirewall.LogTypeFlow, + "log_type": string(awstypes.LogTypeFlow), }), ), }, { - Config: testAccLoggingConfigurationConfig_kinesis(streamName, rName, networkfirewall.LogDestinationTypeKinesisDataFirehose, networkfirewall.LogTypeAlert), + Config: testAccLoggingConfigurationConfig_kinesis(streamName, rName, string(awstypes.LogDestinationTypeKinesisDataFirehose), string(awstypes.LogTypeAlert)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ - "log_type": networkfirewall.LogTypeAlert, + "log_type": string(awstypes.LogTypeAlert), }), ), }, @@ -208,7 +208,7 @@ func TestAccNetworkFirewallLoggingConfiguration_S3LogDestination_bucketName(t *t Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -219,7 +219,7 @@ func TestAccNetworkFirewallLoggingConfiguration_S3LogDestination_bucketName(t *t ), }, { - Config: 
testAccLoggingConfigurationConfig_s3(updatedBucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3(updatedBucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -252,22 +252,22 @@ func TestAccNetworkFirewallLoggingConfiguration_S3LogDestination_logType(t *test Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ - "log_type": networkfirewall.LogTypeFlow, + "log_type": string(awstypes.LogTypeFlow), }), ), }, { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeAlert), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeAlert)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ - "log_type": networkfirewall.LogTypeAlert, + "log_type": string(awstypes.LogTypeAlert), }), ), }, @@ -294,7 +294,7 @@ func 
TestAccNetworkFirewallLoggingConfiguration_S3LogDestination_prefix(t *testi Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -305,7 +305,7 @@ func TestAccNetworkFirewallLoggingConfiguration_S3LogDestination_prefix(t *testi ), }, { - Config: testAccLoggingConfigurationConfig_s3UpdatePrefix(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3UpdatePrefix(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -339,7 +339,7 @@ func TestAccNetworkFirewallLoggingConfiguration_updateFirewallARN(t *testing.T) CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "firewall_arn", firewallResourceName, names.AttrARN), @@ -347,7 +347,7 @@ func TestAccNetworkFirewallLoggingConfiguration_updateFirewallARN(t *testing.T) }, { // ForceNew Firewall i.e. 
LoggingConfiguration Resource - Config: testAccLoggingConfigurationConfig_s3UpdateFirewallARN(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3UpdateFirewallARN(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttrPair(resourceName, "firewall_arn", firewallResourceName, names.AttrARN), @@ -377,39 +377,39 @@ func TestAccNetworkFirewallLoggingConfiguration_updateLogDestinationType(t *test CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, networkfirewall.LogDestinationTypeCloudWatchLogs, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.logGroup": logGroupName, - "log_destination_type": networkfirewall.LogDestinationTypeCloudWatchLogs, - "log_type": networkfirewall.LogTypeFlow, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), + "log_type": string(awstypes.LogTypeFlow), }), ), }, { - Config: testAccLoggingConfigurationConfig_kinesis(streamName, rName, networkfirewall.LogDestinationTypeKinesisDataFirehose, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_kinesis(streamName, rName, string(awstypes.LogDestinationTypeKinesisDataFirehose), string(awstypes.LogTypeFlow)), Check: 
resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.deliveryStream": streamName, - "log_destination_type": networkfirewall.LogDestinationTypeKinesisDataFirehose, - "log_type": networkfirewall.LogTypeFlow, + "log_destination_type": string(awstypes.LogDestinationTypeKinesisDataFirehose), + "log_type": string(awstypes.LogTypeFlow), }), ), }, { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ - "log_destination_type": networkfirewall.LogDestinationTypeS3, - "log_type": networkfirewall.LogTypeFlow, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeFlow), }), ), }, @@ -436,13 +436,13 @@ func TestAccNetworkFirewallLoggingConfiguration_updateToMultipleLogDestinations( CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeAlert), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeAlert)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, 
resourceName), ), }, { - Config: testAccLoggingConfigurationConfig_s3AndKinesis(bucketName, streamName, rName, networkfirewall.LogTypeAlert, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3AndKinesis(bucketName, streamName, rName, string(awstypes.LogTypeAlert), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -450,19 +450,19 @@ func TestAccNetworkFirewallLoggingConfiguration_updateToMultipleLogDestinations( resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.deliveryStream": streamName, - "log_destination_type": networkfirewall.LogDestinationTypeKinesisDataFirehose, - "log_type": networkfirewall.LogTypeFlow, + "log_destination_type": string(awstypes.LogDestinationTypeKinesisDataFirehose), + "log_type": string(awstypes.LogTypeFlow), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.bucketName": bucketName, - "log_destination_type": networkfirewall.LogDestinationTypeS3, - "log_type": networkfirewall.LogTypeAlert, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeAlert), }), ), }, { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeAlert), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeAlert)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -492,7 +492,7 @@ func 
TestAccNetworkFirewallLoggingConfiguration_updateToSingleAlertTypeLogDestin CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_s3AndCloudWatch(bucketName, logGroupName, rName, networkfirewall.LogTypeAlert, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3AndCloudWatch(bucketName, logGroupName, rName, string(awstypes.LogTypeAlert), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -500,19 +500,19 @@ func TestAccNetworkFirewallLoggingConfiguration_updateToSingleAlertTypeLogDestin resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.logGroup": logGroupName, - "log_destination_type": networkfirewall.LogDestinationTypeCloudWatchLogs, - "log_type": networkfirewall.LogTypeFlow, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), + "log_type": string(awstypes.LogTypeFlow), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.bucketName": bucketName, - "log_destination_type": networkfirewall.LogDestinationTypeS3, - "log_type": networkfirewall.LogTypeAlert, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeAlert), }), ), }, { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeAlert), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeAlert)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, 
resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -520,8 +520,8 @@ func TestAccNetworkFirewallLoggingConfiguration_updateToSingleAlertTypeLogDestin resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.bucketName": bucketName, - "log_destination_type": networkfirewall.LogDestinationTypeS3, - "log_type": networkfirewall.LogTypeAlert, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeAlert), }), ), }, @@ -548,7 +548,7 @@ func TestAccNetworkFirewallLoggingConfiguration_updateToSingleFlowTypeLogDestina CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_s3AndCloudWatch(bucketName, logGroupName, rName, networkfirewall.LogTypeAlert, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3AndCloudWatch(bucketName, logGroupName, rName, string(awstypes.LogTypeAlert), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -556,19 +556,19 @@ func TestAccNetworkFirewallLoggingConfiguration_updateToSingleFlowTypeLogDestina resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.logGroup": logGroupName, - "log_destination_type": networkfirewall.LogDestinationTypeCloudWatchLogs, - "log_type": networkfirewall.LogTypeFlow, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), + "log_type": string(awstypes.LogTypeFlow), }), resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ 
"log_destination.%": acctest.Ct1, "log_destination.bucketName": bucketName, - "log_destination_type": networkfirewall.LogDestinationTypeS3, - "log_type": networkfirewall.LogTypeAlert, + "log_destination_type": string(awstypes.LogDestinationTypeS3), + "log_type": string(awstypes.LogTypeAlert), }), ), }, { - Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, networkfirewall.LogDestinationTypeCloudWatchLogs, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, string(awstypes.LogDestinationTypeCloudwatchLogs), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), @@ -576,8 +576,8 @@ func TestAccNetworkFirewallLoggingConfiguration_updateToSingleFlowTypeLogDestina resource.TestCheckTypeSetElemNestedAttrs(resourceName, "logging_configuration.0.log_destination_config.*", map[string]string{ "log_destination.%": acctest.Ct1, "log_destination.logGroup": logGroupName, - "log_destination_type": networkfirewall.LogDestinationTypeCloudWatchLogs, - "log_type": networkfirewall.LogTypeFlow, + "log_destination_type": string(awstypes.LogDestinationTypeCloudwatchLogs), + "log_type": string(awstypes.LogTypeFlow), }), ), }, @@ -603,7 +603,7 @@ func TestAccNetworkFirewallLoggingConfiguration_disappears(t *testing.T) { CheckDestroy: testAccCheckLoggingConfigurationDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, networkfirewall.LogDestinationTypeS3, networkfirewall.LogTypeFlow), + Config: testAccLoggingConfigurationConfig_s3(bucketName, rName, string(awstypes.LogDestinationTypeS3), string(awstypes.LogTypeFlow)), Check: resource.ComposeTestCheckFunc( testAccCheckLoggingConfigurationExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, 
tfnetworkfirewall.ResourceLoggingConfiguration(), resourceName), @@ -621,17 +621,19 @@ func testAccCheckLoggingConfigurationDestroy(ctx context.Context) resource.TestC continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) - output, err := tfnetworkfirewall.FindLoggingConfiguration(ctx, conn, rs.Primary.ID) - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) + + _, err := tfnetworkfirewall.FindLoggingConfigurationByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { continue } + if err != nil { return err } - if output != nil { - return fmt.Errorf("NetworkFirewall Logging Configuration for firewall (%s) still exists", rs.Primary.ID) - } + + return fmt.Errorf("NetworkFirewall Logging Configuration %s still exists", rs.Primary.ID) } return nil @@ -645,54 +647,19 @@ func testAccCheckLoggingConfigurationExists(ctx context.Context, n string) resou return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No NetworkFirewall Logging Configuration ID is set") - } + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) - output, err := tfnetworkfirewall.FindLoggingConfiguration(ctx, conn, rs.Primary.ID) - if err != nil { - return err - } - if output == nil || output.LoggingConfiguration == nil { - return fmt.Errorf("NetworkFirewall Logging Configuration for firewall (%s) not found", rs.Primary.ID) - } + _, err := tfnetworkfirewall.FindLoggingConfigurationByARN(ctx, conn, rs.Primary.ID) - return nil + return err } } -func testAccLoggingConfigurationBaseConfig(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -resource "aws_vpc" "test" { - 
cidr_block = "192.168.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - +func testAccLoggingConfigurationConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` resource "aws_networkfirewall_firewall_policy" "test" { name = %[1]q + firewall_policy { stateless_fragment_default_actions = ["aws:drop"] stateless_default_actions = ["aws:pass"] @@ -705,43 +672,17 @@ resource "aws_networkfirewall_firewall" "test" { vpc_id = aws_vpc.test.id subnet_mapping { - subnet_id = aws_subnet.test.id - } -} -`, rName) -} - -func testAccLoggingConfigurationBaseConfig_updateFirewall(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -resource "aws_vpc" "test" { - cidr_block = "192.168.0.0/16" - - tags = { - Name = %[1]q + subnet_id = aws_subnet.test[0].id } } - -resource "aws_subnet" "test" { - availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = cidrsubnet(aws_vpc.test.cidr_block, 8, 0) - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } +`, rName)) } +func testAccLoggingConfigurationConfig_baseFirewallUpdated(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` resource "aws_networkfirewall_firewall_policy" "test" { name = %[1]q + firewall_policy { stateless_fragment_default_actions = ["aws:drop"] stateless_default_actions = ["aws:pass"] @@ -754,16 +695,16 @@ resource "aws_networkfirewall_firewall" "test" { vpc_id = aws_vpc.test.id subnet_mapping { - subnet_id = aws_subnet.test.id + subnet_id = aws_subnet.test[0].id } } -`, rName) +`, rName)) } -func 
testAccLoggingConfigurationS3BucketDependencyConfig(rName string) string { +func testAccLoggingConfigurationConfig_baseS3Bucket(rName string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "test" { - bucket = %q + bucket = %[1]q force_destroy = true lifecycle { @@ -773,10 +714,10 @@ resource "aws_s3_bucket" "test" { `, rName) } -func testAccLoggingConfigurationCloudWatchDependencyConfig(rName string) string { +func testAccLoggingConfigurationConfig_baseCloudWatch(rName string) string { return fmt.Sprintf(` resource "aws_cloudwatch_log_group" "test" { - name = %q + name = %[1]q lifecycle { create_before_destroy = true @@ -785,7 +726,7 @@ resource "aws_cloudwatch_log_group" "test" { `, rName) } -func testAccLoggingConfiguration_kinesisDependenciesConfig(rName, streamName string) string { +func testAccLoggingConfigurationConfig_baseFirehose(rName, streamName string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} @@ -887,8 +828,8 @@ resource "aws_kinesis_firehose_delivery_stream" "test" { func testAccLoggingConfigurationConfig_s3(bucketName, rName, destinationType, logType string) string { return acctest.ConfigCompose( - testAccLoggingConfigurationBaseConfig(rName), - testAccLoggingConfigurationS3BucketDependencyConfig(bucketName), + testAccLoggingConfigurationConfig_base(rName), + testAccLoggingConfigurationConfig_baseS3Bucket(bucketName), fmt.Sprintf(` resource "aws_networkfirewall_logging_configuration" "test" { firewall_arn = aws_networkfirewall_firewall.test.arn @@ -908,8 +849,8 @@ resource "aws_networkfirewall_logging_configuration" "test" { func testAccLoggingConfigurationConfig_s3UpdateFirewallARN(bucketName, rName, destinationType, logType string) string { return acctest.ConfigCompose( - testAccLoggingConfigurationBaseConfig_updateFirewall(rName), - testAccLoggingConfigurationS3BucketDependencyConfig(bucketName), + testAccLoggingConfigurationConfig_baseFirewallUpdated(rName), + 
testAccLoggingConfigurationConfig_baseS3Bucket(bucketName), fmt.Sprintf(` resource "aws_networkfirewall_logging_configuration" "test" { firewall_arn = aws_networkfirewall_firewall.test.arn @@ -929,8 +870,8 @@ resource "aws_networkfirewall_logging_configuration" "test" { func testAccLoggingConfigurationConfig_s3UpdatePrefix(bucketName, rName, destinationType, logType string) string { return acctest.ConfigCompose( - testAccLoggingConfigurationBaseConfig(rName), - testAccLoggingConfigurationS3BucketDependencyConfig(bucketName), + testAccLoggingConfigurationConfig_base(rName), + testAccLoggingConfigurationConfig_baseS3Bucket(bucketName), fmt.Sprintf(` resource "aws_networkfirewall_logging_configuration" "test" { firewall_arn = aws_networkfirewall_firewall.test.arn @@ -951,8 +892,8 @@ resource "aws_networkfirewall_logging_configuration" "test" { func testAccLoggingConfigurationConfig_kinesis(streamName, rName, destinationType, logType string) string { return acctest.ConfigCompose( - testAccLoggingConfigurationBaseConfig(rName), - testAccLoggingConfiguration_kinesisDependenciesConfig(rName, streamName), + testAccLoggingConfigurationConfig_base(rName), + testAccLoggingConfigurationConfig_baseFirehose(rName, streamName), fmt.Sprintf(` resource "aws_networkfirewall_logging_configuration" "test" { firewall_arn = aws_networkfirewall_firewall.test.arn @@ -972,8 +913,8 @@ resource "aws_networkfirewall_logging_configuration" "test" { func testAccLoggingConfigurationConfig_cloudWatch(logGroupName, rName, destinationType, logType string) string { return acctest.ConfigCompose( - testAccLoggingConfigurationBaseConfig(rName), - testAccLoggingConfigurationCloudWatchDependencyConfig(logGroupName), + testAccLoggingConfigurationConfig_base(rName), + testAccLoggingConfigurationConfig_baseCloudWatch(logGroupName), fmt.Sprintf(` resource "aws_networkfirewall_logging_configuration" "test" { firewall_arn = aws_networkfirewall_firewall.test.arn @@ -993,9 +934,9 @@ resource 
"aws_networkfirewall_logging_configuration" "test" { func testAccLoggingConfigurationConfig_s3AndKinesis(bucketName, streamName, rName, logTypeS3, logTypeKinesis string) string { return acctest.ConfigCompose( - testAccLoggingConfigurationS3BucketDependencyConfig(bucketName), - testAccLoggingConfiguration_kinesisDependenciesConfig(rName, streamName), - testAccLoggingConfigurationBaseConfig(rName), + testAccLoggingConfigurationConfig_baseS3Bucket(bucketName), + testAccLoggingConfigurationConfig_baseFirehose(rName, streamName), + testAccLoggingConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_networkfirewall_logging_configuration" "test" { firewall_arn = aws_networkfirewall_firewall.test.arn @@ -1023,9 +964,9 @@ resource "aws_networkfirewall_logging_configuration" "test" { func testAccLoggingConfigurationConfig_s3AndCloudWatch(bucketName, logGroupName, rName, logTypeS3, logTypeCloudWatch string) string { return acctest.ConfigCompose( - testAccLoggingConfigurationBaseConfig(rName), - testAccLoggingConfigurationS3BucketDependencyConfig(bucketName), - testAccLoggingConfigurationCloudWatchDependencyConfig(logGroupName), + testAccLoggingConfigurationConfig_base(rName), + testAccLoggingConfigurationConfig_baseS3Bucket(bucketName), + testAccLoggingConfigurationConfig_baseCloudWatch(logGroupName), fmt.Sprintf(` resource "aws_networkfirewall_logging_configuration" "test" { firewall_arn = aws_networkfirewall_firewall.test.arn diff --git a/internal/service/networkfirewall/resource_policy.go b/internal/service/networkfirewall/resource_policy.go index 69799e4bf99..b9d8f99fcda 100644 --- a/internal/service/networkfirewall/resource_policy.go +++ b/internal/service/networkfirewall/resource_policy.go @@ -8,22 +8,24 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + 
"github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_networkfirewall_resource_policy") -func ResourceResourcePolicy() *schema.Resource { +// @SDKResource("aws_networkfirewall_resource_policy", name="Resource Policy") +func resourceResourcePolicy() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceResourcePolicyPut, ReadWithoutTimeout: resourceResourcePolicyRead, @@ -36,10 +38,11 @@ func ResourceResourcePolicy() *schema.Resource { Schema: map[string]*schema.Schema{ names.AttrPolicy: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs, + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentPolicyDiffs, + DiffSuppressOnRefresh: true, StateFunc: func(v interface{}) string { json, _ := structure.NormalizeJsonString(v) return json @@ -57,84 +60,74 @@ func ResourceResourcePolicy() *schema.Resource { func resourceResourcePolicyPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - resourceArn := 
d.Get(names.AttrResourceARN).(string) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) policy, err := structure.NormalizeJsonString(d.Get(names.AttrPolicy).(string)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "policy (%s) is invalid JSON: %s", policy, err) + return sdkdiag.AppendFromErr(diags, err) } + resourceARN := d.Get(names.AttrResourceARN).(string) input := &networkfirewall.PutResourcePolicyInput{ - ResourceArn: aws.String(resourceArn), Policy: aws.String(policy), + ResourceArn: aws.String(resourceARN), } - log.Printf("[DEBUG] Putting NetworkFirewall Resource Policy for resource: %s", resourceArn) + _, err = conn.PutResourcePolicy(ctx, input) - _, err = conn.PutResourcePolicyWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "putting NetworkFirewall Resource Policy (for resource: %s): %s", resourceArn, err) + return sdkdiag.AppendErrorf(diags, "putting NetworkFirewall Resource Policy (%s): %s", resourceARN, err) } - d.SetId(resourceArn) + if d.IsNewResource() { + d.SetId(resourceARN) + } return append(diags, resourceResourcePolicyRead(ctx, d, meta)...) 
} func resourceResourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - resourceArn := d.Id() + policy, err := findResourcePolicyByARN(ctx, conn, d.Id()) - log.Printf("[DEBUG] Reading NetworkFirewall Resource Policy for resource: %s", resourceArn) - - policy, err := FindResourcePolicy(ctx, conn, resourceArn) - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { - log.Printf("[WARN] NetworkFirewall Resource Policy (for resource: %s) not found, removing from state", resourceArn) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] NetworkFirewall Resource Policy (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Resource Policy (for resource: %s): %s", resourceArn, err) - } - if policy == nil { - return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Resource Policy (for resource: %s): empty output", resourceArn) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Resource Policy (%s): %s", d.Id(), err) } - d.Set(names.AttrResourceARN, resourceArn) - - policyToSet, err := verify.PolicyToSet(d.Get(names.AttrPolicy).(string), aws.StringValue(policy)) - + policyToSet, err := verify.PolicyToSet(d.Get(names.AttrPolicy).(string), aws.ToString(policy)) if err != nil { - return sdkdiag.AppendErrorf(diags, "setting policy %s: %s", aws.StringValue(policy), err) + return sdkdiag.AppendFromErr(diags, err) } d.Set(names.AttrPolicy, policyToSet) + d.Set(names.AttrResourceARN, d.Id()) return diags } func resourceResourcePolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := 
meta.(*conns.AWSClient).NetworkFirewallClient(ctx) + log.Printf("[DEBUG] Deleting NetworkFirewall Resource Policy: %s", d.Id()) const ( timeout = 2 * time.Minute ) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - - log.Printf("[DEBUG] Deleting NetworkFirewall Resource Policy: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func() (interface{}, error) { - return conn.DeleteResourcePolicyWithContext(ctx, &networkfirewall.DeleteResourcePolicyInput{ + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidResourcePolicyException](ctx, timeout, func() (interface{}, error) { + return conn.DeleteResourcePolicy(ctx, &networkfirewall.DeleteResourcePolicyInput{ ResourceArn: aws.String(d.Id()), }) - }, networkfirewall.ErrCodeInvalidResourcePolicyException, "The supplied policy does not match RAM managed permissions") + }, "The supplied policy does not match RAM managed permissions") - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -144,3 +137,28 @@ func resourceResourcePolicyDelete(ctx context.Context, d *schema.ResourceData, m return diags } + +func findResourcePolicyByARN(ctx context.Context, conn *networkfirewall.Client, arn string) (*string, error) { + input := &networkfirewall.DescribeResourcePolicyInput{ + ResourceArn: aws.String(arn), + } + + output, err := conn.DescribeResourcePolicy(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.Policy == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Policy, nil +} diff --git a/internal/service/networkfirewall/firewall_resource_policy_data_source.go b/internal/service/networkfirewall/resource_policy_data_source.go similarity index 65% rename from 
internal/service/networkfirewall/firewall_resource_policy_data_source.go rename to internal/service/networkfirewall/resource_policy_data_source.go index d7036ac5cff..287526effc5 100644 --- a/internal/service/networkfirewall/firewall_resource_policy_data_source.go +++ b/internal/service/networkfirewall/resource_policy_data_source.go @@ -14,10 +14,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_networkfirewall_resource_policy") -func DataSourceFirewallResourcePolicy() *schema.Resource { +// @SDKDataSource("aws_networkfirewall_resource_policy", name="Resource Policy") +func dataSourceResourcePolicy() *schema.Resource { return &schema.Resource{ - ReadWithoutTimeout: dataSourceFirewallResourcePolicyRead, + ReadWithoutTimeout: dataSourceResourcePolicyRead, Schema: map[string]*schema.Schema{ names.AttrPolicy: { @@ -27,29 +27,23 @@ func DataSourceFirewallResourcePolicy() *schema.Resource { names.AttrResourceARN: { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: verify.ValidARN, }, }, } } -func dataSourceFirewallResourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func dataSourceResourcePolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) resourceARN := d.Get(names.AttrResourceARN).(string) - policy, err := FindResourcePolicy(ctx, conn, resourceARN) + policy, err := findResourcePolicyByARN(ctx, conn, resourceARN) if err != nil { return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Resource Policy (%s): %s", resourceARN, err) } - if policy == nil { - return sdkdiag.AppendErrorf(diags, "reading NetworkFirewall Resource Policy (%s): empty output", resourceARN) - } - d.SetId(resourceARN) d.Set(names.AttrPolicy, policy) d.Set(names.AttrResourceARN, resourceARN) diff 
--git a/internal/service/networkfirewall/firewall_resource_policy_data_source_test.go b/internal/service/networkfirewall/resource_policy_data_source_test.go similarity index 96% rename from internal/service/networkfirewall/firewall_resource_policy_data_source_test.go rename to internal/service/networkfirewall/resource_policy_data_source_test.go index d086fdd63c7..22206eaf0b1 100644 --- a/internal/service/networkfirewall/firewall_resource_policy_data_source_test.go +++ b/internal/service/networkfirewall/resource_policy_data_source_test.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccNetworkFirewallFirewallResourcePolicyDataSource_basic(t *testing.T) { +func TestAccNetworkFirewallResourcePolicyDataSource_basic(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_networkfirewall_resource_policy.test" diff --git a/internal/service/networkfirewall/resource_policy_test.go b/internal/service/networkfirewall/resource_policy_test.go index cefb310bacf..09721d48099 100644 --- a/internal/service/networkfirewall/resource_policy_test.go +++ b/internal/service/networkfirewall/resource_policy_test.go @@ -9,14 +9,13 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfnetworkfirewall "github.com/hashicorp/terraform-provider-aws/internal/service/networkfirewall" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -200,17 +199,19 @@ func 
testAccCheckResourcePolicyDestroy(ctx context.Context) resource.TestCheckFu continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) - policy, err := tfnetworkfirewall.FindResourcePolicy(ctx, conn, rs.Primary.ID) - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) + + _, err := tfnetworkfirewall.FindResourcePolicyByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { continue } + if err != nil { return err } - if policy != nil { - return fmt.Errorf("NetworkFirewall Resource Policy (for resource: %s) still exists", rs.Primary.ID) - } + + return fmt.Errorf("NetworkFirewall Resource Policy %s still exists", rs.Primary.ID) } return nil @@ -224,25 +225,15 @@ func testAccCheckResourcePolicyExists(ctx context.Context, n string) resource.Te return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No NetworkFirewall Resource Policy ID is set") - } + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) - policy, err := tfnetworkfirewall.FindResourcePolicy(ctx, conn, rs.Primary.ID) - if err != nil { - return err - } - - if policy == nil { - return fmt.Errorf("NetworkFirewall Resource Policy (for resource: %s) not found", rs.Primary.ID) - } + _, err := tfnetworkfirewall.FindResourcePolicyByARN(ctx, conn, rs.Primary.ID) - return nil + return err } } -func testAccResourcePolicyFirewallPolicyBaseConfig(rName string) string { +func testAccResourcePolicyFirewallPolicyConfig_base(rName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} @@ -250,6 +241,7 @@ data "aws_caller_identity" "current" {} resource "aws_networkfirewall_firewall_policy" "test" { name = %[1]q + firewall_policy { stateless_fragment_default_actions = ["aws:drop"] stateless_default_actions = ["aws:pass"] @@ 
-259,8 +251,7 @@ resource "aws_networkfirewall_firewall_policy" "test" { } func testAccResourcePolicyConfig_basic(rName string) string { - return acctest.ConfigCompose( - testAccResourcePolicyFirewallPolicyBaseConfig(rName), ` + return acctest.ConfigCompose(testAccResourcePolicyFirewallPolicyConfig_base(rName), ` resource "aws_networkfirewall_resource_policy" "test" { resource_arn = aws_networkfirewall_firewall_policy.test.arn # policy's Action element must include all of the following operations @@ -285,8 +276,7 @@ resource "aws_networkfirewall_resource_policy" "test" { } func testAccResourcePolicyConfig_equivalent(rName string) string { - return acctest.ConfigCompose( - testAccResourcePolicyFirewallPolicyBaseConfig(rName), ` + return acctest.ConfigCompose(testAccResourcePolicyFirewallPolicyConfig_base(rName), ` resource "aws_networkfirewall_resource_policy" "test" { resource_arn = aws_networkfirewall_firewall_policy.test.arn # policy's Action element must include all of the following operations @@ -310,7 +300,7 @@ resource "aws_networkfirewall_resource_policy" "test" { `) } -func testAccResourcePolicyRuleGroupBaseConfig(rName string) string { +func testAccResourcePolicyConfig_baseRuleGroup(rName string) string { return fmt.Sprintf(` data "aws_partition" "current" {} @@ -318,7 +308,7 @@ data "aws_caller_identity" "current" {} resource "aws_networkfirewall_rule_group" "test" { capacity = 100 - name = %q + name = %[1]q type = "STATEFUL" rule_group { rules_source { @@ -334,8 +324,7 @@ resource "aws_networkfirewall_rule_group" "test" { } func testAccResourcePolicyConfig_ruleGroup(rName string) string { - return acctest.ConfigCompose( - testAccResourcePolicyRuleGroupBaseConfig(rName), ` + return acctest.ConfigCompose(testAccResourcePolicyConfig_baseRuleGroup(rName), ` resource "aws_networkfirewall_resource_policy" "test" { resource_arn = aws_networkfirewall_rule_group.test.arn # policy's Action element must include all of the following operations @@ -359,8 +348,7 @@ 
resource "aws_networkfirewall_resource_policy" "test" { } func testAccResourcePolicyConfig_ruleGroupUpdate(rName string) string { - return acctest.ConfigCompose( - testAccResourcePolicyRuleGroupBaseConfig(rName), ` + return acctest.ConfigCompose(testAccResourcePolicyConfig_baseRuleGroup(rName), ` resource "aws_networkfirewall_resource_policy" "test" { resource_arn = aws_networkfirewall_rule_group.test.arn # policy's Action element must include all of the following operations diff --git a/internal/service/networkfirewall/rule_group.go b/internal/service/networkfirewall/rule_group.go index 133f2691717..44116d96867 100644 --- a/internal/service/networkfirewall/rule_group.go +++ b/internal/service/networkfirewall/rule_group.go @@ -9,15 +9,17 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -28,7 +30,7 @@ import ( // @SDKResource("aws_networkfirewall_rule_group", name="Rule Group") // @Tags(identifierAttribute="id") -func ResourceRuleGroup() *schema.Resource { +func 
resourceRuleGroup() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceRuleGroupCreate, ReadWithoutTimeout: resourceRuleGroupRead, @@ -39,290 +41,292 @@ func ResourceRuleGroup() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, - Schema: map[string]*schema.Schema{ - names.AttrARN: { - Type: schema.TypeString, - Computed: true, - }, - "capacity": { - Type: schema.TypeInt, - Required: true, - ForceNew: true, - }, - names.AttrDescription: { - Type: schema.TypeString, - Optional: true, - }, - names.AttrEncryptionConfiguration: encryptionConfigurationSchema(), - names.AttrName: { - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - "rule_group": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "reference_sets": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_set_references": { - Type: schema.TypeSet, - Optional: true, - MaxItems: 5, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_set_reference": { - Type: schema.TypeList, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "reference_arn": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidARN, + SchemaFunc: func() map[string]*schema.Schema { + return map[string]*schema.Schema{ + names.AttrARN: { + Type: schema.TypeString, + Computed: true, + }, + "capacity": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + names.AttrDescription: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrEncryptionConfiguration: encryptionConfigurationSchema(), + names.AttrName: { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "rule_group": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + 
"reference_sets": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_set_references": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 5, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_set_reference": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "reference_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidARN, + }, }, }, }, - }, - names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 32), - validation.StringMatch(regexache.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_]+$`), "must contain only alphanumeric and underscore characters"), - ), + names.AttrKey: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexache.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), + validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_]+$`), "must contain only alphanumeric and underscore characters"), + ), + }, }, }, }, }, }, }, - }, - "rules_source": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "rules_source_list": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "generated_rules_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.GeneratedRulesType_Values(), false), - }, - "target_types": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(networkfirewall.TargetType_Values(), false), + "rules_source": { + Type: 
schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rules_source_list": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "generated_rules_type": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.GeneratedRulesType](), + }, + "target_types": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.TargetType](), + }, + }, + "targets": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, }, - }, - "targets": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, }, }, }, - }, - "rules_string": { - Type: schema.TypeString, - Optional: true, - }, - "stateful_rule": { - Type: schema.TypeList, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrAction: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.StatefulAction_Values(), false), - }, - names.AttrHeader: { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrDestination: { - Type: schema.TypeString, - Required: true, - }, - "destination_port": { - Type: schema.TypeString, - Required: true, - }, - "direction": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.StatefulRuleDirection_Values(), false), - }, - names.AttrProtocol: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.StatefulRuleProtocol_Values(), false), - }, - names.AttrSource: { - Type: schema.TypeString, - Required: true, - }, - "source_port": { - Type: schema.TypeString, - Required: true, + "rules_string": { + Type: schema.TypeString, + Optional: 
true, + }, + "stateful_rule": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrAction: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.StatefulAction](), + }, + names.AttrHeader: { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDestination: { + Type: schema.TypeString, + Required: true, + }, + "destination_port": { + Type: schema.TypeString, + Required: true, + }, + "direction": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.StatefulRuleDirection](), + }, + names.AttrProtocol: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.StatefulRuleProtocol](), + }, + names.AttrSource: { + Type: schema.TypeString, + Required: true, + }, + "source_port": { + Type: schema.TypeString, + Required: true, + }, }, }, }, - }, - "rule_option": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "keyword": { - Type: schema.TypeString, - Required: true, - }, - "settings": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeString}, + "rule_option": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "keyword": { + Type: schema.TypeString, + Required: true, + }, + "settings": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, }, }, }, }, }, - }, - "stateless_rules_and_custom_actions": { - Type: schema.TypeList, - MaxItems: 1, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "custom_action": customActionSchema(), - "stateless_rule": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrPriority: { - Type: schema.TypeInt, - 
Required: true, - }, - "rule_definition": { - Type: schema.TypeList, - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrActions: { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, - }, - "match_attributes": { - Type: schema.TypeList, - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrDestination: { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "address_definition": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidIPv4CIDRNetworkAddress, + "stateless_rules_and_custom_actions": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "custom_action": customActionSchema(), + "stateless_rule": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrPriority: { + Type: schema.TypeInt, + Required: true, + }, + "rule_definition": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrActions: { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "match_attributes": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrDestination: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address_definition": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidIPv4CIDRNetworkAddress, + }, }, }, }, - }, - "destination_port": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": { - Type: schema.TypeInt, - Required: true, - }, - "to_port": { - Type: schema.TypeInt, 
- Optional: true, + "destination_port": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from_port": { + Type: schema.TypeInt, + Required: true, + }, + "to_port": { + Type: schema.TypeInt, + Optional: true, + }, }, }, }, - }, - "protocols": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{Type: schema.TypeInt}, - }, - names.AttrSource: { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "address_definition": { - Type: schema.TypeString, - Required: true, - ValidateFunc: verify.ValidIPv4CIDRNetworkAddress, + "protocols": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeInt}, + }, + names.AttrSource: { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "address_definition": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidIPv4CIDRNetworkAddress, + }, }, }, }, - }, - "source_port": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "from_port": { - Type: schema.TypeInt, - Required: true, - }, - "to_port": { - Type: schema.TypeInt, - Optional: true, + "source_port": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from_port": { + Type: schema.TypeInt, + Required: true, + }, + "to_port": { + Type: schema.TypeInt, + Optional: true, + }, }, }, }, - }, - "tcp_flag": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "flags": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(networkfirewall.TCPFlag_Values(), false), + "tcp_flag": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "flags": { + Type: 
schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.TCPFlag](), + }, }, - }, - "masks": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(networkfirewall.TCPFlag_Values(), false), + "masks": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.TCPFlag](), + }, }, }, }, @@ -342,68 +346,68 @@ func ResourceRuleGroup() *schema.Resource { }, }, }, - }, - "rule_variables": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ip_sets": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 32), - validation.StringMatch(regexache.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_]+$`), "must contain only alphanumeric and underscore characters"), - ), - }, - "ip_set": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "definition": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, + "rule_variables": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_sets": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexache.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), + 
validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_]+$`), "must contain only alphanumeric and underscore characters"), + ), + }, + "ip_set": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "definition": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, }, }, }, }, }, - }, - "port_sets": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrKey: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.All( - validation.StringLenBetween(1, 32), - validation.StringMatch(regexache.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), - validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_]+$`), "must contain only alphanumeric and underscore characters"), - ), - }, - "port_set": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "definition": { - Type: schema.TypeSet, - Required: true, - Elem: &schema.Schema{Type: schema.TypeString}, + "port_sets": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrKey: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 32), + validation.StringMatch(regexache.MustCompile(`^[A-Za-z]`), "must begin with alphabetic character"), + validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_]+$`), "must contain only alphanumeric and underscore characters"), + ), + }, + "port_set": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "definition": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, }, }, @@ -413,39 +417,39 @@ func ResourceRuleGroup() *schema.Resource { }, }, }, - }, - 
"stateful_rule_options": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "rule_order": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.RuleOrder_Values(), false), + "stateful_rule_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_order": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.RuleOrder](), + }, }, }, }, }, }, }, - }, - "rules": { - Type: schema.TypeString, - Optional: true, - }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), - names.AttrType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(networkfirewall.RuleGroupType_Values(), false), - }, - "update_token": { - Type: schema.TypeString, - Computed: true, - }, + "rules": { + Type: schema.TypeString, + Optional: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + names.AttrType: { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.RuleGroupType](), + }, + "update_token": { + Type: schema.TypeString, + Computed: true, + }, + } }, CustomizeDiff: customdiff.Sequence( @@ -461,15 +465,14 @@ func ResourceRuleGroup() *schema.Resource { func resourceRuleGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) name := d.Get(names.AttrName).(string) input := &networkfirewall.CreateRuleGroupInput{ - Capacity: aws.Int64(int64(d.Get("capacity").(int))), + Capacity: aws.Int32(int32(d.Get("capacity").(int))), RuleGroupName: aws.String(name), Tags: getTagsIn(ctx), - Type: 
aws.String(d.Get(names.AttrType).(string)), + Type: awstypes.RuleGroupType(d.Get(names.AttrType).(string)), } if v, ok := d.GetOk(names.AttrDescription); ok { @@ -488,23 +491,22 @@ func resourceRuleGroupCreate(ctx context.Context, d *schema.ResourceData, meta i input.Rules = aws.String(v.(string)) } - output, err := conn.CreateRuleGroupWithContext(ctx, input) + output, err := conn.CreateRuleGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating NetworkFirewall Rule Group (%s): %s", name, err) } - d.SetId(aws.StringValue(output.RuleGroupResponse.RuleGroupArn)) + d.SetId(aws.ToString(output.RuleGroupResponse.RuleGroupArn)) return append(diags, resourceRuleGroupRead(ctx, d, meta)...) } func resourceRuleGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - - output, err := FindRuleGroupByARN(ctx, conn, d.Id()) + output, err := findRuleGroupByARN(ctx, conn, d.Id()) if err == nil && output.RuleGroup == nil { err = tfresource.NewEmptyResultError(d.Id()) @@ -524,7 +526,9 @@ func resourceRuleGroupRead(ctx context.Context, d *schema.ResourceData, meta int d.Set(names.AttrARN, response.RuleGroupArn) d.Set("capacity", response.Capacity) d.Set(names.AttrDescription, response.Description) - d.Set(names.AttrEncryptionConfiguration, flattenEncryptionConfiguration(response.EncryptionConfiguration)) + if err := d.Set(names.AttrEncryptionConfiguration, flattenEncryptionConfiguration(response.EncryptionConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting encryption_configuration: %s", err) + } d.Set(names.AttrName, response.RuleGroupName) if err := d.Set("rule_group", flattenRuleGroup(output.RuleGroup)); err != nil { return sdkdiag.AppendErrorf(diags, "setting rule_group: %s", err) @@ -539,14 +543,13 @@ func resourceRuleGroupRead(ctx context.Context, d 
*schema.ResourceData, meta int func resourceRuleGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) if d.HasChanges(names.AttrDescription, names.AttrEncryptionConfiguration, "rule_group", "rules", names.AttrType) { input := &networkfirewall.UpdateRuleGroupInput{ EncryptionConfiguration: expandEncryptionConfiguration(d.Get(names.AttrEncryptionConfiguration).([]interface{})), RuleGroupArn: aws.String(d.Id()), - Type: aws.String(d.Get(names.AttrType).(string)), + Type: awstypes.RuleGroupType(d.Get(names.AttrType).(string)), UpdateToken: aws.String(d.Get("update_token").(string)), } @@ -577,7 +580,7 @@ func resourceRuleGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i } } - _, err := conn.UpdateRuleGroupWithContext(ctx, input) + _, err := conn.UpdateRuleGroup(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating NetworkFirewall Rule Group (%s): %s", d.Id(), err) @@ -589,20 +592,19 @@ func resourceRuleGroupUpdate(ctx context.Context, d *schema.ResourceData, meta i func resourceRuleGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).NetworkFirewallClient(ctx) + log.Printf("[DEBUG] Deleting NetworkFirewall Rule Group: %s", d.Id()) const ( timeout = 10 * time.Minute ) - conn := meta.(*conns.AWSClient).NetworkFirewallConn(ctx) - - log.Printf("[DEBUG] Deleting NetworkFirewall Rule Group: %s", d.Id()) - _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, timeout, func() (interface{}, error) { - return conn.DeleteRuleGroupWithContext(ctx, &networkfirewall.DeleteRuleGroupInput{ + _, err := tfresource.RetryWhenIsAErrorMessageContains[*awstypes.InvalidOperationException](ctx, timeout, func() (interface{}, error) { + return conn.DeleteRuleGroup(ctx, 
&networkfirewall.DeleteRuleGroupInput{ RuleGroupArn: aws.String(d.Id()), }) - }, networkfirewall.ErrCodeInvalidOperationException, "Unable to delete the object because it is still in use") + }, "Unable to delete the object because it is still in use") - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -617,14 +619,14 @@ func resourceRuleGroupDelete(ctx context.Context, d *schema.ResourceData, meta i return diags } -func FindRuleGroupByARN(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) (*networkfirewall.DescribeRuleGroupOutput, error) { +func findRuleGroupByARN(ctx context.Context, conn *networkfirewall.Client, arn string) (*networkfirewall.DescribeRuleGroupOutput, error) { input := &networkfirewall.DescribeRuleGroupInput{ RuleGroupArn: aws.String(arn), } - output, err := conn.DescribeRuleGroupWithContext(ctx, input) + output, err := conn.DescribeRuleGroup(ctx, input) - if tfawserr.ErrCodeEquals(err, networkfirewall.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -642,9 +644,9 @@ func FindRuleGroupByARN(ctx context.Context, conn *networkfirewall.NetworkFirewa return output, nil } -func statusRuleGroup(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) retry.StateRefreshFunc { +func statusRuleGroup(ctx context.Context, conn *networkfirewall.Client, arn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindRuleGroupByARN(ctx, conn, arn) + output, err := findRuleGroupByARN(ctx, conn, arn) if tfresource.NotFound(err) { return nil, "", nil @@ -654,13 +656,13 @@ func statusRuleGroup(ctx context.Context, conn *networkfirewall.NetworkFirewall, return nil, "", err } - return output.RuleGroup, aws.StringValue(output.RuleGroupResponse.RuleGroupStatus), nil + 
return output.RuleGroup, string(output.RuleGroupResponse.RuleGroupStatus), nil } } -func waitRuleGroupDeleted(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string, timeout time.Duration) (*networkfirewall.RuleGroup, error) { +func waitRuleGroupDeleted(ctx context.Context, conn *networkfirewall.Client, arn string, timeout time.Duration) (*networkfirewall.DescribeRuleGroupOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{networkfirewall.ResourceStatusDeleting}, + Pending: enum.Slice(awstypes.ResourceStatusDeleting), Target: []string{}, Refresh: statusRuleGroup(ctx, conn, arn), Timeout: timeout, @@ -668,741 +670,757 @@ func waitRuleGroupDeleted(ctx context.Context, conn *networkfirewall.NetworkFire outputRaw, err := stateConf.WaitForStateContext(ctx) - if output, ok := outputRaw.(*networkfirewall.RuleGroup); ok { + if output, ok := outputRaw.(*networkfirewall.DescribeRuleGroupOutput); ok { return output, err } return nil, err } -func expandStatefulRuleHeader(l []interface{}) *networkfirewall.Header { - if len(l) == 0 || l[0] == nil { +func expandStatefulRuleHeader(tfList []interface{}) *awstypes.Header { + if len(tfList) == 0 || tfList[0] == nil { return nil } - tfMap, ok := l[0].(map[string]interface{}) + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - header := &networkfirewall.Header{} + + apiObject := &awstypes.Header{} + if v, ok := tfMap[names.AttrDestination].(string); ok && v != "" { - header.Destination = aws.String(v) + apiObject.Destination = aws.String(v) } if v, ok := tfMap["destination_port"].(string); ok && v != "" { - header.DestinationPort = aws.String(v) + apiObject.DestinationPort = aws.String(v) } if v, ok := tfMap["direction"].(string); ok && v != "" { - header.Direction = aws.String(v) + apiObject.Direction = awstypes.StatefulRuleDirection(v) } if v, ok := tfMap[names.AttrProtocol].(string); ok && v != "" { - header.Protocol = aws.String(v) + apiObject.Protocol = 
awstypes.StatefulRuleProtocol(v) } if v, ok := tfMap[names.AttrSource].(string); ok && v != "" { - header.Source = aws.String(v) + apiObject.Source = aws.String(v) } if v, ok := tfMap["source_port"].(string); ok && v != "" { - header.SourcePort = aws.String(v) + apiObject.SourcePort = aws.String(v) } - return header + return apiObject } -func expandStatefulRuleOptions(l []interface{}) []*networkfirewall.RuleOption { - if len(l) == 0 || l[0] == nil { +func expandStatefulRuleOptions(tfList []interface{}) []awstypes.RuleOption { + if len(tfList) == 0 || tfList[0] == nil { return nil } - ruleOptions := make([]*networkfirewall.RuleOption, 0, len(l)) - for _, tfMapRaw := range l { + apiObjects := make([]awstypes.RuleOption, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - keyword := tfMap["keyword"].(string) - option := &networkfirewall.RuleOption{ - Keyword: aws.String(keyword), + + apiObject := awstypes.RuleOption{ + Keyword: aws.String(tfMap["keyword"].(string)), } + if v, ok := tfMap["settings"].(*schema.Set); ok && v.Len() > 0 { - option.Settings = flex.ExpandStringSet(v) + apiObject.Settings = flex.ExpandStringValueSet(v) } - ruleOptions = append(ruleOptions, option) + + apiObjects = append(apiObjects, apiObject) } - return ruleOptions + return apiObjects } -func expandRulesSourceList(l []interface{}) *networkfirewall.RulesSourceList { - if len(l) == 0 || l[0] == nil { +func expandRulesSourceList(tfList []interface{}) *awstypes.RulesSourceList { + if len(tfList) == 0 || tfList[0] == nil { return nil } - tfMap, ok := l[0].(map[string]interface{}) + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - rulesSourceList := &networkfirewall.RulesSourceList{} + + apiObject := &awstypes.RulesSourceList{} + if v, ok := tfMap["generated_rules_type"].(string); ok && v != "" { - rulesSourceList.GeneratedRulesType = aws.String(v) + apiObject.GeneratedRulesType = 
awstypes.GeneratedRulesType(v) } if v, ok := tfMap["target_types"].(*schema.Set); ok && v.Len() > 0 { - rulesSourceList.TargetTypes = flex.ExpandStringSet(v) + apiObject.TargetTypes = flex.ExpandStringyValueSet[awstypes.TargetType](v) } if v, ok := tfMap["targets"].(*schema.Set); ok && v.Len() > 0 { - rulesSourceList.Targets = flex.ExpandStringSet(v) + apiObject.Targets = flex.ExpandStringValueSet(v) } - return rulesSourceList + return apiObject } -func expandStatefulRules(l []interface{}) []*networkfirewall.StatefulRule { - if len(l) == 0 || l[0] == nil { +func expandStatefulRules(tfList []interface{}) []awstypes.StatefulRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - rules := make([]*networkfirewall.StatefulRule, 0, len(l)) - for _, tfMapRaw := range l { + apiObjects := make([]awstypes.StatefulRule, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - rule := &networkfirewall.StatefulRule{} + + apiObject := awstypes.StatefulRule{} + if v, ok := tfMap[names.AttrAction].(string); ok && v != "" { - rule.Action = aws.String(v) + apiObject.Action = awstypes.StatefulAction(v) } if v, ok := tfMap[names.AttrHeader].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.Header = expandStatefulRuleHeader(v) + apiObject.Header = expandStatefulRuleHeader(v) } if v, ok := tfMap["rule_option"].(*schema.Set); ok && v.Len() > 0 { - rule.RuleOptions = expandStatefulRuleOptions(v.List()) + apiObject.RuleOptions = expandStatefulRuleOptions(v.List()) } - rules = append(rules, rule) + + apiObjects = append(apiObjects, apiObject) } - return rules + return apiObjects } -func expandRuleGroup(tfMap map[string]interface{}) *networkfirewall.RuleGroup { +func expandRuleGroup(tfMap map[string]interface{}) *awstypes.RuleGroup { if tfMap == nil { return nil } - ruleGroup := &networkfirewall.RuleGroup{} - if tfList, ok := tfMap["reference_sets"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { 
- referenceSets := &networkfirewall.ReferenceSets{} - rvMap, ok := tfList[0].(map[string]interface{}) - if ok { - if v, ok := rvMap["ip_set_references"].(*schema.Set); ok && v.Len() > 0 { + apiObject := &awstypes.RuleGroup{} + + if v, ok := tfMap["reference_sets"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if tfMap, ok := v[0].(map[string]interface{}); ok { + referenceSets := &awstypes.ReferenceSets{} + + if v, ok := tfMap["ip_set_references"].(*schema.Set); ok && v.Len() > 0 { referenceSets.IPSetReferences = expandIPSetReferences(v.List()) } - ruleGroup.ReferenceSets = referenceSets + apiObject.ReferenceSets = referenceSets } } - if tfList, ok := tfMap["rule_variables"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { - ruleVariables := &networkfirewall.RuleVariables{} - rvMap, ok := tfList[0].(map[string]interface{}) - if ok { - if v, ok := rvMap["ip_sets"].(*schema.Set); ok && v.Len() > 0 { + + if v, ok := tfMap["rule_variables"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if tfMap, ok := v[0].(map[string]interface{}); ok { + ruleVariables := &awstypes.RuleVariables{} + + if v, ok := tfMap["ip_sets"].(*schema.Set); ok && v.Len() > 0 { ruleVariables.IPSets = expandIPSets(v.List()) } - if v, ok := rvMap["port_sets"].(*schema.Set); ok && v.Len() > 0 { + if v, ok := tfMap["port_sets"].(*schema.Set); ok && v.Len() > 0 { ruleVariables.PortSets = expandPortSets(v.List()) } - ruleGroup.RuleVariables = ruleVariables + + apiObject.RuleVariables = ruleVariables } } - if tfList, ok := tfMap["rules_source"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { - rulesSource := &networkfirewall.RulesSource{} - rsMap, ok := tfList[0].(map[string]interface{}) - if ok { - if v, ok := rsMap["rules_source_list"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + + if v, ok := tfMap["rules_source"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if tfMap, ok := v[0].(map[string]interface{}); ok { + rulesSource := &awstypes.RulesSource{} 
+ + if v, ok := tfMap["rules_source_list"].([]interface{}); ok && len(v) > 0 && v[0] != nil { rulesSource.RulesSourceList = expandRulesSourceList(v) } - if v, ok := rsMap["rules_string"].(string); ok && v != "" { + if v, ok := tfMap["rules_string"].(string); ok && v != "" { rulesSource.RulesString = aws.String(v) } - if v, ok := rsMap["stateful_rule"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["stateful_rule"].([]interface{}); ok && len(v) > 0 { rulesSource.StatefulRules = expandStatefulRules(v) } - if v, ok := rsMap["stateless_rules_and_custom_actions"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if v, ok := tfMap["stateless_rules_and_custom_actions"].([]interface{}); ok && len(v) > 0 && v[0] != nil { rulesSource.StatelessRulesAndCustomActions = expandStatelessRulesAndCustomActions(v) } - ruleGroup.RulesSource = rulesSource + + apiObject.RulesSource = rulesSource } } - if tfList, ok := tfMap["stateful_rule_options"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { - statefulRuleOptions := &networkfirewall.StatefulRuleOptions{} - sroMap, ok := tfList[0].(map[string]interface{}) - if ok { - if v, ok := sroMap["rule_order"].(string); ok && v != "" { - statefulRuleOptions.RuleOrder = aws.String(v) + + if v, ok := tfMap["stateful_rule_options"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if tfMap, ok := v[0].(map[string]interface{}); ok { + statefulRuleOptions := &awstypes.StatefulRuleOptions{} + + if v, ok := tfMap["rule_order"].(string); ok && v != "" { + statefulRuleOptions.RuleOrder = awstypes.RuleOrder(v) } + + apiObject.StatefulRuleOptions = statefulRuleOptions } - ruleGroup.StatefulRuleOptions = statefulRuleOptions } - return ruleGroup + return apiObject } -func expandIPSets(l []interface{}) map[string]*networkfirewall.IPSet { - if len(l) == 0 || l[0] == nil { +func expandIPSetReferences(tfList []interface{}) map[string]awstypes.IPSetReference { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := 
make(map[string]*networkfirewall.IPSet) - for _, tfMapRaw := range l { + apiObject := make(map[string]awstypes.IPSetReference) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - if key, ok := tfMap[names.AttrKey].(string); ok && key != "" { - if tfList, ok := tfMap["ip_set"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { - tfMap, ok := tfList[0].(map[string]interface{}) - if ok { - if tfSet, ok := tfMap["definition"].(*schema.Set); ok && tfSet.Len() > 0 { - ipSet := &networkfirewall.IPSet{ - Definition: flex.ExpandStringSet(tfSet), + if k, ok := tfMap[names.AttrKey].(string); ok && k != "" { + if v, ok := tfMap["ip_set_reference"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if tfMap, ok := v[0].(map[string]interface{}); ok { + if v, ok := tfMap["reference_arn"].(string); ok && v != "" { + apiObject[k] = awstypes.IPSetReference{ + ReferenceArn: aws.String(v), } - m[key] = ipSet } } } } } - return m + return apiObject } -func expandIPSetReferences(l []interface{}) map[string]*networkfirewall.IPSetReference { - if len(l) == 0 || l[0] == nil { +func expandPortSets(tfList []interface{}) map[string]awstypes.PortSet { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := make(map[string]*networkfirewall.IPSetReference) - for _, tfMapRaw := range l { + apiObject := make(map[string]awstypes.PortSet) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - if key, ok := tfMap[names.AttrKey].(string); ok && key != "" { - if tfList, ok := tfMap["ip_set_reference"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { - tfMap, ok := tfList[0].(map[string]interface{}) - if ok { - if tfSet, ok := tfMap["reference_arn"].(string); ok && tfSet != "" { - ipSetReference := &networkfirewall.IPSetReference{ - ReferenceArn: aws.String(tfSet), + if k, ok := tfMap[names.AttrKey].(string); ok && k != "" { + if v, ok := 
tfMap["port_set"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if tfMap, ok := v[0].(map[string]interface{}); ok { + if v, ok := tfMap["definition"].(*schema.Set); ok && v.Len() > 0 { + apiObject[k] = awstypes.PortSet{ + Definition: flex.ExpandStringValueSet(v), } - m[key] = ipSetReference } } } } } - return m + return apiObject } -func expandPortSets(l []interface{}) map[string]*networkfirewall.PortSet { - if len(l) == 0 || l[0] == nil { + +func expandAddresses(tfList []interface{}) []awstypes.Address { + if len(tfList) == 0 || tfList[0] == nil { return nil } - m := make(map[string]*networkfirewall.PortSet) - for _, tfMapRaw := range l { + apiObjects := make([]awstypes.Address, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - if key, ok := tfMap[names.AttrKey].(string); ok && key != "" { - if tfList, ok := tfMap["port_set"].([]interface{}); ok && len(tfList) > 0 && tfList[0] != nil { - tfMap, ok := tfList[0].(map[string]interface{}) - if ok { - if tfSet, ok := tfMap["definition"].(*schema.Set); ok && tfSet.Len() > 0 { - ipSet := &networkfirewall.PortSet{ - Definition: flex.ExpandStringSet(tfSet), - } - m[key] = ipSet - } - } - } - } - } - - return m -} + apiObject := awstypes.Address{} -func expandAddresses(l []interface{}) []*networkfirewall.Address { - if len(l) == 0 || l[0] == nil { - return nil - } - destinations := make([]*networkfirewall.Address, 0, len(l)) - for _, tfMapRaw := range l { - tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { - continue - } - destination := &networkfirewall.Address{} if v, ok := tfMap["address_definition"].(string); ok && v != "" { - destination.AddressDefinition = aws.String(v) + apiObject.AddressDefinition = aws.String(v) } - destinations = append(destinations, destination) + + apiObjects = append(apiObjects, apiObject) } - return destinations + + return apiObjects } -func expandPortRanges(l []interface{}) []*networkfirewall.PortRange { - 
if len(l) == 0 || l[0] == nil { +func expandPortRanges(tfList []interface{}) []awstypes.PortRange { + if len(tfList) == 0 || tfList[0] == nil { return nil } - ports := make([]*networkfirewall.PortRange, 0, len(l)) - for _, tfMapRaw := range l { + + apiObjects := make([]awstypes.PortRange, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - port := &networkfirewall.PortRange{} + + apiObject := awstypes.PortRange{} + if v, ok := tfMap["from_port"].(int); ok { - port.FromPort = aws.Int64(int64(v)) + apiObject.FromPort = int32(v) } if v, ok := tfMap["to_port"].(int); ok { - port.ToPort = aws.Int64(int64(v)) + apiObject.ToPort = int32(v) } - ports = append(ports, port) + + apiObjects = append(apiObjects, apiObject) } - return ports + + return apiObjects } -func expandTCPFlags(l []interface{}) []*networkfirewall.TCPFlagField { - if len(l) == 0 || l[0] == nil { +func expandTCPFlags(tfList []interface{}) []awstypes.TCPFlagField { + if len(tfList) == 0 || tfList[0] == nil { return nil } - tcpFlags := make([]*networkfirewall.TCPFlagField, 0, len(l)) - for _, tfMapRaw := range l { + + apiObjects := make([]awstypes.TCPFlagField, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - tcpFlag := &networkfirewall.TCPFlagField{} + + apiObject := awstypes.TCPFlagField{} + if v, ok := tfMap["flags"].(*schema.Set); ok && v.Len() > 0 { - tcpFlag.Flags = flex.ExpandStringSet(v) + apiObject.Flags = flex.ExpandStringyValueSet[awstypes.TCPFlag](v) } if v, ok := tfMap["masks"].(*schema.Set); ok && v.Len() > 0 { - tcpFlag.Masks = flex.ExpandStringSet(v) + apiObject.Masks = flex.ExpandStringyValueSet[awstypes.TCPFlag](v) } - tcpFlags = append(tcpFlags, tcpFlag) + + apiObjects = append(apiObjects, apiObject) } - return tcpFlags + + return apiObjects } -func expandMatchAttributes(l []interface{}) *networkfirewall.MatchAttributes { - if len(l) == 0 || l[0] 
== nil { +func expandMatchAttributes(tfList []interface{}) *awstypes.MatchAttributes { + if len(tfList) == 0 || tfList[0] == nil { return nil } - tfMap, ok := l[0].(map[string]interface{}) + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - matchAttributes := &networkfirewall.MatchAttributes{} + + apiObject := &awstypes.MatchAttributes{} + if v, ok := tfMap[names.AttrDestination].(*schema.Set); ok && v.Len() > 0 { - matchAttributes.Destinations = expandAddresses(v.List()) + apiObject.Destinations = expandAddresses(v.List()) } if v, ok := tfMap["destination_port"].(*schema.Set); ok && v.Len() > 0 { - matchAttributes.DestinationPorts = expandPortRanges(v.List()) + apiObject.DestinationPorts = expandPortRanges(v.List()) } if v, ok := tfMap["protocols"].(*schema.Set); ok && v.Len() > 0 { - matchAttributes.Protocols = flex.ExpandInt64Set(v) + apiObject.Protocols = flex.ExpandInt32ValueSet(v) } if v, ok := tfMap[names.AttrSource].(*schema.Set); ok && v.Len() > 0 { - matchAttributes.Sources = expandAddresses(v.List()) + apiObject.Sources = expandAddresses(v.List()) } if v, ok := tfMap["source_port"].(*schema.Set); ok && v.Len() > 0 { - matchAttributes.SourcePorts = expandPortRanges(v.List()) + apiObject.SourcePorts = expandPortRanges(v.List()) } if v, ok := tfMap["tcp_flag"].(*schema.Set); ok && v.Len() > 0 { - matchAttributes.TCPFlags = expandTCPFlags(v.List()) + apiObject.TCPFlags = expandTCPFlags(v.List()) } - return matchAttributes + return apiObject } -func expandRuleDefinition(l []interface{}) *networkfirewall.RuleDefinition { - if len(l) == 0 || l[0] == nil { +func expandRuleDefinition(tfList []interface{}) *awstypes.RuleDefinition { + if len(tfList) == 0 || tfList[0] == nil { return nil } - tfMap, ok := l[0].(map[string]interface{}) + + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } - rd := &networkfirewall.RuleDefinition{} + + apiObject := &awstypes.RuleDefinition{} + if v, ok := tfMap[names.AttrActions].(*schema.Set); 
ok && v.Len() > 0 { - rd.Actions = flex.ExpandStringSet(v) + apiObject.Actions = flex.ExpandStringValueSet(v) } if v, ok := tfMap["match_attributes"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rd.MatchAttributes = expandMatchAttributes(v) + apiObject.MatchAttributes = expandMatchAttributes(v) } - return rd + + return apiObject } -func expandStatelessRules(l []interface{}) []*networkfirewall.StatelessRule { - if len(l) == 0 || l[0] == nil { +func expandStatelessRules(tfList []interface{}) []awstypes.StatelessRule { + if len(tfList) == 0 || tfList[0] == nil { return nil } - statelessRules := make([]*networkfirewall.StatelessRule, 0, len(l)) - for _, tfMapRaw := range l { + + apiObjects := make([]awstypes.StatelessRule, 0, len(tfList)) + + for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } - statelessRule := &networkfirewall.StatelessRule{} + + apiObject := awstypes.StatelessRule{} + if v, ok := tfMap[names.AttrPriority].(int); ok && v > 0 { - statelessRule.Priority = aws.Int64(int64(v)) + apiObject.Priority = aws.Int32(int32(v)) } if v, ok := tfMap["rule_definition"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - statelessRule.RuleDefinition = expandRuleDefinition(v) + apiObject.RuleDefinition = expandRuleDefinition(v) } - statelessRules = append(statelessRules, statelessRule) + + apiObjects = append(apiObjects, apiObject) } - return statelessRules + return apiObjects } -func expandStatelessRulesAndCustomActions(l []interface{}) *networkfirewall.StatelessRulesAndCustomActions { - if len(l) == 0 || l[0] == nil { +func expandStatelessRulesAndCustomActions(tfList []interface{}) *awstypes.StatelessRulesAndCustomActions { + if len(tfList) == 0 || tfList[0] == nil { return nil } - s := &networkfirewall.StatelessRulesAndCustomActions{} - tfMap, ok := l[0].(map[string]interface{}) + apiObject := &awstypes.StatelessRulesAndCustomActions{} + + tfMap, ok := tfList[0].(map[string]interface{}) if !ok { return nil } 
+ if v, ok := tfMap["custom_action"].(*schema.Set); ok && v.Len() > 0 { - s.CustomActions = expandCustomActions(v.List()) + apiObject.CustomActions = expandCustomActions(v.List()) } if v, ok := tfMap["stateless_rule"].(*schema.Set); ok && v.Len() > 0 { - s.StatelessRules = expandStatelessRules(v.List()) + apiObject.StatelessRules = expandStatelessRules(v.List()) } - return s + return apiObject } -func flattenRuleGroup(r *networkfirewall.RuleGroup) []interface{} { - if r == nil { +func flattenRuleGroup(apiObject *awstypes.RuleGroup) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "reference_sets": flattenReferenceSets(r.ReferenceSets), - "rule_variables": flattenRuleVariables(r.RuleVariables), - "rules_source": flattenRulesSource(r.RulesSource), - "stateful_rule_options": flattenStatefulRulesOptions(r.StatefulRuleOptions), + tfMap := map[string]interface{}{ + "reference_sets": flattenReferenceSets(apiObject.ReferenceSets), + "rule_variables": flattenRuleVariables(apiObject.RuleVariables), + "rules_source": flattenRulesSource(apiObject.RulesSource), + "stateful_rule_options": flattenStatefulRulesOptions(apiObject.StatefulRuleOptions), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenReferenceSets(rv *networkfirewall.ReferenceSets) []interface{} { - if rv == nil { +func flattenReferenceSets(apiObject *awstypes.ReferenceSets) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "ip_set_references": flattenIPSetReferences(rv.IPSetReferences), + + tfMap := map[string]interface{}{ + "ip_set_references": flattenIPSetReferences(apiObject.IPSetReferences), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenIPSetReferences(m map[string]*networkfirewall.IPSetReference) []interface{} { - if m == nil { +func flattenIPSetReferences(apiObject map[string]awstypes.IPSetReference) []interface{} { + if apiObject == nil { return []interface{}{} } 
- sets := make([]interface{}, 0, len(m)) - for k, v := range m { + + tfList := make([]interface{}, 0, len(apiObject)) + + for k, v := range apiObject { tfMap := map[string]interface{}{ + "ip_set_reference": flattenIPSetReference(&v), names.AttrKey: k, - "ip_set_reference": flattenIPSetReference(v), } - sets = append(sets, tfMap) + + tfList = append(tfList, tfMap) } - return sets + return tfList } -func flattenIPSetReference(i *networkfirewall.IPSetReference) []interface{} { - if i == nil { +func flattenIPSetReference(apiObject *awstypes.IPSetReference) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "reference_arn": aws.StringValue(i.ReferenceArn), + + tfMap := map[string]interface{}{ + "reference_arn": aws.ToString(apiObject.ReferenceArn), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenRuleVariables(rv *networkfirewall.RuleVariables) []interface{} { - if rv == nil { +func flattenRuleVariables(apiObject *awstypes.RuleVariables) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "ip_sets": flattenIPSets(rv.IPSets), - "port_sets": flattenPortSets(rv.PortSets), + + tfMap := map[string]interface{}{ + "ip_sets": flattenIPSets(apiObject.IPSets), + "port_sets": flattenPortSets(apiObject.PortSets), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenIPSets(m map[string]*networkfirewall.IPSet) []interface{} { - if m == nil { +func flattenPortSets(apiObject map[string]awstypes.PortSet) []interface{} { + if apiObject == nil { return []interface{}{} } - sets := make([]interface{}, 0, len(m)) - for k, v := range m { - tfMap := map[string]interface{}{ - names.AttrKey: k, - "ip_set": flattenIPSet(v), - } - sets = append(sets, tfMap) - } - return sets -} + tfList := make([]interface{}, 0, len(apiObject)) -func flattenPortSets(m map[string]*networkfirewall.PortSet) []interface{} { - if m == nil { - return []interface{}{} - } - sets := 
make([]interface{}, 0, len(m)) - for k, v := range m { + for k, v := range apiObject { tfMap := map[string]interface{}{ names.AttrKey: k, - "port_set": flattenPortSet(v), + "port_set": flattenPortSet(&v), } - sets = append(sets, tfMap) - } - return sets -} - -func flattenIPSet(i *networkfirewall.IPSet) []interface{} { - if i == nil { - return []interface{}{} - } - m := map[string]interface{}{ - "definition": flex.FlattenStringSet(i.Definition), + tfList = append(tfList, tfMap) } - return []interface{}{m} + return tfList } -func flattenPortSet(p *networkfirewall.PortSet) []interface{} { - if p == nil { +func flattenPortSet(apiObject *awstypes.PortSet) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "definition": flex.FlattenStringSet(p.Definition), + + tfMap := map[string]interface{}{ + "definition": apiObject.Definition, } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenRulesSource(rs *networkfirewall.RulesSource) []interface{} { - if rs == nil { +func flattenRulesSource(apiObject *awstypes.RulesSource) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "rules_source_list": flattenRulesSourceList(rs.RulesSourceList), - "rules_string": aws.StringValue(rs.RulesString), - "stateful_rule": flattenStatefulRules(rs.StatefulRules), - "stateless_rules_and_custom_actions": flattenStatelessRulesAndCustomActions(rs.StatelessRulesAndCustomActions), + tfMap := map[string]interface{}{ + "rules_source_list": flattenRulesSourceList(apiObject.RulesSourceList), + "rules_string": aws.ToString(apiObject.RulesString), + "stateful_rule": flattenStatefulRules(apiObject.StatefulRules), + "stateless_rules_and_custom_actions": flattenStatelessRulesAndCustomActions(apiObject.StatelessRulesAndCustomActions), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenRulesSourceList(r *networkfirewall.RulesSourceList) []interface{} { - if r == nil { +func 
flattenRulesSourceList(apiObject *awstypes.RulesSourceList) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "generated_rules_type": aws.StringValue(r.GeneratedRulesType), - "target_types": flex.FlattenStringSet(r.TargetTypes), - "targets": flex.FlattenStringSet(r.Targets), + tfMap := map[string]interface{}{ + "generated_rules_type": apiObject.GeneratedRulesType, + "target_types": apiObject.TargetTypes, + "targets": apiObject.Targets, } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenStatefulRules(sr []*networkfirewall.StatefulRule) []interface{} { - if sr == nil { +func flattenStatefulRules(apiObjects []awstypes.StatefulRule) []interface{} { + if apiObjects == nil { return []interface{}{} } - rules := make([]interface{}, 0, len(sr)) - for _, s := range sr { + + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { m := map[string]interface{}{ - names.AttrAction: aws.StringValue(s.Action), - names.AttrHeader: flattenHeader(s.Header), - "rule_option": flattenRuleOptions(s.RuleOptions), + names.AttrAction: apiObject.Action, + names.AttrHeader: flattenHeader(apiObject.Header), + "rule_option": flattenRuleOptions(apiObject.RuleOptions), } - rules = append(rules, m) + + tfList = append(tfList, m) } - return rules + + return tfList } -func flattenHeader(h *networkfirewall.Header) []interface{} { - if h == nil { +func flattenHeader(apiObject *awstypes.Header) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - names.AttrDestination: aws.StringValue(h.Destination), - "destination_port": aws.StringValue(h.DestinationPort), - "direction": aws.StringValue(h.Direction), - names.AttrProtocol: aws.StringValue(h.Protocol), - names.AttrSource: aws.StringValue(h.Source), - "source_port": aws.StringValue(h.SourcePort), + tfMap := map[string]interface{}{ + names.AttrDestination: aws.ToString(apiObject.Destination), + 
"destination_port": aws.ToString(apiObject.DestinationPort), + "direction": apiObject.Direction, + names.AttrProtocol: apiObject.Protocol, + names.AttrSource: aws.ToString(apiObject.Source), + "source_port": aws.ToString(apiObject.SourcePort), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenRuleOptions(o []*networkfirewall.RuleOption) []interface{} { - if o == nil { +func flattenRuleOptions(apiObjects []awstypes.RuleOption) []interface{} { + if apiObjects == nil { return []interface{}{} } - options := make([]interface{}, 0, len(o)) - for _, option := range o { - m := map[string]interface{}{ - "keyword": aws.StringValue(option.Keyword), - "settings": aws.StringValueSlice(option.Settings), + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + "keyword": aws.ToString(apiObject.Keyword), + "settings": apiObject.Settings, } - options = append(options, m) + + tfList = append(tfList, tfMap) } - return options + return tfList } -func flattenStatelessRulesAndCustomActions(sr *networkfirewall.StatelessRulesAndCustomActions) []interface{} { - if sr == nil { +func flattenStatelessRulesAndCustomActions(apiObject *awstypes.StatelessRulesAndCustomActions) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "custom_action": flattenCustomActions(sr.CustomActions), - "stateless_rule": flattenStatelessRules(sr.StatelessRules), + tfMap := map[string]interface{}{ + "custom_action": flattenCustomActions(apiObject.CustomActions), + "stateless_rule": flattenStatelessRules(apiObject.StatelessRules), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenStatelessRules(sr []*networkfirewall.StatelessRule) []interface{} { - if sr == nil { +func flattenStatelessRules(apiObjects []awstypes.StatelessRule) []interface{} { + if apiObjects == nil { return []interface{}{} } - rules := make([]interface{}, 0, len(sr)) - for _, s := 
range sr { - rule := map[string]interface{}{ - names.AttrPriority: int(aws.Int64Value(s.Priority)), - "rule_definition": flattenRuleDefinition(s.RuleDefinition), + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + names.AttrPriority: aws.ToInt32(apiObject.Priority), + "rule_definition": flattenRuleDefinition(apiObject.RuleDefinition), } - rules = append(rules, rule) + + tfList = append(tfList, tfMap) } - return rules + return tfList } -func flattenRuleDefinition(r *networkfirewall.RuleDefinition) []interface{} { - if r == nil { +func flattenRuleDefinition(apiObject *awstypes.RuleDefinition) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - names.AttrActions: flex.FlattenStringSet(r.Actions), - "match_attributes": flattenMatchAttributes(r.MatchAttributes), + tfMap := map[string]interface{}{ + names.AttrActions: apiObject.Actions, + "match_attributes": flattenMatchAttributes(apiObject.MatchAttributes), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenMatchAttributes(ma *networkfirewall.MatchAttributes) []interface{} { - if ma == nil { +func flattenMatchAttributes(apiObject *awstypes.MatchAttributes) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - names.AttrDestination: flattenAddresses(ma.Destinations), - "destination_port": flattenPortRanges(ma.DestinationPorts), - "protocols": flex.FlattenInt64Set(ma.Protocols), - names.AttrSource: flattenAddresses(ma.Sources), - "source_port": flattenPortRanges(ma.SourcePorts), - "tcp_flag": flattenTCPFlags(ma.TCPFlags), + tfMap := map[string]interface{}{ + names.AttrDestination: flattenAddresses(apiObject.Destinations), + "destination_port": flattenPortRanges(apiObject.DestinationPorts), + "protocols": apiObject.Protocols, + names.AttrSource: flattenAddresses(apiObject.Sources), + "source_port": 
flattenPortRanges(apiObject.SourcePorts), + "tcp_flag": flattenTCPFlags(apiObject.TCPFlags), } - return []interface{}{m} + return []interface{}{tfMap} } -func flattenAddresses(d []*networkfirewall.Address) []interface{} { - if d == nil { +func flattenAddresses(apiObjects []awstypes.Address) []interface{} { + if apiObjects == nil { return []interface{}{} } - destinations := make([]interface{}, 0, len(d)) - for _, addr := range d { - m := map[string]interface{}{ - "address_definition": aws.StringValue(addr.AddressDefinition), + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + "address_definition": aws.ToString(apiObject.AddressDefinition), } - destinations = append(destinations, m) + + tfList = append(tfList, tfMap) } - return destinations + return tfList } -func flattenPortRanges(pr []*networkfirewall.PortRange) []interface{} { - if pr == nil { +func flattenPortRanges(apiObjects []awstypes.PortRange) []interface{} { + if apiObjects == nil { return []interface{}{} } - portRanges := make([]interface{}, 0, len(pr)) - for _, r := range pr { - m := map[string]interface{}{ - "from_port": int(aws.Int64Value(r.FromPort)), - "to_port": int(aws.Int64Value(r.ToPort)), + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { + tfMap := map[string]interface{}{ + "from_port": apiObject.FromPort, + "to_port": apiObject.ToPort, } - portRanges = append(portRanges, m) + + tfList = append(tfList, tfMap) } - return portRanges + return tfList } -func flattenTCPFlags(t []*networkfirewall.TCPFlagField) []interface{} { - if t == nil { +func flattenTCPFlags(apiObjects []awstypes.TCPFlagField) []interface{} { + if apiObjects == nil { return []interface{}{} } - flagFields := make([]interface{}, 0, len(t)) - for _, v := range t { + + tfList := make([]interface{}, 0, len(apiObjects)) + + for _, apiObject := range apiObjects { m := map[string]interface{}{ - "flags": 
flex.FlattenStringSet(v.Flags), - "masks": flex.FlattenStringSet(v.Masks), + "flags": apiObject.Flags, + "masks": apiObject.Masks, } - flagFields = append(flagFields, m) + + tfList = append(tfList, m) } - return flagFields + return tfList } -func flattenStatefulRulesOptions(sro *networkfirewall.StatefulRuleOptions) []interface{} { - if sro == nil { +func flattenStatefulRulesOptions(apiObject *awstypes.StatefulRuleOptions) []interface{} { + if apiObject == nil { return []interface{}{} } - m := map[string]interface{}{ - "rule_order": aws.StringValue(sro.RuleOrder), + tfMap := map[string]interface{}{ + "rule_order": apiObject.RuleOrder, } - return []interface{}{m} + return []interface{}{tfMap} } diff --git a/internal/service/networkfirewall/rule_group_test.go b/internal/service/networkfirewall/rule_group_test.go index 1ccf7ffa11b..f4636ff8e46 100644 --- a/internal/service/networkfirewall/rule_group_test.go +++ b/internal/service/networkfirewall/rule_group_test.go @@ -8,8 +8,9 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -39,14 +40,14 @@ func TestAccNetworkFirewallRuleGroup_Basic_rulesSourceList(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateful-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateful), + resource.TestCheckResourceAttr(resourceName, names.AttrType, 
string(awstypes.RuleGroupTypeStateful)), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.reference_sets.#", acctest.Ct0), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.generated_rules_type", networkfirewall.GeneratedRulesTypeAllowlist), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.generated_rules_type", string(awstypes.GeneratedRulesTypeAllowlist)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.target_types.#", acctest.Ct1), - resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.target_types.*", networkfirewall.TargetTypeHttpHost), + resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.target_types.*", string(awstypes.TargetTypeHttpHost)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.targets.#", acctest.Ct1), resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.targets.*", "test.example.com"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rule_variables.#", acctest.Ct0), @@ -82,7 +83,7 @@ func TestAccNetworkFirewallRuleGroup_Basic_referenceSets(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateful-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateful), + 
resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateful)), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.reference_sets.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.reference_sets.0.ip_set_references.#", acctest.Ct3), @@ -119,7 +120,7 @@ func TestAccNetworkFirewallRuleGroup_Basic_updateReferenceSets(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateful-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateful), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateful)), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.reference_sets.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.reference_sets.0.ip_set_references.#", acctest.Ct3), @@ -140,7 +141,7 @@ func TestAccNetworkFirewallRuleGroup_Basic_updateReferenceSets(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateful-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateful), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateful)), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.reference_sets.#", acctest.Ct1), 
resource.TestCheckResourceAttr(resourceName, "rule_group.0.reference_sets.0.ip_set_references.#", acctest.Ct3), @@ -172,16 +173,16 @@ func TestAccNetworkFirewallRuleGroup_Basic_statefulRule(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateful-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateful), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateful)), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionPass), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionPass)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination", "124.1.1.24/32"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination_port", "53"), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", networkfirewall.StatefulRuleDirectionAny), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", networkfirewall.StatefulRuleProtocolTcp), + resource.TestCheckResourceAttr(resourceName, 
"rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", string(awstypes.StatefulRuleDirectionAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", string(awstypes.StatefulRuleProtocolTcp)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source", "1.2.3.4/32"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source_port", "53"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.rule_option.#", acctest.Ct1), @@ -222,7 +223,7 @@ func TestAccNetworkFirewallRuleGroup_Basic_statelessRule(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateless-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateless), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateless)), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateless_rules_and_custom_actions.#", acctest.Ct1), @@ -268,7 +269,7 @@ alert http any any -> any any (http_response_line; content:"403 Forbidden"; sid: acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateful-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateful), + 
resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateful)), resource.TestCheckResourceAttr(resourceName, "rules", rules), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.#", acctest.Ct1), @@ -305,7 +306,7 @@ func TestAccNetworkFirewallRuleGroup_statefulRuleOptions(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.stateful_rule_options.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.stateful_rule_options.0.rule_order", networkfirewall.RuleOrderStrictOrder), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.stateful_rule_options.0.rule_order", string(awstypes.RuleOrderStrictOrder)), ), }, { @@ -335,7 +336,7 @@ func TestAccNetworkFirewallRuleGroup_updateStatefulRuleOptions(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.stateful_rule_options.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.stateful_rule_options.0.rule_order", networkfirewall.RuleOrderStrictOrder), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.stateful_rule_options.0.rule_order", string(awstypes.RuleOrderStrictOrder)), ), }, { @@ -344,7 +345,7 @@ func TestAccNetworkFirewallRuleGroup_updateStatefulRuleOptions(t *testing.T) { testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup2), testAccCheckRuleGroupRecreated(&ruleGroup1, &ruleGroup2), resource.TestCheckResourceAttr(resourceName, "rule_group.0.stateful_rule_options.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.stateful_rule_options.0.rule_order", networkfirewall.RuleOrderDefaultActionOrder), + resource.TestCheckResourceAttr(resourceName, 
"rule_group.0.stateful_rule_options.0.rule_order", string(awstypes.RuleOrderDefaultActionOrder)), ), }, { @@ -383,7 +384,7 @@ func TestAccNetworkFirewallRuleGroup_statelessRuleWithCustomAction(t *testing.T) acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateless-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateless), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateless)), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateless_rules_and_custom_actions.#", acctest.Ct1), @@ -482,14 +483,14 @@ func TestAccNetworkFirewallRuleGroup_updateRulesSourceList(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", fmt.Sprintf("stateful-rulegroup/%s", rName)), resource.TestCheckResourceAttr(resourceName, "capacity", "100"), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), - resource.TestCheckResourceAttr(resourceName, names.AttrType, networkfirewall.RuleGroupTypeStateful), + resource.TestCheckResourceAttr(resourceName, names.AttrType, string(awstypes.RuleGroupTypeStateful)), resource.TestCheckResourceAttr(resourceName, "rule_group.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.generated_rules_type", networkfirewall.GeneratedRulesTypeDenylist), + 
resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.generated_rules_type", string(awstypes.GeneratedRulesTypeDenylist)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.target_types.#", acctest.Ct2), - resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.target_types.*", networkfirewall.TargetTypeHttpHost), - resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.target_types.*", networkfirewall.TargetTypeTlsSni), + resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.target_types.*", string(awstypes.TargetTypeHttpHost)), + resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.target_types.*", string(awstypes.TargetTypeTlsSni)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.targets.#", acctest.Ct2), resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.targets.*", "test.example.com"), resource.TestCheckTypeSetElemAttr(resourceName, "rule_group.0.rules_source.0.rules_source_list.0.targets.*", "test2.example.com"), @@ -608,12 +609,12 @@ func TestAccNetworkFirewallRuleGroup_updateStatefulRule(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionDrop), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionDrop)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.#", acctest.Ct1), 
resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination", "1.2.3.4/32"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination_port", "1001"), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", networkfirewall.StatefulRuleDirectionForward), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", networkfirewall.StatefulRuleProtocolIp), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", string(awstypes.StatefulRuleDirectionForward)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", string(awstypes.StatefulRuleProtocolAny)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source", "124.1.1.24/32"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source_port", "1001"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.rule_option.#", acctest.Ct2), @@ -665,23 +666,23 @@ func TestAccNetworkFirewallRuleGroup_updateMultipleStatefulRules(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct2), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionPass), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionPass)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, 
"rule_group.0.rules_source.0.stateful_rule.0.header.0.destination", "124.1.1.24/32"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination_port", "53"), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", networkfirewall.StatefulRuleDirectionAny), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", networkfirewall.StatefulRuleProtocolTcp), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", string(awstypes.StatefulRuleDirectionAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", string(awstypes.StatefulRuleProtocolTcp)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source", "1.2.3.4/32"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source_port", "53"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.rule_option.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.action", networkfirewall.StatefulActionAlert), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.action", string(awstypes.StatefulActionAlert)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.destination", networkfirewall.StatefulRuleDirectionAny), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.destination_port", networkfirewall.StatefulRuleDirectionAny), - resource.TestCheckResourceAttr(resourceName, 
"rule_group.0.rules_source.0.stateful_rule.1.header.0.direction", networkfirewall.StatefulRuleDirectionAny), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.protocol", networkfirewall.StatefulRuleProtocolIp), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.source", networkfirewall.StatefulRuleDirectionAny), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.source_port", networkfirewall.StatefulRuleDirectionAny), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.destination", string(awstypes.StatefulRuleDirectionAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.destination_port", string(awstypes.StatefulRuleDirectionAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.direction", string(awstypes.StatefulRuleDirectionAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.protocol", string(awstypes.StatefulRuleProtocolAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.source", string(awstypes.StatefulRuleDirectionAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.header.0.source_port", string(awstypes.StatefulRuleDirectionAny)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.1.rule_option.#", acctest.Ct1), ), }, @@ -695,12 +696,12 @@ func TestAccNetworkFirewallRuleGroup_updateMultipleStatefulRules(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - 
resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionDrop), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionDrop)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination", "1.2.3.4/32"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination_port", "1001"), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", networkfirewall.StatefulRuleDirectionForward), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", networkfirewall.StatefulRuleProtocolIp), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", string(awstypes.StatefulRuleDirectionForward)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", string(awstypes.StatefulRuleProtocolAny)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source", "124.1.1.24/32"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source_port", "1001"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.rule_option.#", acctest.Ct2), @@ -731,11 +732,11 @@ func TestAccNetworkFirewallRuleGroup_StatefulRule_action(t *testing.T) { CheckDestroy: testAccCheckRuleGroupDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccRuleGroupConfig_statefulAction(rName, networkfirewall.StatefulActionAlert), + Config: testAccRuleGroupConfig_statefulAction(rName, 
string(awstypes.StatefulActionAlert)), Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionAlert), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionAlert)), ), }, { @@ -744,11 +745,11 @@ func TestAccNetworkFirewallRuleGroup_StatefulRule_action(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccRuleGroupConfig_statefulAction(rName, networkfirewall.StatefulActionPass), + Config: testAccRuleGroupConfig_statefulAction(rName, string(awstypes.StatefulActionPass)), Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionPass), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionPass)), ), }, { @@ -757,11 +758,11 @@ func TestAccNetworkFirewallRuleGroup_StatefulRule_action(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccRuleGroupConfig_statefulAction(rName, networkfirewall.StatefulActionDrop), + Config: testAccRuleGroupConfig_statefulAction(rName, string(awstypes.StatefulActionDrop)), Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionDrop), + 
resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionDrop)), ), }, { @@ -770,11 +771,11 @@ func TestAccNetworkFirewallRuleGroup_StatefulRule_action(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccRuleGroupConfig_statefulAction(rName, networkfirewall.StatefulActionReject), + Config: testAccRuleGroupConfig_statefulAction(rName, string(awstypes.StatefulActionReject)), Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionReject), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionReject)), ), }, { @@ -804,12 +805,12 @@ func TestAccNetworkFirewallRuleGroup_StatefulRule_header(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionPass), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionPass)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination", "ANY"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination_port", "1990"), - resource.TestCheckResourceAttr(resourceName, 
"rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", networkfirewall.StatefulRuleDirectionAny), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", networkfirewall.StatefulRuleProtocolTcp), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", string(awstypes.StatefulRuleDirectionAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", string(awstypes.StatefulRuleProtocolTcp)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source", "ANY"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source_port", "1994"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.rule_option.#", acctest.Ct1), @@ -825,12 +826,12 @@ func TestAccNetworkFirewallRuleGroup_StatefulRule_header(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckRuleGroupExists(ctx, resourceName, &ruleGroup), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", networkfirewall.StatefulActionPass), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.action", string(awstypes.StatefulActionPass)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination", "ANY"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.destination_port", "ANY"), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", 
networkfirewall.StatefulRuleDirectionAny), - resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", networkfirewall.StatefulRuleProtocolTcp), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.direction", string(awstypes.StatefulRuleDirectionAny)), + resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.protocol", string(awstypes.StatefulRuleProtocolTcp)), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source", "ANY"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.header.0.source_port", "ANY"), resource.TestCheckResourceAttr(resourceName, "rule_group.0.rules_source.0.stateful_rule.0.rule_option.#", acctest.Ct1), @@ -1024,7 +1025,7 @@ func testAccCheckRuleGroupDestroy(ctx context.Context) resource.TestCheckFunc { continue } - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) _, err := tfnetworkfirewall.FindRuleGroupByARN(ctx, conn, rs.Primary.ID) @@ -1050,11 +1051,7 @@ func testAccCheckRuleGroupExists(ctx context.Context, n string, v *networkfirewa return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No NetworkFirewall Rule Group ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) output, err := tfnetworkfirewall.FindRuleGroupByARN(ctx, conn, rs.Primary.ID) @@ -1070,7 +1067,7 @@ func testAccCheckRuleGroupExists(ctx context.Context, n string, v *networkfirewa func testAccCheckRuleGroupNotRecreated(i, j *networkfirewall.DescribeRuleGroupOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - if before, after := 
aws.StringValue(i.RuleGroupResponse.RuleGroupId), aws.StringValue(j.RuleGroupResponse.RuleGroupId); before != after { + if before, after := aws.ToString(i.RuleGroupResponse.RuleGroupId), aws.ToString(j.RuleGroupResponse.RuleGroupId); before != after { return fmt.Errorf("NetworkFirewall Rule Group was recreated. got: %s, expected: %s", after, before) } return nil @@ -1079,7 +1076,7 @@ func testAccCheckRuleGroupNotRecreated(i, j *networkfirewall.DescribeRuleGroupOu func testAccCheckRuleGroupRecreated(i, j *networkfirewall.DescribeRuleGroupOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - if before, after := aws.StringValue(i.RuleGroupResponse.RuleGroupId), aws.StringValue(j.RuleGroupResponse.RuleGroupId); before == after { + if before, after := aws.ToString(i.RuleGroupResponse.RuleGroupId), aws.ToString(j.RuleGroupResponse.RuleGroupId); before == after { return fmt.Errorf("NetworkFirewall Rule Group (%s) was not recreated", before) } return nil diff --git a/internal/service/networkfirewall/service_endpoint_resolver_gen.go b/internal/service/networkfirewall/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..cde2ef0198d --- /dev/null +++ b/internal/service/networkfirewall/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package networkfirewall + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + networkfirewall_sdkv2 "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ networkfirewall_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver networkfirewall_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: networkfirewall_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params networkfirewall_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up networkfirewall endpoint %q: %s", hostname, err) + return + } + 
} else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*networkfirewall_sdkv2.Options) { + return func(o *networkfirewall_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/networkfirewall/service_endpoints_gen_test.go b/internal/service/networkfirewall/service_endpoints_gen_test.go index f61943cd786..c11305bd94a 100644 --- a/internal/service/networkfirewall/service_endpoints_gen_test.go +++ b/internal/service/networkfirewall/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package networkfirewall_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - networkfirewall_sdkv1 "github.com/aws/aws-sdk-go/service/networkfirewall" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + networkfirewall_sdkv2 "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -83,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, 
"use fips config with package name endpoint config": { @@ -238,55 +243,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := networkfirewall_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(networkfirewall_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), networkfirewall_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := networkfirewall_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(networkfirewall_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), networkfirewall_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.NetworkFirewallConn(ctx) - - req, _ := client.ListFirewallsRequest(&networkfirewall_sdkv1.ListFirewallsInput{}) + client := meta.NetworkFirewallClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: 
aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListFirewalls(ctx, &networkfirewall_sdkv2.ListFirewallsInput{}, + func(opts *networkfirewall_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -323,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving networkfirewall default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving networkfirewall FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up networkfirewall endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -443,6 +479,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) 
func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ 
context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/networkfirewall/service_package_gen.go b/internal/service/networkfirewall/service_package_gen.go index 97879c0a68e..267439726ec 100644 --- a/internal/service/networkfirewall/service_package_gen.go +++ b/internal/service/networkfirewall/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package networkfirewall import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - networkfirewall_sdkv1 "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + networkfirewall_sdkv2 "github.com/aws/aws-sdk-go-v2/service/networkfirewall" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -22,22 +19,35 @@ func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.Serv } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { - return []*types.ServicePackageFrameworkResource{} + return []*types.ServicePackageFrameworkResource{ + { + Factory: newTLSInspectionConfigurationResource, + Name: "TLS Inspection Configuration", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, + }, + } } func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceFirewall, + Factory: dataSourceFirewall, TypeName: "aws_networkfirewall_firewall", + Name: "Firewall", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceFirewallPolicy, + Factory: dataSourceFirewallPolicy, TypeName: "aws_networkfirewall_firewall_policy", + Name: "Firewall Policy", + Tags: &types.ServicePackageResourceTags{}, }, { - Factory: DataSourceFirewallResourcePolicy, + Factory: dataSourceResourcePolicy, TypeName: "aws_networkfirewall_resource_policy", + Name: "Resource Policy", }, } } @@ -45,7 +55,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) 
[]*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceFirewall, + Factory: resourceFirewall, TypeName: "aws_networkfirewall_firewall", Name: "Firewall", Tags: &types.ServicePackageResourceTags{ @@ -53,7 +63,7 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceFirewallPolicy, + Factory: resourceFirewallPolicy, TypeName: "aws_networkfirewall_firewall_policy", Name: "Firewall Policy", Tags: &types.ServicePackageResourceTags{ @@ -61,15 +71,17 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceLoggingConfiguration, + Factory: resourceLoggingConfiguration, TypeName: "aws_networkfirewall_logging_configuration", + Name: "Logging Configuration", }, { - Factory: ResourceResourcePolicy, + Factory: resourceResourcePolicy, TypeName: "aws_networkfirewall_resource_policy", + Name: "Resource Policy", }, { - Factory: ResourceRuleGroup, + Factory: resourceRuleGroup, TypeName: "aws_networkfirewall_rule_group", Name: "Rule Group", Tags: &types.ServicePackageResourceTags{ @@ -83,25 +95,14 @@ func (p *servicePackage) ServicePackageName() string { return names.NetworkFirewall } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. 
-func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*networkfirewall_sdkv1.NetworkFirewall, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*networkfirewall_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return networkfirewall_sdkv1.New(sess.Copy(&cfg)), nil + return networkfirewall_sdkv2.NewFromConfig(cfg, + networkfirewall_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/networkfirewall/sweep.go b/internal/service/networkfirewall/sweep.go index 5f06cb358ab..1d4f7859bbd 100644 --- a/internal/service/networkfirewall/sweep.go +++ b/internal/service/networkfirewall/sweep.go @@ -7,11 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -51,33 +51,30 @@ 
func sweepFirewallPolicies(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.NetworkFirewallConn(ctx) + conn := client.NetworkFirewallClient(ctx) input := &networkfirewall.ListFirewallPoliciesInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListFirewallPoliciesPagesWithContext(ctx, input, func(page *networkfirewall.ListFirewallPoliciesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := networkfirewall.NewListFirewallPoliciesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping NetworkFirewall Firewall Policy sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing NetworkFirewall Firewall Policies (%s): %w", region, err) } for _, v := range page.FirewallPolicies { - r := ResourceFirewallPolicy() + r := resourceFirewallPolicy() d := r.Data(nil) - d.SetId(aws.StringValue(v.Arn)) + d.SetId(aws.ToString(v.Arn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping NetworkFirewall Firewall Policy sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing NetworkFirewall Firewall Policies (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -95,33 +92,30 @@ func sweepFirewalls(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.NetworkFirewallConn(ctx) + conn := client.NetworkFirewallClient(ctx) input := &networkfirewall.ListFirewallsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListFirewallsPagesWithContext(ctx, input, func(page *networkfirewall.ListFirewallsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := 
networkfirewall.NewListFirewallsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping NetworkFirewall Firewall sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing NetworkFirewall Firewalls (%s): %w", region, err) } for _, v := range page.Firewalls { - r := ResourceFirewall() + r := resourceFirewall() d := r.Data(nil) - d.SetId(aws.StringValue(v.FirewallArn)) + d.SetId(aws.ToString(v.FirewallArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping NetworkFirewall Firewall sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing NetworkFirewall Firewalls (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -139,33 +133,30 @@ func sweepLoggingConfigurations(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.NetworkFirewallConn(ctx) + conn := client.NetworkFirewallClient(ctx) input := &networkfirewall.ListFirewallsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListFirewallsPagesWithContext(ctx, input, func(page *networkfirewall.ListFirewallsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := networkfirewall.NewListFirewallsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping NetworkFirewall Logging Configuration sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing NetworkFirewall Firewalls (%s): %w", region, err) } for _, v := range page.Firewalls { - r := ResourceLoggingConfiguration() + r := resourceLoggingConfiguration() d := r.Data(nil) - d.SetId(aws.StringValue(v.FirewallArn)) 
+ d.SetId(aws.ToString(v.FirewallArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping NetworkFirewall Logging Configuration sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing NetworkFirewall Firewalls (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -183,33 +174,30 @@ func sweepRuleGroups(region string) error { if err != nil { return fmt.Errorf("error getting client: %w", err) } - conn := client.NetworkFirewallConn(ctx) + conn := client.NetworkFirewallClient(ctx) input := &networkfirewall.ListRuleGroupsInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListRuleGroupsPagesWithContext(ctx, input, func(page *networkfirewall.ListRuleGroupsOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := networkfirewall.NewListRuleGroupsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping NetworkFirewall Rule Group sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing NetworkFirewall Rule Groups (%s): %w", region, err) } for _, v := range page.RuleGroups { - r := ResourceRuleGroup() + r := resourceRuleGroup() d := r.Data(nil) - d.SetId(aws.StringValue(v.Arn)) + d.SetId(aws.ToString(v.Arn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping NetworkFirewall Rule Group sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing NetworkFirewall Rule Groups (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) diff --git a/internal/service/networkfirewall/tags_gen.go b/internal/service/networkfirewall/tags_gen.go 
index 7ef08be33ec..50cf108ade5 100644 --- a/internal/service/networkfirewall/tags_gen.go +++ b/internal/service/networkfirewall/tags_gen.go @@ -5,9 +5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/networkfirewall" - "github.com/aws/aws-sdk-go/service/networkfirewall/networkfirewalliface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists networkfirewall service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn networkfirewalliface.NetworkFirewallAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *networkfirewall.Client, identifier string, optFns ...func(*networkfirewall.Options)) (tftags.KeyValueTags, error) { input := &networkfirewall.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn networkfirewalliface.NetworkFirewallAPI, // ListTags lists networkfirewall service tags and set them in Context. // It is called from outside this package. 
func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).NetworkFirewallConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).NetworkFirewallClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns networkfirewall service tags. -func Tags(tags tftags.KeyValueTags) []*networkfirewall.Tag { - result := make([]*networkfirewall.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &networkfirewall.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*networkfirewall.Tag { } // KeyValueTags creates tftags.KeyValueTags from networkfirewall service tags. -func KeyValueTags(ctx context.Context, tags []*networkfirewall.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*networkfirewall.Tag) tftags.KeyVa // getTagsIn returns networkfirewall service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*networkfirewall.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*networkfirewall.Tag { } // setTagsOut sets networkfirewall service tags in Context. 
-func setTagsOut(ctx context.Context, tags []*networkfirewall.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*networkfirewall.Tag) { // updateTags updates networkfirewall service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn networkfirewalliface.NetworkFirewallAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *networkfirewall.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*networkfirewall.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn networkfirewalliface.NetworkFirewallAP if len(removedTags) > 0 { input := &networkfirewall.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn networkfirewalliface.NetworkFirewallAP Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn networkfirewalliface.NetworkFirewallAP // UpdateTags updates networkfirewall service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).NetworkFirewallConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).NetworkFirewallClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/networkfirewall/tls_inspection_configuration.go b/internal/service/networkfirewall/tls_inspection_configuration.go new file mode 100644 index 00000000000..d4444801b56 --- /dev/null +++ b/internal/service/networkfirewall/tls_inspection_configuration.go @@ -0,0 +1,686 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package networkfirewall + +import ( + "context" + "fmt" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkfirewall/types" + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/resourcevalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource(name="TLS Inspection Configuration") +// @Tags(identifierAttribute="arn") +func newTLSInspectionConfigurationResource(_ context.Context) (resource.ResourceWithConfigure, error) { + r := &tlsInspectionConfigurationResource{} + + r.SetDefaultCreateTimeout(30 * time.Minute) + r.SetDefaultUpdateTimeout(30 * time.Minute) + r.SetDefaultDeleteTimeout(30 * time.Minute) + + return r, nil +} + +type tlsInspectionConfigurationResource struct { + framework.ResourceWithConfigure + framework.WithImportByID + framework.WithTimeouts +} + +func (*tlsInspectionConfigurationResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_networkfirewall_tls_inspection_configuration" +} + +func (r *tlsInspectionConfigurationResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + names.AttrARN: framework.ARNAttributeComputedOnly(), + "certificate_authority": schema.ListAttribute{ + 
CustomType: fwtypes.NewListNestedObjectTypeOf[tlsCertificateDataModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[tlsCertificateDataModel](ctx), + }, + }, + "certificates": schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[tlsCertificateDataModel](ctx), + Computed: true, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[tlsCertificateDataModel](ctx), + }, + }, + names.AttrDescription: schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 512), + }, + }, + names.AttrEncryptionConfiguration: schema.ListAttribute{ + CustomType: fwtypes.NewListNestedObjectTypeOf[encryptionConfigurationModel](ctx), + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + ElementType: types.ObjectType{ + AttrTypes: fwtypes.AttributeTypesMust[encryptionConfigurationModel](ctx), + }, + }, + names.AttrID: framework.IDAttribute(), + names.AttrName: schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 128), + stringvalidator.RegexMatches(regexache.MustCompile(`^[a-zA-Z0-9-]+$`), "Must contain only a-z, A-Z, 0-9 and - (hyphen)"), + }, + }, + "number_of_associations": schema.Int64Attribute{ + Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + "tls_inspection_configuration_id": framework.IDAttribute(), + "update_token": schema.StringAttribute{ + Computed: true, + }, + }, + Blocks: map[string]schema.Block{ + names.AttrTimeouts: timeouts.Block(ctx, timeouts.Opts{ + Create: true, + Update: true, + Delete: true, + }), + 
"tls_inspection_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[tlsInspectionConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "server_certificate_configuration": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[serverCertificateConfigurationModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "certificate_authority_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + }, + }, + Blocks: map[string]schema.Block{ + "check_certificate_revocation_status": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[checkCertificateRevocationStatusActionsModel](ctx), + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "revoked_status_action": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.RevocationCheckAction](), + Optional: true, + }, + "unknown_status_action": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.RevocationCheckAction](), + Optional: true, + }, + }, + }, + }, + names.AttrScope: schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[serverCertificateScopeModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "protocols": schema.SetAttribute{ + CustomType: fwtypes.NewSetTypeOf[types.Int64](ctx), + ElementType: types.Int64Type, + Required: true, + Validators: []validator.Set{ + setvalidator.ValueInt64sAre(int64validator.Between(0, 255)), + }, + }, + }, + Blocks: map[string]schema.Block{ + 
"destination_ports": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[portRangeModel](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "from_port": schema.Int64Attribute{ + Required: true, + Validators: []validator.Int64{ + int64validator.Between(0, 65535), + }, + }, + "to_port": schema.Int64Attribute{ + Required: true, + Validators: []validator.Int64{ + int64validator.Between(0, 65535), + }, + }, + }, + }, + }, + names.AttrDestination: schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[addressModel](ctx), + Validators: []validator.List{ + listvalidator.IsRequired(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "address_definition": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + stringvalidator.RegexMatches(regexache.MustCompile(`^([a-fA-F\d:\.]+($|/\d{1,3}))$`), "Must contain IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation"), + }, + }, + }, + }, + }, + "source_ports": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[portRangeModel](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "from_port": schema.Int64Attribute{ + Required: true, + Validators: []validator.Int64{ + int64validator.Between(0, 65535), + }, + }, + "to_port": schema.Int64Attribute{ + Required: true, + Validators: []validator.Int64{ + int64validator.Between(0, 65535), + }, + }, + }, + }, + }, + names.AttrSource: schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[addressModel](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "address_definition": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + 
stringvalidator.RegexMatches(regexache.MustCompile(`^([a-fA-F\d:\.]+($|/\d{1,3}))$`), "Must contain IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation"), + }, + }, + }, + }, + }, + }, + }, + }, + "server_certificate": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[serverCertificateModel](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + names.AttrResourceARN: schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +func (r *tlsInspectionConfigurationResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data tlsInspectionConfigurationResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + name := data.TLSInspectionConfigurationName.ValueString() + input := &networkfirewall.CreateTLSInspectionConfigurationInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) + if response.Diagnostics.HasError() { + return + } + + input.Tags = getTagsIn(ctx) + + outputC, err := conn.CreateTLSInspectionConfiguration(ctx, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating NetworkFirewall TLS Inspection Configuration (%s)", name), err.Error()) + + return + } + + // Set values for unknowns. 
+ data.TLSInspectionConfigurationARN = fwflex.StringToFramework(ctx, outputC.TLSInspectionConfigurationResponse.TLSInspectionConfigurationArn) + data.TLSInspectionConfigurationID = fwflex.StringToFramework(ctx, outputC.TLSInspectionConfigurationResponse.TLSInspectionConfigurationId) + data.UpdateToken = fwflex.StringToFramework(ctx, outputC.UpdateToken) + data.setID() + + outputR, err := waitTLSInspectionConfigurationCreated(ctx, conn, data.ID.ValueString(), r.CreateTimeout(ctx, data.Timeouts)) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for NetworkFirewall TLS Inspection Configuration (%s) create", data.ID.ValueString()), err.Error()) + + return + } + + // Set values for unknowns. + response.Diagnostics.Append(flattenDescribeTLSInspectionConfigurationOutput(ctx, &data, outputR)...) + if response.Diagnostics.HasError() { + return + } + + response.Diagnostics.Append(response.State.Set(ctx, data)...) +} + +func (r *tlsInspectionConfigurationResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data tlsInspectionConfigurationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) + + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + output, err := findTLSInspectionConfigurationByARN(ctx, conn, data.ID.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading NetworkFirewall TLS Inspection Configuration (%s)", data.ID.ValueString()), err.Error()) + + return + } + + // Set attributes for import. + response.Diagnostics.Append(flattenDescribeTLSInspectionConfigurationOutput(ctx, &data, output)...) 
+ if response.Diagnostics.HasError() { + return + } + + setTagsOut(ctx, output.TLSInspectionConfigurationResponse.Tags) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *tlsInspectionConfigurationResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new tlsInspectionConfigurationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + if !new.Description.Equal(old.Description) || + !new.EncryptionConfiguration.Equal(old.EncryptionConfiguration) || + !new.TLSInspectionConfiguration.Equal(old.TLSInspectionConfiguration) { + input := &networkfirewall.UpdateTLSInspectionConfigurationInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...) + if response.Diagnostics.HasError() { + return + } + + input.UpdateToken = aws.String(old.UpdateToken.ValueString()) + + output, err := conn.UpdateTLSInspectionConfiguration(ctx, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating NetworkFirewall TLS Inspection Configuration (%s)", new.ID.ValueString()), err.Error()) + + return + } + + new.UpdateToken = fwflex.StringToFramework(ctx, output.UpdateToken) + + outputR, err := waitTLSInspectionConfigurationUpdated(ctx, conn, new.ID.ValueString(), r.CreateTimeout(ctx, new.Timeouts)) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for NetworkFirewall TLS Inspection Configuration (%s) update", new.ID.ValueString()), err.Error()) + + return + } + + // Set values for unknowns. + response.Diagnostics.Append(flattenDescribeTLSInspectionConfigurationOutput(ctx, &new, outputR)...) 
+ if response.Diagnostics.HasError() { + return + } + } else { + new.CertificateAuthority = old.CertificateAuthority + new.Certificates = old.Certificates + new.UpdateToken = old.UpdateToken + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *tlsInspectionConfigurationResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data tlsInspectionConfigurationResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkFirewallClient(ctx) + + _, err := conn.DeleteTLSInspectionConfiguration(ctx, &networkfirewall.DeleteTLSInspectionConfigurationInput{ + TLSInspectionConfigurationArn: aws.String(data.ID.ValueString()), + }) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting NetworkFirewall TLS Inspection Configuration (%s)", data.ID.ValueString()), err.Error()) + + return + } + + if _, err := waitTLSInspectionConfigurationDeleted(ctx, conn, data.ID.ValueString(), r.DeleteTimeout(ctx, data.Timeouts)); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for NetworkFirewall TLS Inspection Configuration (%s) delete", data.ID.ValueString()), err.Error()) + + return + } +} + +func (r *tlsInspectionConfigurationResource) ConfigValidators(context.Context) []resource.ConfigValidator { + return []resource.ConfigValidator{ + resourcevalidator.AtLeastOneOf( + path.MatchRoot("tls_inspection_configuration").AtListIndex(0).AtName("server_certificate_configuration").AtListIndex(0).AtName("certificate_authority_arn"), + path.MatchRoot("tls_inspection_configuration").AtListIndex(0).AtName("server_certificate_configuration").AtListIndex(0).AtName("server_certificate"), + ), + } +} + +func (r *tlsInspectionConfigurationResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, 
response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) +} + +func findTLSInspectionConfigurationByARN(ctx context.Context, conn *networkfirewall.Client, arn string) (*networkfirewall.DescribeTLSInspectionConfigurationOutput, error) { + input := &networkfirewall.DescribeTLSInspectionConfigurationInput{ + TLSInspectionConfigurationArn: aws.String(arn), + } + + output, err := conn.DescribeTLSInspectionConfiguration(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.TLSInspectionConfigurationResponse == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusTLSInspectionConfiguration(ctx context.Context, conn *networkfirewall.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findTLSInspectionConfigurationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.TLSInspectionConfigurationResponse.TLSInspectionConfigurationStatus), nil + } +} + +const ( + resourceStatusPending = "PENDING" +) + +func statusTLSInspectionConfigurationCertificates(ctx context.Context, conn *networkfirewall.Client, arn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findTLSInspectionConfigurationByARN(ctx, conn, arn) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + certificates := output.TLSInspectionConfigurationResponse.Certificates + certificateAuthority := output.TLSInspectionConfigurationResponse.CertificateAuthority + + // The API does not immediately return data for certificates and certificate authority even when the resource status is "ACTIVE", + // which causes 
unexpected diffs when reading. This sets the status to "PENDING" until either the certificates or the certificate + // authority is populated (the API will always return at least one of the two). + if status := output.TLSInspectionConfigurationResponse.TLSInspectionConfigurationStatus; status == awstypes.ResourceStatusActive && (certificates != nil || certificateAuthority != nil) { + return output, string(status), nil + } + + return output, resourceStatusPending, nil + } +} + +func waitTLSInspectionConfigurationCreated(ctx context.Context, conn *networkfirewall.Client, arn string, timeout time.Duration) (*networkfirewall.DescribeTLSInspectionConfigurationOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{resourceStatusPending}, + Target: enum.Slice(awstypes.ResourceStatusActive), + Refresh: statusTLSInspectionConfigurationCertificates(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkfirewall.DescribeTLSInspectionConfigurationOutput); ok { + return output, err + } + + return nil, err +} + +func waitTLSInspectionConfigurationUpdated(ctx context.Context, conn *networkfirewall.Client, arn string, timeout time.Duration) (*networkfirewall.DescribeTLSInspectionConfigurationOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{resourceStatusPending}, + Target: enum.Slice(awstypes.ResourceStatusActive), + Refresh: statusTLSInspectionConfigurationCertificates(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkfirewall.DescribeTLSInspectionConfigurationOutput); ok { + return output, err + } + + return nil, err +} + +func waitTLSInspectionConfigurationDeleted(ctx context.Context, conn *networkfirewall.Client, arn string, timeout time.Duration) (*networkfirewall.DescribeTLSInspectionConfigurationOutput, error) { + stateConf := &retry.StateChangeConf{ + 
Pending: enum.Slice(awstypes.ResourceStatusActive, awstypes.ResourceStatusDeleting), + Target: []string{}, + Refresh: statusTLSInspectionConfiguration(ctx, conn, arn), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkfirewall.DescribeTLSInspectionConfigurationOutput); ok { + return output, err + } + + return nil, err +} + +func flattenDescribeTLSInspectionConfigurationOutput(ctx context.Context, data *tlsInspectionConfigurationResourceModel, apiObject *networkfirewall.DescribeTLSInspectionConfigurationOutput) diag.Diagnostics { + var diags diag.Diagnostics + + d := fwflex.Flatten(ctx, apiObject.TLSInspectionConfigurationResponse, data) + diags.Append(d...) + if diags.HasError() { + return diags + } + + d = fwflex.Flatten(ctx, apiObject.TLSInspectionConfiguration, &data.TLSInspectionConfiguration) + diags.Append(d...) + if diags.HasError() { + return diags + } + + return diags +} + +type tlsInspectionConfigurationResourceModel struct { + CertificateAuthority fwtypes.ListNestedObjectValueOf[tlsCertificateDataModel] `tfsdk:"certificate_authority"` + Certificates fwtypes.ListNestedObjectValueOf[tlsCertificateDataModel] `tfsdk:"certificates"` + Description types.String `tfsdk:"description"` + EncryptionConfiguration fwtypes.ListNestedObjectValueOf[encryptionConfigurationModel] `tfsdk:"encryption_configuration"` + ID types.String `tfsdk:"id"` + NumberOfAssociations types.Int64 `tfsdk:"number_of_associations"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + TLSInspectionConfiguration fwtypes.ListNestedObjectValueOf[tlsInspectionConfigurationModel] `tfsdk:"tls_inspection_configuration"` + TLSInspectionConfigurationARN types.String `tfsdk:"arn"` + TLSInspectionConfigurationID types.String `tfsdk:"tls_inspection_configuration_id"` + TLSInspectionConfigurationName types.String `tfsdk:"name"` + UpdateToken types.String 
`tfsdk:"update_token"` +} + +func (model *tlsInspectionConfigurationResourceModel) InitFromID() error { + model.TLSInspectionConfigurationARN = model.ID + + return nil +} + +func (model *tlsInspectionConfigurationResourceModel) setID() { + model.ID = model.TLSInspectionConfigurationARN +} + +type encryptionConfigurationModel struct { + KeyID types.String `tfsdk:"key_id"` + Type types.String `tfsdk:"type"` +} + +type tlsInspectionConfigurationModel struct { + ServerCertificateConfigurations fwtypes.ListNestedObjectValueOf[serverCertificateConfigurationModel] `tfsdk:"server_certificate_configuration"` +} + +type serverCertificateConfigurationModel struct { + CertificateAuthorityARN fwtypes.ARN `tfsdk:"certificate_authority_arn"` + CheckCertificateRevocationsStatus fwtypes.ListNestedObjectValueOf[checkCertificateRevocationStatusActionsModel] `tfsdk:"check_certificate_revocation_status"` + Scopes fwtypes.ListNestedObjectValueOf[serverCertificateScopeModel] `tfsdk:"scope"` + ServerCertificates fwtypes.ListNestedObjectValueOf[serverCertificateModel] `tfsdk:"server_certificate"` +} + +type checkCertificateRevocationStatusActionsModel struct { + RevokedStatusAction fwtypes.StringEnum[awstypes.RevocationCheckAction] `tfsdk:"revoked_status_action"` + UnknownStatusAction fwtypes.StringEnum[awstypes.RevocationCheckAction] `tfsdk:"unknown_status_action"` +} + +type serverCertificateScopeModel struct { + DestinationPorts fwtypes.ListNestedObjectValueOf[portRangeModel] `tfsdk:"destination_ports"` + Destinations fwtypes.ListNestedObjectValueOf[addressModel] `tfsdk:"destination"` + SourcePorts fwtypes.ListNestedObjectValueOf[portRangeModel] `tfsdk:"source_ports"` + Protocols fwtypes.SetValueOf[types.Int64] `tfsdk:"protocols"` + Sources fwtypes.ListNestedObjectValueOf[addressModel] `tfsdk:"source"` +} + +type portRangeModel struct { + FromPort types.Int64 `tfsdk:"from_port"` + ToPort types.Int64 `tfsdk:"to_port"` +} + +type addressModel struct { + AddressDefinition types.String 
`tfsdk:"address_definition"` +} + +type serverCertificateModel struct { + ResourceARN fwtypes.ARN `tfsdk:"resource_arn"` +} + +type tlsCertificateDataModel struct { + CertificateARN types.String `tfsdk:"certificate_arn"` + CertificateSerial types.String `tfsdk:"certificate_serial"` + Status types.String `tfsdk:"status"` + StatusMessage types.String `tfsdk:"status_message"` +} diff --git a/internal/service/networkfirewall/tls_inspection_configuration_test.go b/internal/service/networkfirewall/tls_inspection_configuration_test.go new file mode 100644 index 00000000000..6ce206e9bea --- /dev/null +++ b/internal/service/networkfirewall/tls_inspection_configuration_test.go @@ -0,0 +1,584 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package networkfirewall_test + +import ( + "context" + "fmt" + "testing" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/service/networkfirewall" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfnetworkfirewall "github.com/hashicorp/terraform-provider-aws/internal/service/networkfirewall" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkFirewallTLSInspectionConfiguration_basic(t *testing.T) { + ctx := acctest.Context(t) + var v networkfirewall.DescribeTLSInspectionConfigurationOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + commonName := acctest.RandomDomain() + certificateDomainName := commonName.RandomSubdomain().String() + resourceName := "aws_networkfirewall_tls_inspection_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); 
testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewall), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTLSInspectionConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTLSInspectionConfigurationConfig_basic(rName, commonName.String(), certificateDomainName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", regexache.MustCompile(`tls-configuration/+.`)), + resource.TestCheckNoResourceAttr(resourceName, "certificate_authority"), + resource.TestCheckResourceAttr(resourceName, "certificates.#", acctest.Ct1), + resource.TestCheckNoResourceAttr(resourceName, names.AttrDescription), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.key_id", "AWS_OWNED_KMS_KEY"), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.type", "AWS_OWNED_KMS_KEY"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, "number_of_associations"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.#", acctest.Ct1), + resource.TestCheckNoResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.certificate_authority_arn"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.check_certificate_revocation_status.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, 
"tls_inspection_configuration.0.server_certificate_configuration.0.scope.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination.0.address_definition", "0.0.0.0/0"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.protocols.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.protocols.*", "6"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.server_certificate.#", acctest.Ct1), + resource.TestCheckResourceAttrSet(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.server_certificate.0.resource_arn"), + resource.TestCheckResourceAttrSet(resourceName, "tls_inspection_configuration_id"), + resource.TestCheckResourceAttrSet(resourceName, "update_token"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"tls_inspection_configuration", "update_token"}, + }, + }, + }) +} + +func TestAccNetworkFirewallTLSInspectionConfiguration_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v networkfirewall.DescribeTLSInspectionConfigurationOutput + 
rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + commonName := acctest.RandomDomain() + certificateDomainName := commonName.RandomSubdomain().String() + resourceName := "aws_networkfirewall_tls_inspection_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewall), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTLSInspectionConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTLSInspectionConfigurationConfig_basic(rName, commonName.String(), certificateDomainName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfnetworkfirewall.ResourceTLSInspectionConfiguration, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccNetworkFirewallTLSInspectionConfiguration_tags(t *testing.T) { + ctx := acctest.Context(t) + var v networkfirewall.DescribeTLSInspectionConfigurationOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + commonName := acctest.RandomDomain() + certificateDomainName := commonName.RandomSubdomain().String() + resourceName := "aws_networkfirewall_tls_inspection_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewall), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTLSInspectionConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTLSInspectionConfigurationConfig_tags1(rName, commonName.String(), certificateDomainName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, 
&v), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"tls_inspection_configuration", "update_token"}, + }, + { + Config: testAccTLSInspectionConfigurationConfig_tags2(rName, commonName.String(), certificateDomainName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccTLSInspectionConfigurationConfig_tags1(rName, commonName.String(), certificateDomainName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + +func TestAccNetworkFirewallTLSInspectionConfiguration_encryptionConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var v networkfirewall.DescribeTLSInspectionConfigurationOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + commonName := acctest.RandomDomain() + certificateDomainName := commonName.RandomSubdomain().String() + resourceName := "aws_networkfirewall_tls_inspection_configuration.test" + kmsKeyResourceName := "aws_kms_key.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, names.NetworkFirewall), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTLSInspectionConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTLSInspectionConfigurationConfig_encryptionConfiguration(rName, commonName.String(), certificateDomainName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", regexache.MustCompile(`tls-configuration/+.`)), + resource.TestCheckNoResourceAttr(resourceName, "certificate_authority"), + resource.TestCheckResourceAttr(resourceName, "certificates.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "test"), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttrPair(resourceName, "encryption_configuration.0.key_id", kmsKeyResourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.type", "CUSTOMER_KMS"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, "number_of_associations"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.#", acctest.Ct1), + resource.TestCheckNoResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.certificate_authority_arn"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.check_certificate_revocation_status.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, 
"tls_inspection_configuration.0.server_certificate_configuration.0.scope.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination.0.address_definition", "0.0.0.0/0"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.0.from_port", "443"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.0.to_port", "8080"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.protocols.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.protocols.*", "6"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source.0.address_definition", "10.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.0.from_port", "1024"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.0.to_port", "65534"), + 
resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.server_certificate.#", acctest.Ct1), + resource.TestCheckResourceAttrSet(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.server_certificate.0.resource_arn"), + resource.TestCheckResourceAttrSet(resourceName, "tls_inspection_configuration_id"), + resource.TestCheckResourceAttrSet(resourceName, "update_token"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"tls_inspection_configuration", "update_token"}, + }, + }, + }) +} + +func TestAccNetworkFirewallTLSInspectionConfiguration_checkCertificateRevocationStatus(t *testing.T) { + ctx := acctest.Context(t) + var v networkfirewall.DescribeTLSInspectionConfigurationOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + commonName := acctest.RandomDomain() + certificateDomainName := commonName.RandomSubdomain().String() + resourceName := "aws_networkfirewall_tls_inspection_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkFirewall), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTLSInspectionConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTLSInspectionConfigurationConfig_checkCertificateRevocationStatus(rName, commonName.String(), certificateDomainName, "REJECT", "PASS"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", regexache.MustCompile(`tls-configuration/+.`)), + resource.TestCheckNoResourceAttr(resourceName, "certificate_authority"), + resource.TestCheckResourceAttr(resourceName, "certificates.#", acctest.Ct1), 
+ resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "test"), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.key_id", "AWS_OWNED_KMS_KEY"), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.type", "AWS_OWNED_KMS_KEY"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, "number_of_associations"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.#", acctest.Ct1), + resource.TestCheckNoResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.certificate_authority_arn"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.check_certificate_revocation_status.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.check_certificate_revocation_status.0.revoked_status_action", "REJECT"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.check_certificate_revocation_status.0.unknown_status_action", "PASS"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination.0.address_definition", "0.0.0.0/0"), + 
resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.0.from_port", "443"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.0.to_port", "8080"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.protocols.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.protocols.*", "6"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source.0.address_definition", "10.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.0.from_port", "1024"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.0.to_port", "65534"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.server_certificate.#", acctest.Ct1), + resource.TestCheckResourceAttrSet(resourceName, "tls_inspection_configuration_id"), + resource.TestCheckResourceAttrSet(resourceName, "update_token"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"tls_inspection_configuration", "update_token"}, + }, + { + Config: testAccTLSInspectionConfigurationConfig_checkCertificateRevocationStatus(rName, commonName.String(), certificateDomainName, "DROP", "PASS"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTLSInspectionConfigurationExists(ctx, resourceName, &v), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "network-firewall", regexache.MustCompile(`tls-configuration/+.`)), + resource.TestCheckNoResourceAttr(resourceName, "certificate_authority"), + resource.TestCheckResourceAttr(resourceName, "certificates.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, "test"), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.key_id", "AWS_OWNED_KMS_KEY"), + resource.TestCheckResourceAttr(resourceName, "encryption_configuration.0.type", "AWS_OWNED_KMS_KEY"), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttrSet(resourceName, "number_of_associations"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.#", acctest.Ct1), + resource.TestCheckNoResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.certificate_authority_arn"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.check_certificate_revocation_status.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.check_certificate_revocation_status.0.revoked_status_action", "DROP"), + 
resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.check_certificate_revocation_status.0.unknown_status_action", "PASS"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination.0.address_definition", "0.0.0.0/0"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.0.from_port", "443"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.destination_ports.0.to_port", "8080"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.protocols.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.protocols.*", "6"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source.0.address_definition", "10.0.0.0/8"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, 
"tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.0.from_port", "1024"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.scope.0.source_ports.0.to_port", "65534"), + resource.TestCheckResourceAttr(resourceName, "tls_inspection_configuration.0.server_certificate_configuration.0.server_certificate.#", acctest.Ct1), + resource.TestCheckResourceAttrSet(resourceName, "tls_inspection_configuration_id"), + resource.TestCheckResourceAttrSet(resourceName, "update_token"), + ), + }, + }, + }) +} + +func testAccCheckTLSInspectionConfigurationDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_networkfirewall_tls_inspection_configuration" { + continue + } + + _, err := tfnetworkfirewall.FindTLSInspectionConfigurationByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("NetworkFirewall TLS Inspection Configuration %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckTLSInspectionConfigurationExists(ctx context.Context, n string, v *networkfirewall.DescribeTLSInspectionConfigurationOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkFirewallClient(ctx) + + output, err := tfnetworkfirewall.FindTLSInspectionConfigurationByARN(ctx, conn, rs.Primary.ID) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccTLSInspectionConfigurationConfig_certificateBase(rName, commonName, certificateDomainName string) string { + return fmt.Sprintf(` +resource 
"aws_acmpca_certificate_authority" "test" { + permanent_deletion_time_in_days = 7 + type = "ROOT" + + certificate_authority_configuration { + key_algorithm = "RSA_4096" + signing_algorithm = "SHA512WITHRSA" + + subject { + common_name = %[2]q + } + } + + tags = { + Name = %[1]q + } +} + +resource "aws_acmpca_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + certificate_signing_request = aws_acmpca_certificate_authority.test.certificate_signing_request + signing_algorithm = "SHA512WITHRSA" + + template_arn = "arn:${data.aws_partition.current.partition}:acm-pca:::template/RootCACertificate/V1" + + validity { + type = "YEARS" + value = 2 + } +} + +resource "aws_acmpca_certificate_authority_certificate" "test" { + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + + certificate = aws_acmpca_certificate.test.certificate + certificate_chain = aws_acmpca_certificate.test.certificate_chain +} + +data "aws_partition" "current" {} + +resource "aws_acm_certificate" "test" { + domain_name = %[3]q + certificate_authority_arn = aws_acmpca_certificate_authority.test.arn + + tags = { + Name = %[1]q + } + + depends_on = [ + aws_acmpca_certificate_authority_certificate.test, + ] +} +`, rName, commonName, certificateDomainName) +} + +func testAccTLSInspectionConfigurationConfig_basic(rName, commonName, certificateDomainName string) string { + return acctest.ConfigCompose(testAccTLSInspectionConfigurationConfig_certificateBase(rName, commonName, certificateDomainName), fmt.Sprintf(` +resource "aws_networkfirewall_tls_inspection_configuration" "test" { + name = %[1]q + + tls_inspection_configuration { + server_certificate_configuration { + server_certificate { + resource_arn = aws_acm_certificate.test.arn + } + scope { + protocols = [6] + destination { + address_definition = "0.0.0.0/0" + } + } + } + } +} +`, rName)) +} + +func testAccTLSInspectionConfigurationConfig_tags1(rName, commonName, certificateDomainName, 
tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccTLSInspectionConfigurationConfig_certificateBase(rName, commonName, certificateDomainName), fmt.Sprintf(` +resource "aws_networkfirewall_tls_inspection_configuration" "test" { + name = %[1]q + + tls_inspection_configuration { + server_certificate_configuration { + server_certificate { + resource_arn = aws_acm_certificate.test.arn + } + scope { + protocols = [6] + destination { + address_definition = "0.0.0.0/0" + } + } + } + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccTLSInspectionConfigurationConfig_tags2(rName, commonName, certificateDomainName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccTLSInspectionConfigurationConfig_certificateBase(rName, commonName, certificateDomainName), fmt.Sprintf(` +resource "aws_networkfirewall_tls_inspection_configuration" "test" { + name = %[1]q + + tls_inspection_configuration { + server_certificate_configuration { + server_certificate { + resource_arn = aws_acm_certificate.test.arn + } + scope { + protocols = [6] + destination { + address_definition = "0.0.0.0/0" + } + } + } + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} + +func testAccTLSInspectionConfigurationConfig_encryptionConfiguration(rName, commonName, certificateDomainName string) string { + return acctest.ConfigCompose(testAccTLSInspectionConfigurationConfig_certificateBase(rName, commonName, certificateDomainName), fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 +} + +resource "aws_networkfirewall_tls_inspection_configuration" "test" { + name = %[1]q + description = "test" + + encryption_configuration { + key_id = aws_kms_key.test.arn + type = "CUSTOMER_KMS" + } + + tls_inspection_configuration { + server_certificate_configuration { + server_certificate { + resource_arn = 
aws_acm_certificate.test.arn + } + scope { + protocols = [6] + + destination { + address_definition = "0.0.0.0/0" + } + destination_ports { + from_port = 443 + to_port = 8080 + } + + source { + address_definition = "10.0.0.0/8" + } + source_ports { + from_port = 1024 + to_port = 65534 + } + } + } + } +} +`, rName)) +} + +func testAccTLSInspectionConfigurationConfig_checkCertificateRevocationStatus(rName, commonName, certificateDomainName, revokedStatusAction, unknownStatusAction string) string { + return acctest.ConfigCompose(testAccTLSInspectionConfigurationConfig_certificateBase(rName, commonName, certificateDomainName), fmt.Sprintf(` +resource "aws_networkfirewall_tls_inspection_configuration" "test" { + name = %[1]q + description = "test" + + encryption_configuration { + key_id = "AWS_OWNED_KMS_KEY" + type = "AWS_OWNED_KMS_KEY" + } + + tls_inspection_configuration { + server_certificate_configuration { + check_certificate_revocation_status { + revoked_status_action = %[2]q + unknown_status_action = %[3]q + } + server_certificate { + resource_arn = aws_acm_certificate.test.arn + } + scope { + protocols = [6] + + destination { + address_definition = "0.0.0.0/0" + } + destination_ports { + from_port = 443 + to_port = 8080 + } + + source { + address_definition = "10.0.0.0/8" + } + source_ports { + from_port = 1024 + to_port = 65534 + } + } + } + } +} +`, rName, revokedStatusAction, unknownStatusAction)) +} diff --git a/internal/service/networkmanager/core_network.go b/internal/service/networkmanager/core_network.go index 4f2339c3720..4cc8cbfddb1 100644 --- a/internal/service/networkmanager/core_network.go +++ b/internal/service/networkmanager/core_network.go @@ -625,18 +625,18 @@ func waitCoreNetworkPolicyCreated(ctx context.Context, conn *networkmanager.Netw // buildCoreNetworkBasePolicyDocument returns a base policy document func buildCoreNetworkBasePolicyDocument(regions []interface{}) (string, error) { - edgeLocations := make([]*CoreNetworkEdgeLocation, 
len(regions)) + edgeLocations := make([]*coreNetworkPolicyCoreNetworkEdgeLocation, len(regions)) for i, location := range regions { - edgeLocations[i] = &CoreNetworkEdgeLocation{Location: location.(string)} + edgeLocations[i] = &coreNetworkPolicyCoreNetworkEdgeLocation{Location: location.(string)} } - basePolicy := &CoreNetworkPolicyDoc{ + basePolicy := &coreNetworkPolicyDocument{ Version: "2021.12", - CoreNetworkConfiguration: &CoreNetworkPolicyCoreNetworkConfiguration{ - AsnRanges: CoreNetworkPolicyDecodeConfigStringList([]interface{}{"64512-65534"}), + CoreNetworkConfiguration: &coreNetworkPolicyCoreNetworkConfiguration{ + AsnRanges: coreNetworkPolicyExpandStringList([]interface{}{"64512-65534"}), EdgeLocations: edgeLocations, }, - Segments: []*CoreNetworkPolicySegment{ + Segments: []*coreNetworkPolicySegment{ { Name: "segment", Description: "base-policy", diff --git a/internal/service/networkmanager/core_network_policy_document_data_source.go b/internal/service/networkmanager/core_network_policy_document_data_source.go index 2e44cdad037..82130697fca 100644 --- a/internal/service/networkmanager/core_network_policy_document_data_source.go +++ b/internal/service/networkmanager/core_network_policy_document_data_source.go @@ -31,104 +31,17 @@ func DataSourceCoreNetworkPolicyDocument() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceCoreNetworkPolicyDocumentRead, + + // Order attributes to match model structures and documentation: + // https://docs.aws.amazon.com/network-manager/latest/cloudwan/cloudwan-policies-json.html. 
Schema: map[string]*schema.Schema{ - "attachment_policies": { - Type: schema.TypeList, + names.AttrVersion: { + Type: schema.TypeString, Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "condition_logic": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "and", - "or", - }, false), - }, - names.AttrDescription: { - Type: schema.TypeString, - Optional: true, - }, - "rule_number": { - Type: schema.TypeInt, - Required: true, - ValidateFunc: validation.IntBetween(1, 65535), - }, - - "conditions": { - Type: schema.TypeList, - Required: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - names.AttrType: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "account-id", - "any", - "tag-value", - "tag-exists", - "resource-id", - names.AttrRegion, - "attachment-type", - }, false), - }, - "operator": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "equals", - "not-equals", - "contains", - "begins-with", - }, false), - }, - names.AttrKey: { - Type: schema.TypeString, - Optional: true, - }, - names.AttrValue: { - Type: schema.TypeString, - Optional: true, - }, - }, - }, - }, - names.AttrAction: { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "association_method": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "tag", - "constant", - }, false), - }, - "segment": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z][0-9A-Za-z]{0,63}$`), - "must begin with a letter and contain only alphanumeric characters"), - }, - "tag_value_of_key": { - Type: schema.TypeString, - Optional: true, - }, - "require_acceptance": { - Type: schema.TypeBool, - Optional: true, - Default: false, - }, - 
}, - }, - }, - }, - }, + Default: "2021.12", + ValidateFunc: validation.StringInSlice([]string{ + "2021.12", + }, false), }, "core_network_configuration": { Type: schema.TypeList, @@ -142,11 +55,6 @@ func DataSourceCoreNetworkPolicyDocument() *schema.Resource { Type: schema.TypeString, }, }, - "vpn_ecmp_support": { - Type: schema.TypeBool, - Default: true, - Optional: true, - }, "inside_cidr_blocks": { Type: schema.TypeSet, Optional: true, @@ -154,6 +62,11 @@ func DataSourceCoreNetworkPolicyDocument() *schema.Resource { Type: schema.TypeString, }, }, + "vpn_ecmp_support": { + Type: schema.TypeBool, + Default: true, + Optional: true, + }, "edge_locations": { Type: schema.TypeList, Required: true, @@ -182,34 +95,12 @@ func DataSourceCoreNetworkPolicyDocument() *schema.Resource { }, }, }, - names.AttrJSON: { - Type: schema.TypeString, - Computed: true, - }, "segments": { Type: schema.TypeList, Required: true, MinItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "allow_filter": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z][0-9A-Za-z]{0,63}$`), - "must begin with a letter and contain only alphanumeric characters"), - }, - }, - "deny_filter": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z][0-9A-Za-z]{0,63}$`), - "must begin with a letter and contain only alphanumeric characters"), - }, - }, names.AttrName: { Type: schema.TypeString, Required: true, @@ -238,27 +129,90 @@ func DataSourceCoreNetworkPolicyDocument() *schema.Resource { Default: true, Optional: true, }, + "deny_filter": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z][0-9A-Za-z]{0,63}$`), + "must begin with a letter and contain only 
alphanumeric characters"), + }, + }, + "allow_filter": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z][0-9A-Za-z]{0,63}$`), + "must begin with a letter and contain only alphanumeric characters"), + }, + }, }, }, }, - "segment_actions": { + "network_function_groups": { Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + names.AttrName: { + Type: schema.TypeString, + Required: true, + }, names.AttrDescription: { Type: schema.TypeString, Optional: true, }, + "require_attachment_acceptance": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "segment_actions": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ names.AttrAction: { Type: schema.TypeString, Required: true, ValidateFunc: validation.StringInSlice([]string{ "share", "create-route", + "send-via", + "send-to", }, false), }, - + "segment": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z][0-9A-Za-z]{0,63}$`), + "must begin with a letter and contain only alphanumeric characters"), + }, + names.AttrMode: { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "attachment-route", + "single-hop", + "dual-hop", + }, false), + }, + "share_with": setOfString, + "share_with_except": setOfString, + "destination_cidr_blocks": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.Any( + verify.ValidIPv4CIDRNetworkAddress, + verify.ValidIPv6CIDRNetworkAddress, + ), + }, + }, "destinations": { Type: schema.TypeSet, Optional: true, @@ -273,42 +227,150 @@ func DataSourceCoreNetworkPolicyDocument() *schema.Resource { ), }, }, - "destination_cidr_blocks": { - Type: schema.TypeSet, + names.AttrDescription: { + Type: 
schema.TypeString, Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.Any( - verify.ValidIPv4CIDRNetworkAddress, - verify.ValidIPv6CIDRNetworkAddress, - ), + }, + "when_sent_to": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "segments": setOfString, + }, }, }, - names.AttrMode: { + "via": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_function_groups": setOfString, + "with_edge_override": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "edge_sets": setOfString, + "use_edge": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "attachment_policies": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_number": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 65535), + }, + names.AttrDescription: { + Type: schema.TypeString, + Optional: true, + }, + "condition_logic": { Type: schema.TypeString, Optional: true, ValidateFunc: validation.StringInSlice([]string{ - "attachment-route", + "and", + "or", }, false), }, - "segment": { - Type: schema.TypeString, + "conditions": { + Type: schema.TypeList, Required: true, - ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z][0-9A-Za-z]{0,63}$`), - "must begin with a letter and contain only alphanumeric characters"), + MinItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrType: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{ + "account-id", + "any", + "tag-value", + "tag-exists", + "resource-id", + names.AttrRegion, + "attachment-type", + }, false), + }, + "operator": { + Type: schema.TypeString, + Optional: true, + 
ValidateFunc: validation.StringInSlice([]string{ + "equals", + "not-equals", + "contains", + "begins-with", + }, false), + }, + names.AttrKey: { + Type: schema.TypeString, + Optional: true, + }, + names.AttrValue: { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + names.AttrAction: { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "association_method": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "tag", + "constant", + }, false), + }, + "segment": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringMatch(regexache.MustCompile(`^[A-Za-z][0-9A-Za-z]{0,63}$`), + "must begin with a letter and contain only alphanumeric characters"), + }, + "tag_value_of_key": { + Type: schema.TypeString, + Optional: true, + }, + "require_acceptance": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "add_to_network_function_group": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, }, - "share_with": setOfString, - "share_with_except": setOfString, }, }, }, - names.AttrVersion: { + names.AttrJSON: { Type: schema.TypeString, - Optional: true, - Default: "2021.12", - ValidateFunc: validation.StringInSlice([]string{ - "2021.12", - }, false), + Computed: true, }, }, } @@ -316,42 +378,49 @@ func DataSourceCoreNetworkPolicyDocument() *schema.Resource { func dataSourceCoreNetworkPolicyDocumentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - mergedDoc := &CoreNetworkPolicyDoc{ + + mergedDoc := &coreNetworkPolicyDocument{ Version: d.Get(names.AttrVersion).(string), } // CoreNetworkConfiguration - networkConfiguration, err := expandDataCoreNetworkPolicyNetworkConfiguration(d.Get("core_network_configuration").([]interface{})) + networkConfiguration, err := 
expandCoreNetworkPolicyCoreNetworkConfiguration(d.Get("core_network_configuration").([]interface{})) if err != nil { - return sdkdiag.AppendErrorf(diags, "writing Network Manager Core Network Policy Document: %s", err) + return sdkdiag.AppendFromErr(diags, err) } mergedDoc.CoreNetworkConfiguration = networkConfiguration - // AttachmentPolicies - attachmentPolicies, err := expandDataCoreNetworkPolicyAttachmentPolicies(d.Get("attachment_policies").([]interface{})) + // Segments + segments, err := expandCoreNetworkPolicySegments(d.Get("segments").([]interface{})) if err != nil { - return sdkdiag.AppendErrorf(diags, "writing Network Manager Core Network Policy Document: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - mergedDoc.AttachmentPolicies = attachmentPolicies + mergedDoc.Segments = segments + + // NetworkFunctionGroups + networkFunctionGroups, err := expandCoreNetworkPolicyNetworkFunctionGroups(d.Get("network_function_groups").([]interface{})) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + mergedDoc.NetworkFunctionGroups = networkFunctionGroups // SegmentActions - segment_actions, err := expandDataCoreNetworkPolicySegmentActions(d.Get("segment_actions").([]interface{})) + segment_actions, err := expandCoreNetworkPolicySegmentActions(d.Get("segment_actions").([]interface{})) if err != nil { - return sdkdiag.AppendErrorf(diags, "writing Network Manager Core Network Policy Document: %s", err) + return sdkdiag.AppendFromErr(diags, err) } mergedDoc.SegmentActions = segment_actions - // Segments - segments, err := expandDataCoreNetworkPolicySegments(d.Get("segments").([]interface{})) + // AttachmentPolicies + attachmentPolicies, err := expandCoreNetworkPolicyAttachmentPolicies(d.Get("attachment_policies").([]interface{})) if err != nil { - return sdkdiag.AppendErrorf(diags, "writing Network Manager Core Network Policy Document: %s", err) + return sdkdiag.AppendFromErr(diags, err) } - mergedDoc.Segments = segments + 
mergedDoc.AttachmentPolicies = attachmentPolicies jsonDoc, err := json.MarshalIndent(mergedDoc, "", " ") if err != nil { - // should never happen if the above code is correct - return sdkdiag.AppendErrorf(diags, "writing Network Manager Core Network Policy Document: formatting JSON: %s", err) + return sdkdiag.AppendFromErr(diags, err) } jsonString := string(jsonDoc) @@ -361,290 +430,416 @@ func dataSourceCoreNetworkPolicyDocumentRead(ctx context.Context, d *schema.Reso return diags } -func expandDataCoreNetworkPolicySegmentActions(cfgSegmentActionsIntf []interface{}) ([]*CoreNetworkPolicySegmentAction, error) { - sgmtActions := make([]*CoreNetworkPolicySegmentAction, len(cfgSegmentActionsIntf)) - for i, sgmtActionI := range cfgSegmentActionsIntf { - cfgSA := sgmtActionI.(map[string]interface{}) - sgmtAction := &CoreNetworkPolicySegmentAction{} - action := cfgSA[names.AttrAction].(string) - sgmtAction.Action = action - var shareWith, shareWithExcept interface{} - - if action == "share" { - if mode, ok := cfgSA[names.AttrMode]; ok { - sgmtAction.Mode = mode.(string) +func expandCoreNetworkPolicySegmentActions(tfList []interface{}) ([]*coreNetworkPolicySegmentAction, error) { + apiObjects := make([]*coreNetworkPolicySegmentAction, 0) + + for i, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + apiObject := &coreNetworkPolicySegmentAction{} + + action := tfMap[names.AttrAction].(string) + apiObject.Action = action + switch action { + case "share": + if v, ok := tfMap["segment"]; ok { + apiObject.Segment = v.(string) } - if sgmt, ok := cfgSA["segment"]; ok { - sgmtAction.Segment = sgmt.(string) + if v, ok := tfMap[names.AttrMode]; ok { + apiObject.Mode = v.(string) } - if sW := cfgSA["share_with"].(*schema.Set).List(); len(sW) > 0 { - shareWith = CoreNetworkPolicyDecodeConfigStringList(sW) - sgmtAction.ShareWith = shareWith + var shareWith, shareWithExcept interface{} + + if v := 
tfMap["share_with"].(*schema.Set).List(); len(v) > 0 { + shareWith = coreNetworkPolicyExpandStringList(v) + apiObject.ShareWith = shareWith } - if sWE := cfgSA["share_with_except"].(*schema.Set).List(); len(sWE) > 0 { - shareWithExcept = CoreNetworkPolicyDecodeConfigStringList(sWE) - sgmtAction.ShareWithExcept = shareWithExcept + if v := tfMap["share_with_except"].(*schema.Set).List(); len(v) > 0 { + shareWithExcept = coreNetworkPolicyExpandStringList(v) + apiObject.ShareWithExcept = shareWithExcept } if (shareWith != nil && shareWithExcept != nil) || (shareWith == nil && shareWithExcept == nil) { - return nil, fmt.Errorf("You must specify only 1 of \"share_with\" or \"share_with_except\". See segment_actions[%s].", strconv.Itoa(i)) + return nil, fmt.Errorf(`you must specify only 1 of "share_with" or "share_with_except". See segment_actions[%d]`, i) } - } - if action == "create-route" { - if mode := cfgSA[names.AttrMode]; mode != "" { - return nil, fmt.Errorf("Cannot specify \"mode\" if action = \"create-route\". See segment_actions[%s].", strconv.Itoa(i)) + case "create-route": + if v, ok := tfMap["segment"]; ok { + apiObject.Segment = v.(string) } - if dest := cfgSA["destinations"].(*schema.Set).List(); len(dest) > 0 { - sgmtAction.Destinations = CoreNetworkPolicyDecodeConfigStringList(dest) + if v := tfMap[names.AttrMode]; v != "" { + return nil, fmt.Errorf(`you cannot specify "mode" if action = "create-route". 
See segment_actions[%d]`, i) } - if destCidrB := cfgSA["destination_cidr_blocks"].(*schema.Set).List(); len(destCidrB) > 0 { - sgmtAction.DestinationCidrBlocks = CoreNetworkPolicyDecodeConfigStringList(destCidrB) + if v := tfMap["destination_cidr_blocks"].(*schema.Set).List(); len(v) > 0 { + apiObject.DestinationCidrBlocks = coreNetworkPolicyExpandStringList(v) } - } - if sgmt, ok := cfgSA["segment"]; ok { - sgmtAction.Segment = sgmt.(string) + if v := tfMap["destinations"].(*schema.Set).List(); len(v) > 0 { + apiObject.Destinations = coreNetworkPolicyExpandStringList(v) + } + + if v, ok := tfMap[names.AttrDescription]; ok { + apiObject.Description = v.(string) + } + + case "send-via", "send-to": + if v, ok := tfMap["segment"]; ok { + apiObject.Segment = v.(string) + } + + if v, ok := tfMap[names.AttrMode]; ok { + apiObject.Mode = v.(string) + } + + if v, ok := tfMap["when_sent_to"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.WhenSentTo = &coreNetworkPolicySegmentActionWhenSentTo{} + + tfMap := v[0].(map[string]interface{}) + + if v := tfMap["segments"].(*schema.Set).List(); len(v) > 0 { + apiObject.WhenSentTo.Segments = coreNetworkPolicyExpandStringList(v) + } + } + + if v, ok := tfMap["via"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + apiObject.Via = &coreNetworkPolicySegmentActionVia{} + + tfMap := v[0].(map[string]interface{}) + + if v := tfMap["network_function_groups"].(*schema.Set).List(); len(v) > 0 { + apiObject.Via.NetworkFunctionGroups = coreNetworkPolicyExpandStringList(v) + } + + if v, ok := tfMap["with_edge_override"].([]interface{}); ok && len(v) > 0 { + apiObjects := []*coreNetworkPolicySegmentActionViaEdgeOverride{} + + for _, tfMapRaw := range v { + tfMap := tfMapRaw.(map[string]interface{}) + apiObject := &coreNetworkPolicySegmentActionViaEdgeOverride{} + + if v := tfMap["edge_sets"].(*schema.Set).List(); len(v) > 0 { + apiObject.EdgeSets = coreNetworkPolicyExpandStringList(v) + } + + if v, ok := tfMap["use_edge"]; ok 
{ + apiObject.UseEdge = v.(string) + } + + apiObjects = append(apiObjects, apiObject) + } + + apiObject.Via.WithEdgeOverrides = apiObjects + } + } } - sgmtActions[i] = sgmtAction + apiObjects = append(apiObjects, apiObject) } - return sgmtActions, nil + + return apiObjects, nil } -func expandDataCoreNetworkPolicyAttachmentPolicies(cfgAttachmentPolicyIntf []interface{}) ([]*CoreNetworkAttachmentPolicy, error) { - aPolicies := make([]*CoreNetworkAttachmentPolicy, len(cfgAttachmentPolicyIntf)) - ruleMap := make(map[string]struct{}) +func expandCoreNetworkPolicyAttachmentPolicies(tfList []interface{}) ([]*coreNetworkPolicyAttachmentPolicy, error) { + apiObjects := make([]*coreNetworkPolicyAttachmentPolicy, 0) + ruleMap := make(map[int]struct{}) + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } - for i, polI := range cfgAttachmentPolicyIntf { - cfgPol := polI.(map[string]interface{}) - policy := &CoreNetworkAttachmentPolicy{} + apiObject := &coreNetworkPolicyAttachmentPolicy{} - rule := cfgPol["rule_number"].(int) - ruleStr := strconv.Itoa(rule) - if _, ok := ruleMap[ruleStr]; ok { - return nil, fmt.Errorf("duplicate Rule Number (%s). Remove the Rule Number or ensure the Rule Number is unique.", ruleStr) + if v, ok := tfMap["rule_number"].(int); ok { + if _, ok := ruleMap[v]; ok { + return nil, fmt.Errorf("duplicate Rule Number (%d). 
Remove the Rule Number or ensure the Rule Number is unique", v) + } + apiObject.RuleNumber = v + ruleMap[apiObject.RuleNumber] = struct{}{} } - policy.RuleNumber = rule - ruleMap[ruleStr] = struct{}{} - if desc, ok := cfgPol[names.AttrDescription]; ok { - policy.Description = desc.(string) + if v, ok := tfMap[names.AttrDescription].(string); ok && v != "" { + apiObject.Description = v } - if cL, ok := cfgPol["condition_logic"]; ok { - policy.ConditionLogic = cL.(string) + + if v, ok := tfMap["condition_logic"].(string); ok && v != "" { + apiObject.ConditionLogic = v } - action, err := expandDataCoreNetworkPolicyAttachmentPoliciesAction(cfgPol[names.AttrAction].([]interface{})) + action, err := expandDataCoreNetworkPolicyAttachmentPoliciesAction(tfMap[names.AttrAction].([]interface{})) if err != nil { - return nil, fmt.Errorf("Problem with attachment policy rule number (%s). See attachment_policy[%s].action: %q", ruleStr, strconv.Itoa(i), err) + return nil, err } - policy.Action = action + apiObject.Action = action - conditions, err := expandDataCoreNetworkPolicyAttachmentPoliciesConditions(cfgPol["conditions"].([]interface{})) + conditions, err := expandDataCoreNetworkPolicyAttachmentPoliciesConditions(tfMap["conditions"].([]interface{})) if err != nil { - return nil, fmt.Errorf("Problem with attachment policy rule number (%s). 
See attachment_policy[%s].conditions %q", ruleStr, strconv.Itoa(i), err) + return nil, err } - policy.Conditions = conditions + apiObject.Conditions = conditions - aPolicies[i] = policy + apiObjects = append(apiObjects, apiObject) } - // adjust - return aPolicies, nil + return apiObjects, nil } -func expandDataCoreNetworkPolicyAttachmentPoliciesConditions(tfList []interface{}) ([]*CoreNetworkAttachmentPolicyCondition, error) { - conditions := make([]*CoreNetworkAttachmentPolicyCondition, len(tfList)) +func expandDataCoreNetworkPolicyAttachmentPoliciesConditions(tfList []interface{}) ([]*coreNetworkPolicyAttachmentPolicyCondition, error) { + apiObjects := make([]*coreNetworkPolicyAttachmentPolicyCondition, 0) + + for i, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } - for i, condI := range tfList { - cfgCond := condI.(map[string]interface{}) - condition := &CoreNetworkAttachmentPolicyCondition{} + apiObject := &coreNetworkPolicyAttachmentPolicyCondition{} k := map[string]bool{ "operator": false, names.AttrKey: false, names.AttrValue: false, } - t := cfgCond[names.AttrType].(string) - condition.Type = t + typ := tfMap[names.AttrType].(string) + apiObject.Type = typ - if o := cfgCond["operator"]; o != "" { + if v, ok := tfMap["operator"].(string); ok && v != "" { k["operator"] = true - condition.Operator = o.(string) + apiObject.Operator = v } - if key := cfgCond[names.AttrKey]; key != "" { + + if v := tfMap[names.AttrKey].(string); ok && v != "" { k[names.AttrKey] = true - condition.Key = key.(string) + apiObject.Key = v } - if v := cfgCond[names.AttrValue]; v != "" { + + if v, ok := tfMap[names.AttrValue].(string); ok && v != "" { k[names.AttrValue] = true - condition.Value = v.(string) + apiObject.Value = v } - if t == "any" { + switch typ { + case "any": for _, key := range k { if key { - return nil, fmt.Errorf("Conditions %s: You cannot set \"operator\", \"key\", or \"value\" if type = \"any\".", 
strconv.Itoa(i)) + return nil, fmt.Errorf("Conditions %d: You cannot set \"operator\", \"key\", or \"value\" if type = \"any\".", i) } } - } - if t == "tag-exists" { + + case "tag-exists": if !k[names.AttrKey] || k["operator"] || k[names.AttrValue] { - return nil, fmt.Errorf("Conditions %s: You must set \"key\" and cannot set \"operator\", or \"value\" if type = \"tag-exists\".", strconv.Itoa(i)) + return nil, fmt.Errorf("Conditions %d: You must set \"key\" and cannot set \"operator\", or \"value\" if type = \"tag-exists\".", i) } - } - if t == "tag-value" { + + case "tag-value": if !k[names.AttrKey] || !k["operator"] || !k[names.AttrValue] { - return nil, fmt.Errorf("Conditions %s: You must set \"key\", \"operator\", and \"value\" if type = \"tag-value\".", strconv.Itoa(i)) + return nil, fmt.Errorf("Conditions %d: You must set \"key\", \"operator\", and \"value\" if type = \"tag-value\".", i) } - } - if t == names.AttrRegion || t == "resource-id" || t == "account-id" { + + case names.AttrRegion, "resource-id", "account-id": if k[names.AttrKey] || !k["operator"] || !k[names.AttrValue] { - return nil, fmt.Errorf("Conditions %s: You must set \"value\" and \"operator\" and cannot set \"key\" if type = \"region\", \"resource-id\", or \"account-id\".", strconv.Itoa(i)) + return nil, fmt.Errorf("Conditions %d: You must set \"value\" and \"operator\" and cannot set \"key\" if type = \"region\", \"resource-id\", or \"account-id\".", i) } - } - if t == "attachment-type" { - if k[names.AttrKey] || !k[names.AttrValue] || cfgCond["operator"].(string) != "equals" { - return nil, fmt.Errorf("Conditions %s: You must set \"value\", cannot set \"key\" and \"operator\" must be \"equals\" if type = \"attachment-type\".", strconv.Itoa(i)) + + case "attachment-type": + if k[names.AttrKey] || !k[names.AttrValue] || tfMap["operator"].(string) != "equals" { + return nil, fmt.Errorf("Conditions %d: You must set \"value\", cannot set \"key\" and \"operator\" must be \"equals\" if type = 
\"attachment-type\".", i) } } - conditions[i] = condition + + apiObjects = append(apiObjects, apiObject) } - return conditions, nil + + return apiObjects, nil } -func expandDataCoreNetworkPolicyAttachmentPoliciesAction(tfList []interface{}) (*CoreNetworkAttachmentPolicyAction, error) { - cfgAP := tfList[0].(map[string]interface{}) - assocMethod := cfgAP["association_method"].(string) - aP := &CoreNetworkAttachmentPolicyAction{ - AssociationMethod: assocMethod, +func expandDataCoreNetworkPolicyAttachmentPoliciesAction(tfList []interface{}) (*coreNetworkPolicyAttachmentPolicyAction, error) { + tfMap := tfList[0].(map[string]interface{}) + + associationMethod := tfMap["association_method"].(string) + apiObject := &coreNetworkPolicyAttachmentPolicyAction{ + AssociationMethod: associationMethod, } - if segment := cfgAP["segment"]; segment != "" { - if assocMethod == "tag" { - return nil, fmt.Errorf("Cannot set \"segment\" argument if association_method = \"tag\".") + if v, ok := tfMap["segment"].(string); ok && v != "" { + if associationMethod == "tag" { + return nil, fmt.Errorf(`cannot set "segment" argument if association_method = "tag"`) } - aP.Segment = segment.(string) + apiObject.Segment = v } - if tag := cfgAP["tag_value_of_key"]; tag != "" { - if assocMethod == "constant" { - return nil, fmt.Errorf("Cannot set \"tag_value_of_key\" argument if association_method = \"constant\".") + + if v, ok := tfMap["tag_value_of_key"].(string); ok && v != "" { + if associationMethod == "constant" { + return nil, fmt.Errorf(`cannot set "tag_value_of_key" argument if association_method = "constant"`) } - aP.TagValueOfKey = tag.(string) + apiObject.TagValueOfKey = v } - if acceptance, ok := cfgAP["require_acceptance"]; ok { - aP.RequireAcceptance = acceptance.(bool) + + if v, ok := tfMap["require_acceptance"].(bool); ok { + apiObject.RequireAcceptance = v + } + + if v, ok := tfMap["add_to_network_function_group"].(string); ok && v != "" { + apiObject.AddToNetworkFunctionGroup = v 
} - return aP, nil + + return apiObject, nil } -func expandDataCoreNetworkPolicySegments(cfgSgmtIntf []interface{}) ([]*CoreNetworkPolicySegment, error) { - Sgmts := make([]*CoreNetworkPolicySegment, len(cfgSgmtIntf)) +func expandCoreNetworkPolicySegments(tfList []interface{}) ([]*coreNetworkPolicySegment, error) { + apiObjects := make([]*coreNetworkPolicySegment, 0) nameMap := make(map[string]struct{}) - for i, sgmtI := range cfgSgmtIntf { - cfgSgmt := sgmtI.(map[string]interface{}) - sgmt := &CoreNetworkPolicySegment{} + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + apiObject := &coreNetworkPolicySegment{} - if name, ok := cfgSgmt[names.AttrName]; ok { - if _, ok := nameMap[name.(string)]; ok { - return nil, fmt.Errorf("duplicate Name (%s). Remove the Name or ensure the Name is unique.", name.(string)) + if v, ok := tfMap[names.AttrName].(string); ok { + if _, ok := nameMap[v]; ok { + return nil, fmt.Errorf("duplicate Name (%s). 
Remove the Name or ensure the Name is unique", v) } - sgmt.Name = name.(string) - if len(sgmt.Name) > 0 { - nameMap[sgmt.Name] = struct{}{} + apiObject.Name = v + if len(apiObject.Name) > 0 { + nameMap[apiObject.Name] = struct{}{} } } - if description, ok := cfgSgmt[names.AttrDescription]; ok { - sgmt.Description = description.(string) + + if v, ok := tfMap[names.AttrDescription].(string); ok && v != "" { + apiObject.Description = v } - if actions := cfgSgmt["allow_filter"].(*schema.Set).List(); len(actions) > 0 { - sgmt.AllowFilter = CoreNetworkPolicyDecodeConfigStringList(actions) + + if v := tfMap["allow_filter"].(*schema.Set).List(); len(v) > 0 { + apiObject.AllowFilter = coreNetworkPolicyExpandStringList(v) } - if actions := cfgSgmt["deny_filter"].(*schema.Set).List(); len(actions) > 0 { - sgmt.DenyFilter = CoreNetworkPolicyDecodeConfigStringList(actions) + + if v := tfMap["deny_filter"].(*schema.Set).List(); len(v) > 0 { + apiObject.DenyFilter = coreNetworkPolicyExpandStringList(v) } - if edgeLocations := cfgSgmt["edge_locations"].(*schema.Set).List(); len(edgeLocations) > 0 { - sgmt.EdgeLocations = CoreNetworkPolicyDecodeConfigStringList(edgeLocations) + + if v := tfMap["edge_locations"].(*schema.Set).List(); len(v) > 0 { + apiObject.EdgeLocations = coreNetworkPolicyExpandStringList(v) } - if b, ok := cfgSgmt["require_attachment_acceptance"]; ok { - sgmt.RequireAttachmentAcceptance = b.(bool) + + if v, ok := tfMap["require_attachment_acceptance"].(bool); ok { + apiObject.RequireAttachmentAcceptance = v } - if b, ok := cfgSgmt["isolate_attachments"]; ok { - sgmt.IsolateAttachments = b.(bool) + + if v, ok := tfMap["isolate_attachments"].(bool); ok { + apiObject.IsolateAttachments = v } - Sgmts[i] = sgmt + + apiObjects = append(apiObjects, apiObject) } - return Sgmts, nil + return apiObjects, nil } -func expandDataCoreNetworkPolicyNetworkConfiguration(networkCfgIntf []interface{}) (*CoreNetworkPolicyCoreNetworkConfiguration, error) { - m := 
networkCfgIntf[0].(map[string]interface{}) - - nc := &CoreNetworkPolicyCoreNetworkConfiguration{} +func expandCoreNetworkPolicyCoreNetworkConfiguration(tfList []interface{}) (*coreNetworkPolicyCoreNetworkConfiguration, error) { + tfMap := tfList[0].(map[string]interface{}) + apiObject := &coreNetworkPolicyCoreNetworkConfiguration{} - nc.AsnRanges = CoreNetworkPolicyDecodeConfigStringList(m["asn_ranges"].(*schema.Set).List()) + apiObject.AsnRanges = coreNetworkPolicyExpandStringList(tfMap["asn_ranges"].(*schema.Set).List()) - if cidrs := m["inside_cidr_blocks"].(*schema.Set).List(); len(cidrs) > 0 { - nc.InsideCidrBlocks = CoreNetworkPolicyDecodeConfigStringList(cidrs) + if v := tfMap["inside_cidr_blocks"].(*schema.Set).List(); len(v) > 0 { + apiObject.InsideCidrBlocks = coreNetworkPolicyExpandStringList(v) } - nc.VpnEcmpSupport = m["vpn_ecmp_support"].(bool) - - el, err := expandDataCoreNetworkPolicyNetworkConfigurationEdgeLocations(m["edge_locations"].([]interface{})) + apiObject.VpnEcmpSupport = tfMap["vpn_ecmp_support"].(bool) + el, err := expandDataCoreNetworkPolicyNetworkConfigurationEdgeLocations(tfMap["edge_locations"].([]interface{})) if err != nil { return nil, err } - nc.EdgeLocations = el + apiObject.EdgeLocations = el - return nc, nil + return apiObject, nil } -func expandDataCoreNetworkPolicyNetworkConfigurationEdgeLocations(tfList []interface{}) ([]*CoreNetworkEdgeLocation, error) { - edgeLocations := make([]*CoreNetworkEdgeLocation, len(tfList)) - locMap := make(map[string]struct{}) - - for i, edgeLocationsRaw := range tfList { - cfgEdgeLocation, ok := edgeLocationsRaw.(map[string]interface{}) - edgeLocation := &CoreNetworkEdgeLocation{} +func expandDataCoreNetworkPolicyNetworkConfigurationEdgeLocations(tfList []interface{}) ([]*coreNetworkPolicyCoreNetworkEdgeLocation, error) { + apiObjects := make([]*coreNetworkPolicyCoreNetworkEdgeLocation, 0) + locationMap := make(map[string]struct{}) + for _, tfMapRaw := range tfList { + tfMap, ok := 
tfMapRaw.(map[string]interface{}) if !ok { continue } - location := cfgEdgeLocation[names.AttrLocation].(string) + apiObject := &coreNetworkPolicyCoreNetworkEdgeLocation{} - if _, ok := locMap[location]; ok { - return nil, fmt.Errorf("duplicate Location (%s). Remove the Location or ensure the Location is unique.", location) - } - edgeLocation.Location = location - if len(edgeLocation.Location) > 0 { - locMap[edgeLocation.Location] = struct{}{} + if v, ok := tfMap[names.AttrLocation].(string); ok { + if _, ok := locationMap[v]; ok { + return nil, fmt.Errorf("duplicate Location (%s). Remove the Location or ensure the Location is unique", v) + } + apiObject.Location = v + if len(apiObject.Location) > 0 { + locationMap[apiObject.Location] = struct{}{} + } } - if v, ok := cfgEdgeLocation["asn"].(string); ok && v != "" { + if v, ok := tfMap["asn"].(string); ok && v != "" { v, err := strconv.ParseInt(v, 10, 64) - if err != nil { return nil, err } + apiObject.Asn = v + } - edgeLocation.Asn = v + if v := tfMap["inside_cidr_blocks"].([]interface{}); len(v) > 0 { + apiObject.InsideCidrBlocks = coreNetworkPolicyExpandStringList(v) } - if cidrs := cfgEdgeLocation["inside_cidr_blocks"].([]interface{}); len(cidrs) > 0 { - edgeLocation.InsideCidrBlocks = CoreNetworkPolicyDecodeConfigStringList(cidrs) + apiObjects = append(apiObjects, apiObject) + } + + return apiObjects, nil +} + +func expandCoreNetworkPolicyNetworkFunctionGroups(tfList []interface{}) ([]*coreNetworkPolicyNetworkFunctionGroup, error) { + apiObjects := make([]*coreNetworkPolicyNetworkFunctionGroup, 0) + nameMap := make(map[string]struct{}) + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue } - edgeLocations[i] = edgeLocation + apiObject := &coreNetworkPolicyNetworkFunctionGroup{} + + if v, ok := tfMap[names.AttrName].(string); ok { + if _, ok := nameMap[v]; ok { + return nil, fmt.Errorf("duplicate Name (%s). 
Remove the Name or ensure the Name is unique", v) + } + apiObject.Name = v + if len(apiObject.Name) > 0 { + nameMap[apiObject.Name] = struct{}{} + } + } + + if v, ok := tfMap[names.AttrDescription].(string); ok && v != "" { + apiObject.Description = v + } + + if v, ok := tfMap["require_attachment_acceptance"].(bool); ok { + apiObject.RequireAttachmentAcceptance = v + } + + apiObjects = append(apiObjects, apiObject) } - return edgeLocations, nil + + return apiObjects, nil } diff --git a/internal/service/networkmanager/core_network_policy_document_data_source_test.go b/internal/service/networkmanager/core_network_policy_document_data_source_test.go index 25b2be03052..085ba666f06 100644 --- a/internal/service/networkmanager/core_network_policy_document_data_source_test.go +++ b/internal/service/networkmanager/core_network_policy_document_data_source_test.go @@ -12,9 +12,6 @@ import ( ) func TestAccNetworkManagerCoreNetworkPolicyDocumentDataSource_basic(t *testing.T) { - // This really ought to be able to be a unit test rather than an - // acceptance test, but just instantiating the AWS provider requires - // some AWS API calls, and so this needs valid AWS credentials to work. 
ctx := acctest.Context(t) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -24,9 +21,24 @@ func TestAccNetworkManagerCoreNetworkPolicyDocumentDataSource_basic(t *testing.T { Config: testAccCoreNetworkPolicyDocumentDataSourceConfig_basic, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("data.aws_networkmanager_core_network_policy_document.test", names.AttrJSON, - testAccPolicyDocumentExpectedJSON(), - ), + acctest.CheckResourceAttrEquivalentJSON("data.aws_networkmanager_core_network_policy_document.test", names.AttrJSON, testAccPolicyDocumentBasicExpectedJSON), + ), + }, + }, + }) +} + +func TestAccNetworkManagerCoreNetworkPolicyDocumentDataSource_serviceInsertion(t *testing.T) { + ctx := acctest.Context(t) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkManagerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccCoreNetworkPolicyDocumentDataSourceConfig_serviceInsertion, + Check: resource.ComposeTestCheckFunc( + acctest.CheckResourceAttrEquivalentJSON("data.aws_networkmanager_core_network_policy_document.test", names.AttrJSON, testAccPolicyDocumentServiceInsertionExpectedJSON), ), }, }, @@ -34,7 +46,7 @@ func TestAccNetworkManagerCoreNetworkPolicyDocumentDataSource_basic(t *testing.T } // lintignore:AWSAT003 -var testAccCoreNetworkPolicyDocumentDataSourceConfig_basic = ` +const testAccCoreNetworkPolicyDocumentDataSourceConfig_basic = ` data "aws_networkmanager_core_network_policy_document" "test" { core_network_configuration { vpn_ecmp_support = false @@ -144,7 +156,7 @@ data "aws_networkmanager_core_network_policy_document" "test" { action = "share" mode = "attachment-route" segment = "GoodSegmentSpecification" - share_with_except = [ + share_with = [ "a", "b", "c" @@ -276,8 +288,7 @@ data 
"aws_networkmanager_core_network_policy_document" "test" { ` // lintignore:AWSAT003 -func testAccPolicyDocumentExpectedJSON() string { - return `{ +const testAccPolicyDocumentBasicExpectedJSON = `{ "version": "2021.12", "core-network-configuration": { "asn-ranges": [ @@ -494,11 +505,13 @@ func testAccPolicyDocumentExpectedJSON() string { "action": "share", "mode": "attachment-route", "segment": "AnotherGoodSegmentSpecification", - "share-with": [ - "c", - "b", - "a" - ] + "share-with": { + "except": [ + "c", + "b", + "a" + ] + } }, { "action": "share", @@ -510,6 +523,160 @@ func testAccPolicyDocumentExpectedJSON() string { "a" ] } - ] + ], + "network-function-groups": [] }` + +// lintignore:AWSAT003 +const testAccCoreNetworkPolicyDocumentDataSourceConfig_serviceInsertion = ` +data "aws_networkmanager_core_network_policy_document" "test" { + core_network_configuration { + vpn_ecmp_support = true + asn_ranges = [ + "64512-65534" + ] + inside_cidr_blocks = [ + "10.0.0.0/16" + ] + edge_locations { + location = "us-east-2" + } + edge_locations { + location = "us-west-2" + } + } + + segments { + name = "development" + require_attachment_acceptance = true + isolate_attachments = true + edge_locations = [ + "us-east-2" + ] + } + + segments { + name = "production" + require_attachment_acceptance = true + isolate_attachments = true + edge_locations = [ + "us-east-2" + ] + } + + segment_actions { + action = "send-via" + segment = "development" + mode = "single-hop" + + when_sent_to { + segments = [ + "production", + ] + } + + via { + network_function_groups = ["InspectionVPC"] + } + } + + attachment_policies { + rule_number = 125 + condition_logic = "and" + + conditions { + type = "tag-exists" + key = "InspectionVpcs" + } + + action { + add_to_network_function_group = "InspectionVPC" + } + } + + network_function_groups { + name = "InspectionVPC" + description = "Route segment traffic to the inspection VPC" + require_attachment_acceptance = true + } } +` + +// 
lintignore:AWSAT003 +const testAccPolicyDocumentServiceInsertionExpectedJSON = `{ + "version": "2021.12", + "core-network-configuration": { + "vpn-ecmp-support": true, + "inside-cidr-blocks": [ + "10.0.0.0/16" + ], + "asn-ranges": [ + "64512-65534" + ], + "edge-locations": [ + { + "location": "us-east-2" + }, + { + "location": "us-west-2" + } + ] + }, + "segments": [ + { + "name": "development", + "edge-locations": [ + "us-east-2" + ], + "require-attachment-acceptance": true, + "isolate-attachments": true + }, + { + "name": "production", + "edge-locations": [ + "us-east-2" + ], + "require-attachment-acceptance": true, + "isolate-attachments": true + } + ], + "network-function-groups": [ + { + "name": "InspectionVPC", + "description": "Route segment traffic to the inspection VPC", + "require-attachment-acceptance": true + } + ], + "segment-actions": [ + { + "action": "send-via", + "segment": "development", + "mode": "single-hop", + "when-sent-to": { + "segments": [ + "production" + ] + }, + "via": { + "network-function-groups": [ + "InspectionVPC" + ] + } + } + ], + "attachment-policies": [ + { + "rule-number": 125, + "condition-logic": "and", + "conditions": [ + { + "type": "tag-exists", + "key": "InspectionVpcs" + } + ], + "action": { + "add-to-network-function-group": "InspectionVPC" + } + } + ] +}` diff --git a/internal/service/networkmanager/core_network_policy_model.go b/internal/service/networkmanager/core_network_policy_model.go index daf1a732a0f..8db896a38d3 100644 --- a/internal/service/networkmanager/core_network_policy_model.go +++ b/internal/service/networkmanager/core_network_policy_model.go @@ -6,88 +6,112 @@ package networkmanager import ( "encoding/json" "sort" + + "github.com/hashicorp/terraform-provider-aws/internal/flex" ) -type CoreNetworkPolicyDoc struct { +type coreNetworkPolicyDocument struct { Version string `json:"version,omitempty"` - CoreNetworkConfiguration *CoreNetworkPolicyCoreNetworkConfiguration `json:"core-network-configuration"` - 
Segments []*CoreNetworkPolicySegment `json:"segments"` - AttachmentPolicies []*CoreNetworkAttachmentPolicy `json:"attachment-policies,omitempty"` - SegmentActions []*CoreNetworkPolicySegmentAction `json:"segment-actions,omitempty"` -} - -type CoreNetworkPolicySegmentAction struct { - Action string `json:"action"` - Destinations interface{} `json:"destinations,omitempty"` - DestinationCidrBlocks interface{} `json:"destination-cidr-blocks,omitempty"` - Mode string `json:"mode,omitempty"` - Segment string `json:"segment,omitempty"` - ShareWith interface{} `json:"share-with,omitempty"` - ShareWithExcept interface{} `json:",omitempty"` -} - -type CoreNetworkAttachmentPolicy struct { - RuleNumber int `json:"rule-number,omitempty"` - Action *CoreNetworkAttachmentPolicyAction `json:"action"` - Conditions []*CoreNetworkAttachmentPolicyCondition `json:"conditions"` - Description string `json:"description,omitempty"` - ConditionLogic string `json:"condition-logic,omitempty"` + CoreNetworkConfiguration *coreNetworkPolicyCoreNetworkConfiguration `json:"core-network-configuration"` + Segments []*coreNetworkPolicySegment `json:"segments"` + NetworkFunctionGroups []*coreNetworkPolicyNetworkFunctionGroup `json:"network-function-groups"` + SegmentActions []*coreNetworkPolicySegmentAction `json:"segment-actions,omitempty"` + AttachmentPolicies []*coreNetworkPolicyAttachmentPolicy `json:"attachment-policies,omitempty"` } -type CoreNetworkAttachmentPolicyAction struct { - AssociationMethod string `json:"association-method,omitempty"` - Segment string `json:"segment,omitempty"` - TagValueOfKey string `json:"tag-value-of-key,omitempty"` - RequireAcceptance bool `json:"require-acceptance,omitempty"` +type coreNetworkPolicyCoreNetworkConfiguration struct { + AsnRanges interface{} `json:"asn-ranges"` + InsideCidrBlocks interface{} `json:"inside-cidr-blocks,omitempty"` + VpnEcmpSupport bool `json:"vpn-ecmp-support"` + EdgeLocations []*coreNetworkPolicyCoreNetworkEdgeLocation 
`json:"edge-locations,omitempty"` } -type CoreNetworkAttachmentPolicyCondition struct { - Type string `json:"type,omitempty"` - Operator string `json:"operator,omitempty"` - Key string `json:"key,omitempty"` - Value string `json:"value,omitempty"` +type coreNetworkPolicyCoreNetworkEdgeLocation struct { + Location string `json:"location"` + Asn int64 `json:"asn,omitempty"` + InsideCidrBlocks interface{} `json:"inside-cidr-blocks,omitempty"` } -type CoreNetworkPolicySegment struct { +type coreNetworkPolicySegment struct { Name string `json:"name"` Description string `json:"description,omitempty"` - AllowFilter interface{} `json:"allow-filter,omitempty"` - DenyFilter interface{} `json:"deny-filter,omitempty"` EdgeLocations interface{} `json:"edge-locations,omitempty"` IsolateAttachments bool `json:"isolate-attachments"` RequireAttachmentAcceptance bool `json:"require-attachment-acceptance"` + DenyFilter interface{} `json:"deny-filter,omitempty"` + AllowFilter interface{} `json:"allow-filter,omitempty"` } -type CoreNetworkPolicyCoreNetworkConfiguration struct { - AsnRanges interface{} `json:"asn-ranges"` - VpnEcmpSupport bool `json:"vpn-ecmp-support"` - EdgeLocations []*CoreNetworkEdgeLocation `json:"edge-locations,omitempty"` - InsideCidrBlocks interface{} `json:"inside-cidr-blocks,omitempty"` +type coreNetworkPolicyNetworkFunctionGroup struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + RequireAttachmentAcceptance bool `json:"require-attachment-acceptance"` } -type CoreNetworkEdgeLocation struct { - Location string `json:"location"` - Asn int64 `json:"asn,omitempty"` - InsideCidrBlocks interface{} `json:"inside-cidr-blocks,omitempty"` +type coreNetworkPolicySegmentAction struct { + Action string `json:"action"` + Segment string `json:"segment,omitempty"` + Mode string `json:"mode,omitempty"` + ShareWith interface{} `json:"share-with,omitempty"` + ShareWithExcept interface{} `json:",omitempty"` + DestinationCidrBlocks interface{} 
`json:"destination-cidr-blocks,omitempty"` + Destinations interface{} `json:"destinations,omitempty"` + Description string `json:"description,omitempty"` + WhenSentTo *coreNetworkPolicySegmentActionWhenSentTo `json:"when-sent-to,omitempty"` + Via *coreNetworkPolicySegmentActionVia `json:"via,omitempty"` } -func (c CoreNetworkPolicySegmentAction) MarshalJSON() ([]byte, error) { - type Alias CoreNetworkPolicySegmentAction +type coreNetworkPolicySegmentActionWhenSentTo struct { + Segments interface{} `json:"segments,omitempty"` +} - var share interface{} +type coreNetworkPolicySegmentActionVia struct { + NetworkFunctionGroups interface{} `json:"network-function-groups,omitempty"` + WithEdgeOverrides []*coreNetworkPolicySegmentActionViaEdgeOverride `json:"with-edge-overrides,omitempty"` +} +type coreNetworkPolicySegmentActionViaEdgeOverride struct { + EdgeSets interface{} `json:"edge-sets,omitempty"` + UseEdge string `json:"use-edge,omitempty"` +} + +type coreNetworkPolicyAttachmentPolicy struct { + RuleNumber int `json:"rule-number,omitempty"` + Description string `json:"description,omitempty"` + ConditionLogic string `json:"condition-logic,omitempty"` + Conditions []*coreNetworkPolicyAttachmentPolicyCondition `json:"conditions"` + Action *coreNetworkPolicyAttachmentPolicyAction `json:"action"` +} - if c.ShareWith != nil { - sWIntf := c.ShareWith.([]string) +type coreNetworkPolicyAttachmentPolicyCondition struct { + Type string `json:"type,omitempty"` + Operator string `json:"operator,omitempty"` + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` +} - if sWIntf[0] == "*" { - share = sWIntf[0] +type coreNetworkPolicyAttachmentPolicyAction struct { + AssociationMethod string `json:"association-method,omitempty"` + Segment string `json:"segment,omitempty"` + TagValueOfKey string `json:"tag-value-of-key,omitempty"` + RequireAcceptance bool `json:"require-acceptance,omitempty"` + AddToNetworkFunctionGroup string 
`json:"add-to-network-function-group,omitempty"` +} + +func (c coreNetworkPolicySegmentAction) MarshalJSON() ([]byte, error) { + type Alias coreNetworkPolicySegmentAction + var share interface{} + + if v := c.ShareWith; v != nil { + v := v.([]string) + if v[0] == "*" { + share = v[0] } else { - share = sWIntf + share = v + } + } else if v := c.ShareWithExcept; v != nil { + share = map[string]interface{}{ + "except": v.([]string), } - } - - if c.ShareWithExcept != nil { - share = c.ShareWithExcept.([]string) } return json.Marshal(&Alias{ @@ -97,14 +121,14 @@ func (c CoreNetworkPolicySegmentAction) MarshalJSON() ([]byte, error) { DestinationCidrBlocks: c.DestinationCidrBlocks, Segment: c.Segment, ShareWith: share, + Via: c.Via, + WhenSentTo: c.WhenSentTo, }) } -func CoreNetworkPolicyDecodeConfigStringList(lI []interface{}) interface{} { - ret := make([]string, len(lI)) - for i, vI := range lI { - ret[i] = vI.(string) - } - sort.Sort(sort.Reverse(sort.StringSlice(ret))) - return ret +func coreNetworkPolicyExpandStringList(configured []interface{}) interface{} { + vs := flex.ExpandStringValueList(configured) + sort.Sort(sort.Reverse(sort.StringSlice(vs))) + + return vs } diff --git a/internal/service/networkmanager/service_endpoint_resolver_gen.go b/internal/service/networkmanager/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..85d9eaab057 --- /dev/null +++ b/internal/service/networkmanager/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package networkmanager + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/networkmanager/service_endpoints_gen_test.go b/internal/service/networkmanager/service_endpoints_gen_test.go index f309aa0f608..22e63eca7d6 100644 --- a/internal/service/networkmanager/service_endpoints_gen_test.go +++ b/internal/service/networkmanager/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(networkmanager_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(networkmanager_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := 
url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/networkmanager/service_package_gen.go b/internal/service/networkmanager/service_package_gen.go index 503cb4d0005..660c9593902 100644 --- a/internal/service/networkmanager/service_package_gen.go +++ b/internal/service/networkmanager/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package networkmanager @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" networkmanager_sdkv1 "github.com/aws/aws-sdk-go/service/networkmanager" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -214,11 +213,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*n "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return networkmanager_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/networkmonitor/exports_test.go b/internal/service/networkmonitor/exports_test.go new file mode 100644 index 00000000000..c7e8a69c051 --- /dev/null +++ b/internal/service/networkmonitor/exports_test.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package networkmonitor + +// Exports for use in tests only. +var ( + ResourceMonitor = newMonitorResource + ResourceProbe = newProbeResource + + FindMonitorByName = findMonitorByName + FindProbeByTwoPartKey = findProbeByTwoPartKey +) diff --git a/internal/service/networkmonitor/generate.go b/internal/service/networkmonitor/generate.go new file mode 100644 index 00000000000..5c92af04d47 --- /dev/null +++ b/internal/service/networkmonitor/generate.go @@ -0,0 +1,8 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +//go:generate go run ../../generate/servicepackage/main.go +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -SkipTypesImp -ServiceTagsMap -ListTags -UpdateTags +// ONLY generate directives and package declaration! 
Do not add anything else to this file. + +package networkmonitor diff --git a/internal/service/networkmonitor/monitor.go b/internal/service/networkmonitor/monitor.go new file mode 100644 index 00000000000..c47a7836865 --- /dev/null +++ b/internal/service/networkmonitor/monitor.go @@ -0,0 +1,330 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package networkmonitor + +import ( + "context" + "fmt" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkmonitor" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmonitor/types" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource(name="Monitor") +// @Tags(identifierAttribute="arn") +func newMonitorResource(context.Context) 
(resource.ResourceWithConfigure, error) { + return &monitorResource{}, nil +} + +type monitorResource struct { + framework.ResourceWithConfigure + framework.WithImportByID +} + +func (*monitorResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_networkmonitor_monitor" +} + +func (r *monitorResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "aggregation_period": schema.Int64Attribute{ + Optional: true, + Validators: []validator.Int64{ + int64validator.OneOf(30, 60), + }, + }, + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrID: framework.IDAttribute(), + "monitor_name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexache.MustCompile("[a-zA-Z0-9_-]+"), "Must match [a-zA-Z0-9_-]+"), + stringvalidator.LengthBetween(1, 255), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + } +} + +func (r *monitorResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data monitorResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkMonitorClient(ctx) + + name := data.MonitorName.ValueString() + input := &networkmonitor.CreateMonitorInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, input)...) 
+ if response.Diagnostics.HasError() { + return + } + + input.ClientToken = aws.String(id.UniqueId()) + input.Tags = getTagsIn(ctx) + + output, err := conn.CreateMonitor(ctx, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("creating CloudWatch Network Monitor Monitor (%s)", name), err.Error()) + + return + } + + // Set values for unknowns. + data.MonitorARN = fwflex.StringToFramework(ctx, output.MonitorArn) + data.setID() + + if _, err := waitMonitorReady(ctx, conn, data.ID.ValueString()); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for CloudWatch Network Monitor Monitor (%s) create", data.ID.ValueString()), err.Error()) + + return + } + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *monitorResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data monitorResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) + + return + } + + conn := r.Meta().NetworkMonitorClient(ctx) + + output, err := findMonitorByName(ctx, conn, data.ID.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading CloudWatch Network Monitor Monitor (%s)", data.ID.ValueString()), err.Error()) + + return + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + setTagsOut(ctx, output.Tags) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +func (r *monitorResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new monitorResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkMonitorClient(ctx) + + if !new.AggregationPeriod.Equal(old.AggregationPeriod) { + input := &networkmonitor.UpdateMonitorInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, new, input)...) + if response.Diagnostics.HasError() { + return + } + + _, err := conn.UpdateMonitor(ctx, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating CloudWatch Network Monitor Monitor (%s)", new.ID.ValueString()), err.Error()) + + return + } + + if _, err := waitMonitorReady(ctx, conn, new.ID.ValueString()); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for CloudWatch Network Monitor Monitor (%s) update", new.ID.ValueString()), err.Error()) + + return + } + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) +} + +func (r *monitorResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data monitorResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) 
+ if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkMonitorClient(ctx) + + _, err := conn.DeleteMonitor(ctx, &networkmonitor.DeleteMonitorInput{ + MonitorName: fwflex.StringFromFramework(ctx, data.ID), + }) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting CloudWatch Network Monitor Monitor (%s)", data.ID.ValueString()), err.Error()) + + return + } + + if _, err := waitMonitorDeleted(ctx, conn, data.ID.ValueString()); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for CloudWatch Network Monitor Monitor (%s) delete", data.ID.ValueString()), err.Error()) + + return + } +} + +func (r *monitorResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) +} + +func findMonitorByName(ctx context.Context, conn *networkmonitor.Client, name string) (*networkmonitor.GetMonitorOutput, error) { + input := &networkmonitor.GetMonitorInput{ + MonitorName: aws.String(name), + } + + output, err := conn.GetMonitor(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusMonitor(ctx context.Context, conn *networkmonitor.Client, name string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := findMonitorByName(ctx, conn, name) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil + } +} + +func waitMonitorReady(ctx context.Context, conn *networkmonitor.Client, name string) (*networkmonitor.GetMonitorOutput, error) { //nolint:unparam + const ( + timeout = 
time.Minute * 10 + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.MonitorStatePending), + Target: enum.Slice(awstypes.MonitorStateActive, awstypes.MonitorStateInactive), + Refresh: statusMonitor(ctx, conn, name), + Timeout: timeout, + MinTimeout: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkmonitor.GetMonitorOutput); ok { + return output, err + } + + return nil, err +} + +func waitMonitorDeleted(ctx context.Context, conn *networkmonitor.Client, name string) (*networkmonitor.GetMonitorOutput, error) { + const ( + timeout = time.Minute * 10 + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.MonitorStateDeleting, awstypes.MonitorStateActive, awstypes.MonitorStateInactive), + Target: []string{}, + Refresh: statusMonitor(ctx, conn, name), + Timeout: timeout, + MinTimeout: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkmonitor.GetMonitorOutput); ok { + return output, err + } + + return nil, err +} + +type monitorResourceModel struct { + AggregationPeriod types.Int64 `tfsdk:"aggregation_period"` + ID types.String `tfsdk:"id"` + MonitorARN types.String `tfsdk:"arn"` + MonitorName types.String `tfsdk:"monitor_name"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` +} + +func (model *monitorResourceModel) InitFromID() error { + model.MonitorName = model.ID + + return nil +} + +func (model *monitorResourceModel) setID() { + model.ID = model.MonitorName +} diff --git a/internal/service/networkmonitor/monitor_test.go b/internal/service/networkmonitor/monitor_test.go new file mode 100644 index 00000000000..17be63198f2 --- /dev/null +++ b/internal/service/networkmonitor/monitor_test.go @@ -0,0 +1,200 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package networkmonitor_test + +import ( + "context" + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfnetworkmonitor "github.com/hashicorp/terraform-provider-aws/internal/service/networkmonitor" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkMonitorMonitor_basic(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMonitorDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMonitorConfig_basic(rName, 30), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMonitorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "aggregation_period", "30"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccMonitorConfig_basic(rName, 60), + Check: resource.ComposeTestCheckFunc( + testAccCheckMonitorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "aggregation_period", "60"), + ), + }, + }, + }) +} + +func TestAccNetworkMonitorMonitor_tags(t *testing.T) { + ctx := acctest.Context(t) + resourceName := 
"aws_networkmonitor_monitor.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMonitorDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMonitorConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckMonitorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccMonitorConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckMonitorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccMonitorConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckMonitorExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + }, + }) +} + +func TestAccNetworkMonitorMonitor_disappears(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_networkmonitor_monitor.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckMonitorDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccMonitorConfig_basic(rName, 30), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckMonitorExists(ctx, resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfnetworkmonitor.ResourceMonitor, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckMonitorDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkMonitorClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_networkmonitor_monitor" { + continue + } + + _, err := tfnetworkmonitor.FindMonitorByName(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("CloudWatch Network Monitor Monitor %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckMonitorExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkMonitorClient(ctx) + + _, err := tfnetworkmonitor.FindMonitorByName(ctx, conn, rs.Primary.ID) + + return err + } +} + +func testAccMonitorConfig_basic(rName string, aggregation int) string { + return fmt.Sprintf(` +resource "aws_networkmonitor_monitor" "test" { + aggregation_period = %[2]d + monitor_name = %[1]q +} +`, rName, aggregation) +} + +func testAccMonitorConfig_tags1(rName, tagKey1, tagValue1 string) string { + return fmt.Sprintf(` +resource "aws_networkmonitor_monitor" "test" { + aggregation_period = 30 + monitor_name = %[1]q + + tags = { + %[2]q = %[3]q + } +} +`, rName, 
tagKey1, tagValue1) +} + +func testAccMonitorConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +resource "aws_networkmonitor_monitor" "test" { + aggregation_period = 30 + monitor_name = %[1]q + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2) +} diff --git a/internal/service/networkmonitor/probe.go b/internal/service/networkmonitor/probe.go new file mode 100644 index 00000000000..7d11f838386 --- /dev/null +++ b/internal/service/networkmonitor/probe.go @@ -0,0 +1,432 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package networkmonitor + +import ( + "context" + "fmt" + "time" + + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkmonitor" + awstypes "github.com/aws/aws-sdk-go-v2/service/networkmonitor/types" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + 
"github.com/hashicorp/terraform-provider-aws/internal/framework" + fwflex "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkResource(name="Probe") +// @Tags(identifierAttribute="arn") +func newProbeResource(context.Context) (resource.ResourceWithConfigure, error) { + return &probeResource{}, nil +} + +type probeResource struct { + framework.ResourceWithConfigure + framework.WithImportByID +} + +func (*probeResource) Metadata(_ context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = "aws_networkmonitor_probe" +} + +func (r *probeResource) Schema(ctx context.Context, request resource.SchemaRequest, response *resource.SchemaResponse) { + response.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "address_family": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.AddressFamily](), + Computed: true, + }, + names.AttrARN: framework.ARNAttributeComputedOnly(), + names.AttrDestination: schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + stringvalidator.LengthBetween(1, 255), + }, + }, + "destination_port": schema.Int64Attribute{ + Optional: true, + Validators: []validator.Int64{ + int64validator.Between(0, 65536), + }, + }, + names.AttrID: framework.IDAttribute(), + "monitor_name": schema.StringAttribute{ + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + stringvalidator.RegexMatches(regexache.MustCompile("[a-zA-Z0-9_-]+"), "Must match [a-zA-Z0-9_-]+"), + stringvalidator.LengthBetween(1, 255), + }, + }, + "packet_size": schema.Int64Attribute{ + Optional: true, + 
Computed: true, + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), + }, + Validators: []validator.Int64{ + int64validator.Between(56, 8500), + }, + }, + "probe_id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + names.AttrProtocol: schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.Protocol](), + Required: true, + }, + "source_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + }, + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + names.AttrVPCID: schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + }, + } +} + +func (r *probeResource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + var data probeResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkMonitorClient(ctx) + + probeInput := &awstypes.ProbeInput{} + response.Diagnostics.Append(fwflex.Expand(ctx, data, probeInput)...) + if response.Diagnostics.HasError() { + return + } + + input := &networkmonitor.CreateProbeInput{ + ClientToken: aws.String(id.UniqueId()), + MonitorName: fwflex.StringFromFramework(ctx, data.MonitorName), + Probe: probeInput, + Tags: getTagsIn(ctx), + } + + outputCP, err := conn.CreateProbe(ctx, input) + + if err != nil { + response.Diagnostics.AddError("creating CloudWatch Network Monitor Probe (%s)", err.Error()) + + return + } + + // Set values for unknowns. 
+ data.ProbeARN = fwflex.StringToFramework(ctx, outputCP.ProbeArn) + data.ProbeID = fwflex.StringToFramework(ctx, outputCP.ProbeId) + data.setID() + + outputGP, err := waitProbeReady(ctx, conn, data.MonitorName.ValueString(), data.ProbeID.ValueString()) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for CloudWatch Network Monitor Probe (%s) create", data.ID.ValueString()), err.Error()) + + return + } + + // Set values for unknowns. + data.AddressFamily = fwtypes.StringEnumValue(outputGP.AddressFamily) + if data.PacketSize.IsUnknown() { + data.PacketSize = fwflex.Int32ToFramework(ctx, outputGP.PacketSize) + } + data.VpcID = fwflex.StringToFramework(ctx, outputGP.VpcId) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) +} + +func (r *probeResource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + var data probeResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + if err := data.InitFromID(); err != nil { + response.Diagnostics.AddError("parsing resource ID", err.Error()) + + return + } + + conn := r.Meta().NetworkMonitorClient(ctx) + + output, err := findProbeByTwoPartKey(ctx, conn, data.MonitorName.ValueString(), data.ProbeID.ValueString()) + + if tfresource.NotFound(err) { + response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) + response.State.RemoveResource(ctx) + + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("reading CloudWatch Network Monitor Probe (%s)", data.ID.String()), err.Error()) + + return + } + + // Set attributes for import. + response.Diagnostics.Append(fwflex.Flatten(ctx, output, &data)...) + if response.Diagnostics.HasError() { + return + } + + setTagsOut(ctx, output.Tags) + + response.Diagnostics.Append(response.State.Set(ctx, &data)...) 
+} + +func (r *probeResource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) { + var old, new probeResourceModel + response.Diagnostics.Append(request.Plan.Get(ctx, &new)...) + if response.Diagnostics.HasError() { + return + } + response.Diagnostics.Append(request.State.Get(ctx, &old)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkMonitorClient(ctx) + + if !new.Destination.Equal(old.Destination) || + !new.DestinationPort.Equal(old.DestinationPort) || + !new.PacketSize.Equal(old.PacketSize) || + !new.Protocol.Equal(old.Protocol) { + input := &networkmonitor.UpdateProbeInput{ + MonitorName: fwflex.StringFromFramework(ctx, new.MonitorName), + ProbeId: fwflex.StringFromFramework(ctx, new.ProbeID), + } + + if !new.Destination.Equal(old.Destination) { + input.Destination = fwflex.StringFromFramework(ctx, new.Destination) + } + if !new.DestinationPort.Equal(old.DestinationPort) { + input.DestinationPort = fwflex.Int32FromFramework(ctx, new.DestinationPort) + } + if !new.PacketSize.Equal(old.PacketSize) { + input.PacketSize = fwflex.Int32FromFramework(ctx, new.PacketSize) + } + if !new.Protocol.Equal(old.Protocol) { + input.Protocol = new.Protocol.ValueEnum() + } + + _, err := conn.UpdateProbe(ctx, input) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("updating CloudWatch Network Monitor Probe (%s)", new.ID.String()), err.Error()) + + return + } + + outputGP, err := waitProbeReady(ctx, conn, new.MonitorName.ValueString(), new.ProbeID.ValueString()) + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for CloudWatch Network Monitor Probe (%s) update", new.ID.ValueString()), err.Error()) + + return + } + + // Set values for unknowns. + new.AddressFamily = fwtypes.StringEnumValue(outputGP.AddressFamily) + } else { + new.AddressFamily = old.AddressFamily + } + + response.Diagnostics.Append(response.State.Set(ctx, &new)...) 
+} + +func (r *probeResource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + var data probeResourceModel + response.Diagnostics.Append(request.State.Get(ctx, &data)...) + if response.Diagnostics.HasError() { + return + } + + conn := r.Meta().NetworkMonitorClient(ctx) + + _, err := conn.DeleteProbe(ctx, &networkmonitor.DeleteProbeInput{ + MonitorName: fwflex.StringFromFramework(ctx, data.MonitorName), + ProbeId: fwflex.StringFromFramework(ctx, data.ProbeID), + }) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + + if err != nil { + response.Diagnostics.AddError(fmt.Sprintf("deleting CloudWatch Network Monitor Probe (%s)", data.ID.ValueString()), err.Error()) + + return + } + + if _, err := waitProbeDeleted(ctx, conn, data.MonitorName.ValueString(), data.ProbeID.ValueString()); err != nil { + response.Diagnostics.AddError(fmt.Sprintf("waiting for CloudWatch Network Monitor Probe (%s) delete", data.ID.ValueString()), err.Error()) + + return + } +} + +func (r *probeResource) ModifyPlan(ctx context.Context, request resource.ModifyPlanRequest, response *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, request, response) +} + +func findProbeByTwoPartKey(ctx context.Context, conn *networkmonitor.Client, monitorName, probeID string) (*networkmonitor.GetProbeOutput, error) { + input := &networkmonitor.GetProbeInput{ + MonitorName: aws.String(monitorName), + ProbeId: aws.String(probeID), + } + + output, err := conn.GetProbe(ctx, input) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func statusProbe(ctx context.Context, conn *networkmonitor.Client, monitorName, probeID string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + output, 
err := findProbeByTwoPartKey(ctx, conn, monitorName, probeID) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, string(output.State), nil + } +} + +func waitProbeReady(ctx context.Context, conn *networkmonitor.Client, monitorName, probeID string) (*networkmonitor.GetProbeOutput, error) { + const ( + timeout = time.Minute * 15 + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ProbeStatePending), + Target: enum.Slice(awstypes.ProbeStateActive, awstypes.ProbeStateInactive), + Refresh: statusProbe(ctx, conn, monitorName, probeID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkmonitor.GetProbeOutput); ok { + return output, err + } + + return nil, err +} + +func waitProbeDeleted(ctx context.Context, conn *networkmonitor.Client, monitorName, probeID string) (*networkmonitor.GetProbeOutput, error) { + const ( + timeout = time.Minute * 15 + ) + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(awstypes.ProbeStateActive, awstypes.ProbeStateInactive, awstypes.ProbeStateDeleting), + Target: []string{}, + Refresh: statusProbe(ctx, conn, monitorName, probeID), + Timeout: timeout, + MinTimeout: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*networkmonitor.GetProbeOutput); ok { + return output, err + } + + return nil, err +} + +type probeResourceModel struct { + AddressFamily fwtypes.StringEnum[awstypes.AddressFamily] `tfsdk:"address_family"` + Destination types.String `tfsdk:"destination"` + DestinationPort types.Int64 `tfsdk:"destination_port"` + ID types.String `tfsdk:"id"` + MonitorName types.String `tfsdk:"monitor_name"` + PacketSize types.Int64 `tfsdk:"packet_size"` + ProbeARN types.String `tfsdk:"arn"` + ProbeID types.String `tfsdk:"probe_id"` + Protocol fwtypes.StringEnum[awstypes.Protocol] 
`tfsdk:"protocol"` + SourceARN fwtypes.ARN `tfsdk:"source_arn"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` + VpcID types.String `tfsdk:"vpc_id"` +} + +const ( + probeResourceIDPartCount = 2 +) + +func (m *probeResourceModel) InitFromID() error { + id := m.ID.ValueString() + parts, err := flex.ExpandResourceId(id, probeResourceIDPartCount, false) + + if err != nil { + return err + } + + m.MonitorName = types.StringValue(parts[0]) + m.ProbeID = types.StringValue(parts[1]) + + return nil +} + +func (m *probeResourceModel) setID() { + m.ID = types.StringValue(errs.Must(flex.FlattenResourceId([]string{m.MonitorName.ValueString(), m.ProbeID.ValueString()}, probeResourceIDPartCount, false))) +} diff --git a/internal/service/networkmonitor/probe_test.go b/internal/service/networkmonitor/probe_test.go new file mode 100644 index 00000000000..1bbf0a6b253 --- /dev/null +++ b/internal/service/networkmonitor/probe_test.go @@ -0,0 +1,348 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package networkmonitor_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go/service/ec2" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfec2 "github.com/hashicorp/terraform-provider-aws/internal/service/ec2" + tfnetworkmonitor "github.com/hashicorp/terraform-provider-aws/internal/service/networkmonitor" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccNetworkMonitorProbe_basic(t *testing.T) { + ctx := acctest.Context(t) + var vpc ec2.Vpc + resourceName := "aws_networkmonitor_probe.test" + vpcResourceName := "aws_vpc.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProbeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProbeConfig_basic(rName, "10.0.0.1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProbeExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "address_family"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, names.AttrDestination, "10.0.0.1"), + resource.TestCheckNoResourceAttr(resourceName, "destination_port"), + resource.TestCheckResourceAttrSet(resourceName, "packet_size"), + resource.TestCheckResourceAttrSet(resourceName, "probe_id"), + resource.TestCheckResourceAttr(resourceName, 
names.AttrProtocol, "ICMP"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), + resource.TestCheckResourceAttrSet(resourceName, names.AttrVPCID), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { // nosemgrep:ci.test-config-funcs-correct-form + Config: acctest.ConfigVPCWithSubnets(rName, 1), + Check: resource.ComposeTestCheckFunc( + acctest.CheckVPCExists(ctx, vpcResourceName, &vpc), + testAccCheckProbeDeleteSecurityGroup(ctx, rName, &vpc), + ), + }, + }, + }) +} + +func TestAccNetworkMonitorProbe_disappears(t *testing.T) { + ctx := acctest.Context(t) + var vpc ec2.Vpc + resourceName := "aws_networkmonitor_probe.test" + vpcResourceName := "aws_vpc.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProbeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProbeConfig_basic(rName, "10.0.0.1"), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckProbeExists(ctx, resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfnetworkmonitor.ResourceProbe, resourceName), + ), + ExpectNonEmptyPlan: true, + }, + { // nosemgrep:ci.test-config-funcs-correct-form + Config: acctest.ConfigVPCWithSubnets(rName, 1), + Check: resource.ComposeTestCheckFunc( + acctest.CheckVPCExists(ctx, vpcResourceName, &vpc), + testAccCheckProbeDeleteSecurityGroup(ctx, rName, &vpc), + ), + }, + }, + }) +} + +func TestAccNetworkMonitorProbe_tags(t *testing.T) { + ctx := acctest.Context(t) + var vpc ec2.Vpc + resourceName := "aws_networkmonitor_probe.test" + vpcResourceName := "aws_vpc.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProbeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProbeConfig_tags1(rName, acctest.CtKey1, acctest.CtValue1), + Check: resource.ComposeTestCheckFunc( + testAccCheckProbeExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccProbeConfig_tags2(rName, acctest.CtKey1, acctest.CtValue1Updated, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckProbeExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct2), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey1, acctest.CtValue1Updated), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { + Config: testAccProbeConfig_tags1(rName, acctest.CtKey2, acctest.CtValue2), + Check: resource.ComposeTestCheckFunc( + testAccCheckProbeExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsKey2, acctest.CtValue2), + ), + }, + { // nosemgrep:ci.test-config-funcs-correct-form + Config: acctest.ConfigVPCWithSubnets(rName, 1), + Check: resource.ComposeTestCheckFunc( + acctest.CheckVPCExists(ctx, vpcResourceName, &vpc), + testAccCheckProbeDeleteSecurityGroup(ctx, rName, &vpc), + ), + }, + }, + }) +} + +func TestAccNetworkMonitorProbe_update(t *testing.T) { + ctx := acctest.Context(t) + var vpc ec2.Vpc + resourceName := "aws_networkmonitor_probe.test" + vpcResourceName := "aws_vpc.test" + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.NetworkMonitorServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckProbeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccProbeConfig_full(rName, "10.0.0.1", 8080, 256), + Check: resource.ComposeTestCheckFunc( + testAccCheckProbeExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "address_family"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, names.AttrDestination, "10.0.0.1"), + resource.TestCheckResourceAttr(resourceName, "destination_port", "8080"), + resource.TestCheckResourceAttr(resourceName, "packet_size", "256"), + resource.TestCheckResourceAttrSet(resourceName, "probe_id"), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, "TCP"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), + resource.TestCheckResourceAttrSet(resourceName, names.AttrVPCID), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccProbeConfig_full(rName, "10.0.0.2", 8443, 512), + Check: resource.ComposeTestCheckFunc( + testAccCheckProbeExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "address_family"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), + resource.TestCheckResourceAttr(resourceName, names.AttrDestination, "10.0.0.2"), + resource.TestCheckResourceAttr(resourceName, "destination_port", "8443"), + resource.TestCheckResourceAttr(resourceName, "packet_size", "512"), + resource.TestCheckResourceAttrSet(resourceName, "probe_id"), + resource.TestCheckResourceAttr(resourceName, names.AttrProtocol, "TCP"), + resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, 
acctest.Ct0), + resource.TestCheckResourceAttrSet(resourceName, names.AttrVPCID), + ), + }, + { // nosemgrep:ci.test-config-funcs-correct-form + Config: acctest.ConfigVPCWithSubnets(rName, 1), + Check: resource.ComposeTestCheckFunc( + acctest.CheckVPCExists(ctx, vpcResourceName, &vpc), + testAccCheckProbeDeleteSecurityGroup(ctx, rName, &vpc), + ), + }, + }, + }) +} + +func testAccCheckProbeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkMonitorClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_networkmonitor_probe" { + continue + } + + _, err := tfnetworkmonitor.FindProbeByTwoPartKey(ctx, conn, rs.Primary.Attributes["monitor_name"], rs.Primary.Attributes["probe_id"]) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("CloudWatch Network Monitor Probe %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckProbeExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).NetworkMonitorClient(ctx) + + _, err := tfnetworkmonitor.FindProbeByTwoPartKey(ctx, conn, rs.Primary.Attributes["monitor_name"], rs.Primary.Attributes["probe_id"]) + + return err + } +} + +func testAccCheckProbeDeleteSecurityGroup(ctx context.Context, rName string, vpc *ec2.Vpc) resource.TestCheckFunc { + return func(s *terraform.State) error { + meta := acctest.Provider.Meta() + conn := meta.(*conns.AWSClient).EC2Conn(ctx) + + description := "Created By Amazon CloudWatch Network Monitor for " + rName + v, err := tfec2.FindSecurityGroupByDescriptionAndVPCID(ctx, conn, description, aws.ToString(vpc.VpcId)) + + if tfresource.NotFound(err) { + // Already gone. 
+ return nil + } + + if err != nil { + return err + } + + r := tfec2.ResourceSecurityGroup() + d := r.Data(nil) + d.SetId(aws.ToString(v.GroupId)) + d.Set("revoke_rules_on_delete", true) + + err = acctest.DeleteResource(ctx, r, d, meta) + + return err + } +} + +func testAccProbeConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` +resource "aws_networkmonitor_monitor" "test" { + aggregation_period = 30 + monitor_name = %[1]q + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccProbeConfig_basic(rName, destination string) string { + return acctest.ConfigCompose(testAccProbeConfig_base(rName), fmt.Sprintf(` +resource "aws_networkmonitor_probe" "test" { + monitor_name = aws_networkmonitor_monitor.test.monitor_name + destination = %[2]q + protocol = "ICMP" + source_arn = aws_subnet.test[0].arn +} +`, rName, destination)) +} + +func testAccProbeConfig_full(rName, destination string, port, packetSize int) string { + return acctest.ConfigCompose(testAccProbeConfig_base(rName), fmt.Sprintf(` +resource "aws_networkmonitor_probe" "test" { + monitor_name = aws_networkmonitor_monitor.test.monitor_name + destination = %[2]q + destination_port = %[3]d + protocol = "TCP" + source_arn = aws_subnet.test[0].arn + packet_size = %[4]d +} +`, rName, destination, port, packetSize)) +} + +func testAccProbeConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccProbeConfig_base(rName), fmt.Sprintf(` +resource "aws_networkmonitor_probe" "test" { + monitor_name = aws_networkmonitor_monitor.test.monitor_name + destination = "10.0.0.1" + protocol = "ICMP" + source_arn = aws_subnet.test[0].arn + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccProbeConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccProbeConfig_base(rName), fmt.Sprintf(` +resource "aws_networkmonitor_probe" "test" { 
+ monitor_name = aws_networkmonitor_monitor.test.monitor_name + destination = "10.0.0.1" + protocol = "ICMP" + source_arn = aws_subnet.test[0].arn + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} diff --git a/internal/service/networkmonitor/service_endpoint_resolver_gen.go b/internal/service/networkmonitor/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..c4505310f2d --- /dev/null +++ b/internal/service/networkmonitor/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package networkmonitor + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + networkmonitor_sdkv2 "github.com/aws/aws-sdk-go-v2/service/networkmonitor" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ networkmonitor_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver networkmonitor_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: networkmonitor_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params networkmonitor_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = 
r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up networkmonitor endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*networkmonitor_sdkv2.Options) { + return func(o *networkmonitor_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/networkmonitor/service_endpoints_gen_test.go b/internal/service/networkmonitor/service_endpoints_gen_test.go new file mode 100644 index 00000000000..3af81e1d308 --- /dev/null +++ b/internal/service/networkmonitor/service_endpoints_gen_test.go @@ -0,0 +1,610 @@ +// Code generated by internal/generate/serviceendpointtests/main.go; DO NOT EDIT. 
+ +package networkmonitor_test + +import ( + "context" + "errors" + "fmt" + "maps" + "net" + "net/url" + "os" + "path/filepath" + "reflect" + "strings" + "testing" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + networkmonitor_sdkv2 "github.com/aws/aws-sdk-go-v2/service/networkmonitor" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/google/go-cmp/cmp" + "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + terraformsdk "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/provider" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type endpointTestCase struct { + with []setupFunc + expected caseExpectations +} + +type caseSetup struct { + config map[string]any + configFile configFile + environmentVariables map[string]string +} + +type configFile struct { + baseUrl string + serviceUrl string +} + +type caseExpectations struct { + diags diag.Diagnostics + endpoint string + region string +} + +type apiCallParams struct { + endpoint string + region string +} + +type setupFunc func(setup *caseSetup) + +type callFunc func(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams + +const ( + packageNameConfigEndpoint = "https://packagename-config.endpoint.test/" + awsServiceEnvvarEndpoint = "https://service-envvar.endpoint.test/" + baseEnvvarEndpoint = "https://base-envvar.endpoint.test/" + serviceConfigFileEndpoint = "https://service-configfile.endpoint.test/" + baseConfigFileEndpoint = "https://base-configfile.endpoint.test/" +) + +const ( + packageName = "networkmonitor" + awsEnvVar = 
"AWS_ENDPOINT_URL_NETWORKMONITOR" + baseEnvVar = "AWS_ENDPOINT_URL" + configParam = "networkmonitor" +) + +const ( + expectedCallRegion = "us-west-2" //lintignore:AWSAT003 +) + +func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.Setenv + const providerRegion = "us-west-2" //lintignore:AWSAT003 + const expectedEndpointRegion = providerRegion + + testcases := map[string]endpointTestCase{ + "no config": { + with: []setupFunc{withNoConfig}, + expected: expectDefaultEndpoint(t, expectedEndpointRegion), + }, + + // Package name endpoint on Config + + "package name endpoint config": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides aws service envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withAwsEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base envvar": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEnvVar, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides service config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withServiceEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + "package name endpoint config overrides base config file": { + with: []setupFunc{ + withPackageNameEndpointInConfig, + withBaseEndpointInConfigFile, + }, + expected: expectPackageNameConfigEndpoint(), + }, + + // Service endpoint in AWS envvar + + "service aws envvar": { + with: []setupFunc{ + withAwsEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base envvar": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEnvVar, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides service config file": { + with: []setupFunc{ + withAwsEnvVar, + withServiceEndpointInConfigFile, + }, + expected: 
expectAwsEnvVarEndpoint(), + }, + + "service aws envvar overrides base config file": { + with: []setupFunc{ + withAwsEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectAwsEnvVarEndpoint(), + }, + + // Base endpoint in envvar + + "base endpoint envvar": { + with: []setupFunc{ + withBaseEnvVar, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides service config file": { + with: []setupFunc{ + withBaseEnvVar, + withServiceEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + "base endpoint envvar overrides base config file": { + with: []setupFunc{ + withBaseEnvVar, + withBaseEndpointInConfigFile, + }, + expected: expectBaseEnvVarEndpoint(), + }, + + // Service endpoint in config file + + "service config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + "service config file overrides base config file": { + with: []setupFunc{ + withServiceEndpointInConfigFile, + withBaseEndpointInConfigFile, + }, + expected: expectServiceConfigFileEndpoint(), + }, + + // Base endpoint in config file + + "base endpoint config file": { + with: []setupFunc{ + withBaseEndpointInConfigFile, + }, + expected: expectBaseConfigFileEndpoint(), + }, + + // Use FIPS endpoint on Config + + "use fips config": { + with: []setupFunc{ + withUseFIPSInConfig, + }, + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), + }, + + "use fips config with package name endpoint config": { + with: []setupFunc{ + withUseFIPSInConfig, + withPackageNameEndpointInConfig, + }, + expected: expectPackageNameConfigEndpoint(), + }, + } + + for name, testcase := range testcases { //nolint:paralleltest // uses t.Setenv + testcase := testcase + + t.Run(name, func(t *testing.T) { + testEndpointCase(t, providerRegion, testcase, callService) + }) + } +} + +func defaultEndpoint(region string) (url.URL, error) { + r := networkmonitor_sdkv2.NewDefaultEndpointResolverV2() + + ep, 
err := r.ResolveEndpoint(context.Background(), networkmonitor_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := networkmonitor_sdkv2.NewDefaultEndpointResolverV2() + + ep, err := r.ResolveEndpoint(context.Background(), networkmonitor_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), + }) + if err != nil { + return url.URL{}, err + } + + if ep.URI.Path == "" { + ep.URI.Path = "/" + } + + return ep.URI, nil +} + +func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { + t.Helper() + + client := meta.NetworkMonitorClient(ctx) + + var result apiCallParams + + _, err := client.ListMonitors(ctx, &networkmonitor_sdkv2.ListMonitorsInput{}, + func(opts *networkmonitor_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) + } + + return result +} + +func withNoConfig(_ *caseSetup) { + // no-op +} + +func withPackageNameEndpointInConfig(setup *caseSetup) { + if _, ok := setup.config[names.AttrEndpoints]; !ok { + setup.config[names.AttrEndpoints] = []any{ + map[string]any{}, + } + } + endpoints := setup.config[names.AttrEndpoints].([]any)[0].(map[string]any) + endpoints[packageName] = packageNameConfigEndpoint +} + +func withAwsEnvVar(setup *caseSetup) { + setup.environmentVariables[awsEnvVar] = awsServiceEnvvarEndpoint +} + +func withBaseEnvVar(setup *caseSetup) { + setup.environmentVariables[baseEnvVar] = baseEnvvarEndpoint +} + +func withServiceEndpointInConfigFile(setup *caseSetup) { + 
setup.configFile.serviceUrl = serviceConfigFileEndpoint +} + +func withBaseEndpointInConfigFile(setup *caseSetup) { + setup.configFile.baseUrl = baseConfigFileEndpoint +} + +func withUseFIPSInConfig(setup *caseSetup) { + setup.config["use_fips_endpoint"] = true +} + +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + + return caseExpectations{ + endpoint: endpoint.String(), + region: expectedCallRegion, + } +} + +func expectPackageNameConfigEndpoint() caseExpectations { + return caseExpectations{ + endpoint: packageNameConfigEndpoint, + region: expectedCallRegion, + } +} + +func expectAwsEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: awsServiceEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseEnvVarEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseEnvvarEndpoint, + region: expectedCallRegion, + } +} + +func expectServiceConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: serviceConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func expectBaseConfigFileEndpoint() caseExpectations { + return caseExpectations{ + endpoint: baseConfigFileEndpoint, + region: expectedCallRegion, + } +} + +func 
testEndpointCase(t *testing.T, region string, testcase endpointTestCase, callF callFunc) { + t.Helper() + + ctx := context.Background() + + setup := caseSetup{ + config: map[string]any{}, + environmentVariables: map[string]string{}, + } + + for _, f := range testcase.with { + f(&setup) + } + + config := map[string]any{ + names.AttrAccessKey: servicemocks.MockStaticAccessKey, + names.AttrSecretKey: servicemocks.MockStaticSecretKey, + names.AttrRegion: region, + names.AttrSkipCredentialsValidation: true, + names.AttrSkipRequestingAccountID: true, + } + + maps.Copy(config, setup.config) + + if setup.configFile.baseUrl != "" || setup.configFile.serviceUrl != "" { + config[names.AttrProfile] = "default" + tempDir := t.TempDir() + writeSharedConfigFile(t, &config, tempDir, generateSharedConfigFile(setup.configFile)) + } + + for k, v := range setup.environmentVariables { + t.Setenv(k, v) + } + + p, err := provider.New(ctx) + if err != nil { + t.Fatal(err) + } + + expectedDiags := testcase.expected.diags + expectedDiags = append( + expectedDiags, + errs.NewWarningDiagnostic( + "AWS account ID not found for provider", + "See https://registry.terraform.io/providers/hashicorp/aws/latest/docs#skip_requesting_account_id for implications.", + ), + ) + + diags := p.Configure(ctx, terraformsdk.NewResourceConfigRaw(config)) + + if diff := cmp.Diff(diags, expectedDiags, cmp.Comparer(sdkdiag.Comparer)); diff != "" { + t.Errorf("unexpected diagnostics difference: %s", diff) + } + + if diags.HasError() { + return + } + + meta := p.Meta().(*conns.AWSClient) + + callParams := callF(ctx, t, meta) + + if e, a := testcase.expected.endpoint, callParams.endpoint; e != a { + t.Errorf("expected endpoint %q, got %q", e, a) + } + + if e, a := testcase.expected.region, callParams.region; e != a { + t.Errorf("expected region %q, got %q", e, a) + } +} + +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error 
{ + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) 
(middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + +func generateSharedConfigFile(config configFile) string { + var buf strings.Builder + + buf.WriteString(` +[default] +aws_access_key_id = DefaultSharedCredentialsAccessKey +aws_secret_access_key = DefaultSharedCredentialsSecretKey +`) + if config.baseUrl != "" { + buf.WriteString(fmt.Sprintf("endpoint_url = %s\n", config.baseUrl)) + } + + if config.serviceUrl != "" { + buf.WriteString(fmt.Sprintf(` +services = endpoint-test + +[services endpoint-test] +%[1]s = + endpoint_url = %[2]s +`, configParam, serviceConfigFileEndpoint)) + } + + return buf.String() +} + +func writeSharedConfigFile(t *testing.T, config *map[string]any, tempDir, content string) string { + t.Helper() + + file, err := os.Create(filepath.Join(tempDir, "aws-sdk-go-base-shared-configuration-file")) + if err != nil { + t.Fatalf("creating shared configuration file: %s", err) + } + + _, err = file.WriteString(content) + if err != nil { + t.Fatalf(" writing shared configuration file: %s", err) + } + + if v, ok := (*config)[names.AttrSharedConfigFiles]; !ok { + (*config)[names.AttrSharedConfigFiles] = []any{file.Name()} + } else { + (*config)[names.AttrSharedConfigFiles] = append(v.([]any), file.Name()) + } + + return file.Name() +} diff --git a/internal/service/networkmonitor/service_package_gen.go b/internal/service/networkmonitor/service_package_gen.go new file mode 100644 index 00000000000..de51b85ce97 --- /dev/null +++ b/internal/service/networkmonitor/service_package_gen.go @@ -0,0 +1,64 @@ +// Code generated by 
internal/generate/servicepackage/main.go; DO NOT EDIT. + +package networkmonitor + +import ( + "context" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + networkmonitor_sdkv2 "github.com/aws/aws-sdk-go-v2/service/networkmonitor" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +type servicePackage struct{} + +func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { + return []*types.ServicePackageFrameworkDataSource{} +} + +func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { + return []*types.ServicePackageFrameworkResource{ + { + Factory: newMonitorResource, + Name: "Monitor", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, + }, + { + Factory: newProbeResource, + Name: "Probe", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrARN, + }, + }, + } +} + +func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { + return []*types.ServicePackageSDKDataSource{} +} + +func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { + return []*types.ServicePackageSDKResource{} +} + +func (p *servicePackage) ServicePackageName() string { + return names.NetworkMonitor +} + +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*networkmonitor_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) + + return networkmonitor_sdkv2.NewFromConfig(cfg, + networkmonitor_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil +} + +func ServicePackage(ctx context.Context) conns.ServicePackage { + return &servicePackage{} +} diff --git a/internal/service/networkmonitor/tags_gen.go b/internal/service/networkmonitor/tags_gen.go new file mode 100644 index 00000000000..40ab57a4ca8 --- /dev/null +++ b/internal/service/networkmonitor/tags_gen.go @@ -0,0 +1,128 @@ +// Code generated by internal/generate/tags/main.go; DO NOT EDIT. +package networkmonitor + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/networkmonitor" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/logging" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// listTags lists networkmonitor service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. +func listTags(ctx context.Context, conn *networkmonitor.Client, identifier string, optFns ...func(*networkmonitor.Options)) (tftags.KeyValueTags, error) { + input := &networkmonitor.ListTagsForResourceInput{ + ResourceArn: aws.String(identifier), + } + + output, err := conn.ListTagsForResource(ctx, input, optFns...) + + if err != nil { + return tftags.New(ctx, nil), err + } + + return KeyValueTags(ctx, output.Tags), nil +} + +// ListTags lists networkmonitor service tags and set them in Context. 
+// It is called from outside this package. +func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { + tags, err := listTags(ctx, meta.(*conns.AWSClient).NetworkMonitorClient(ctx), identifier) + + if err != nil { + return err + } + + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(tags) + } + + return nil +} + +// map[string]string handling + +// Tags returns networkmonitor service tags. +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() +} + +// KeyValueTags creates tftags.KeyValueTags from networkmonitor service tags. +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { + return tftags.New(ctx, tags) +} + +// getTagsIn returns networkmonitor service tags from Context. +// nil is returned if there are no input tags. +func getTagsIn(ctx context.Context) map[string]string { + if inContext, ok := tftags.FromContext(ctx); ok { + if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + return tags + } + } + + return nil +} + +// setTagsOut sets networkmonitor service tags in Context. +func setTagsOut(ctx context.Context, tags map[string]string) { + if inContext, ok := tftags.FromContext(ctx); ok { + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) + } +} + +// updateTags updates networkmonitor service tags. +// The identifier is typically the Amazon Resource Name (ARN), although +// it may also be a different identifier depending on the service. 
+func updateTags(ctx context.Context, conn *networkmonitor.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*networkmonitor.Options)) error { + oldTags := tftags.New(ctx, oldTagsMap) + newTags := tftags.New(ctx, newTagsMap) + + ctx = tflog.SetField(ctx, logging.KeyResourceId, identifier) + + removedTags := oldTags.Removed(newTags) + removedTags = removedTags.IgnoreSystem(names.NetworkMonitor) + if len(removedTags) > 0 { + input := &networkmonitor.UntagResourceInput{ + ResourceArn: aws.String(identifier), + TagKeys: removedTags.Keys(), + } + + _, err := conn.UntagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("untagging resource (%s): %w", identifier, err) + } + } + + updatedTags := oldTags.Updated(newTags) + updatedTags = updatedTags.IgnoreSystem(names.NetworkMonitor) + if len(updatedTags) > 0 { + input := &networkmonitor.TagResourceInput{ + ResourceArn: aws.String(identifier), + Tags: Tags(updatedTags), + } + + _, err := conn.TagResource(ctx, input, optFns...) + + if err != nil { + return fmt.Errorf("tagging resource (%s): %w", identifier, err) + } + } + + return nil +} + +// UpdateTags updates networkmonitor service tags. +// It is called from outside this package. 
+func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { + return updateTags(ctx, meta.(*conns.AWSClient).NetworkMonitorClient(ctx), identifier, oldTags, newTags) +} diff --git a/internal/service/oam/link.go b/internal/service/oam/link.go index 4d79a0fac2c..4fd5fb1c355 100644 --- a/internal/service/oam/link.go +++ b/internal/service/oam/link.go @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" @@ -58,6 +59,43 @@ func ResourceLink() *schema.Resource { Required: true, ForceNew: true, }, + "link_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_group_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFilter: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 2000), + }, + }, + }, + }, + "metric_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFilter: { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 2000), + }, + }, + }, + }, + }, + }, + }, "link_id": { Type: schema.TypeString, Computed: true, @@ -98,10 +136,11 @@ func resourceLinkCreate(ctx context.Context, d *schema.ResourceData, meta interf conn := meta.(*conns.AWSClient).ObservabilityAccessManagerClient(ctx) in := &oam.CreateLinkInput{ - LabelTemplate: 
aws.String(d.Get("label_template").(string)), - ResourceTypes: flex.ExpandStringyValueSet[types.ResourceType](d.Get("resource_types").(*schema.Set)), - SinkIdentifier: aws.String(d.Get("sink_identifier").(string)), - Tags: getTagsIn(ctx), + LabelTemplate: aws.String(d.Get("label_template").(string)), + LinkConfiguration: expandLinkConfiguration(d.Get("link_configuration").([]interface{})), + ResourceTypes: flex.ExpandStringyValueSet[types.ResourceType](d.Get("resource_types").(*schema.Set)), + SinkIdentifier: aws.String(d.Get("sink_identifier").(string)), + Tags: getTagsIn(ctx), } out, err := conn.CreateLink(ctx, in) @@ -137,6 +176,7 @@ func resourceLinkRead(ctx context.Context, d *schema.ResourceData, meta interfac d.Set(names.AttrARN, out.Arn) d.Set("label", out.Label) d.Set("label_template", out.LabelTemplate) + d.Set("link_configuration", flattenLinkConfiguration(out.LinkConfiguration)) d.Set("link_id", out.Id) d.Set("resource_types", flex.FlattenStringValueList(out.ResourceTypes)) d.Set("sink_arn", out.SinkArn) @@ -155,8 +195,13 @@ func resourceLinkUpdate(ctx context.Context, d *schema.ResourceData, meta interf Identifier: aws.String(d.Id()), } - if d.HasChanges("resource_types") { + if d.HasChanges("resource_types", "link_configuration") { in.ResourceTypes = flex.ExpandStringyValueSet[types.ResourceType](d.Get("resource_types").(*schema.Set)) + + if d.HasChanges("link_configuration") { + in.LinkConfiguration = expandLinkConfiguration(d.Get("link_configuration").([]interface{})) + } + update = true } @@ -216,3 +261,93 @@ func findLinkByID(ctx context.Context, conn *oam.Client, id string) (*oam.GetLin return out, nil } + +func expandLinkConfiguration(l []interface{}) *types.LinkConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + config := &types.LinkConfiguration{} + + m := l[0].(map[string]interface{}) + if v, ok := m["log_group_configuration"]; ok { + config.LogGroupConfiguration = expandLogGroupConfiguration(v.([]interface{})) + } + if v, 
ok := m["metric_configuration"]; ok { + config.MetricConfiguration = expandMetricConfiguration(v.([]interface{})) + } + + return config +} + +func expandLogGroupConfiguration(l []interface{}) *types.LogGroupConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + config := &types.LogGroupConfiguration{} + + m := l[0].(map[string]interface{}) + if v, ok := m[names.AttrFilter]; ok && v != "" { + config.Filter = aws.String(v.(string)) + } + + return config +} + +func expandMetricConfiguration(l []interface{}) *types.MetricConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + config := &types.MetricConfiguration{} + + m := l[0].(map[string]interface{}) + if v, ok := m[names.AttrFilter]; ok && v != "" { + config.Filter = aws.String(v.(string)) + } + + return config +} + +func flattenLinkConfiguration(a *types.LinkConfiguration) []interface{} { + if a == nil { + return []interface{}{} + } + m := map[string]interface{}{} + + if a.LogGroupConfiguration != nil { + m["log_group_configuration"] = flattenLogGroupConfiguration(a.LogGroupConfiguration) + } + if a.MetricConfiguration != nil { + m["metric_configuration"] = flattenMetricConfiguration(a.MetricConfiguration) + } + + return []interface{}{m} +} + +func flattenLogGroupConfiguration(a *types.LogGroupConfiguration) []interface{} { + if a == nil { + return []interface{}{} + } + m := map[string]interface{}{} + + if a.Filter != nil { + m[names.AttrFilter] = aws.ToString(a.Filter) + } + + return []interface{}{m} +} + +func flattenMetricConfiguration(a *types.MetricConfiguration) []interface{} { + if a == nil { + return []interface{}{} + } + m := map[string]interface{}{} + + if a.Filter != nil { + m[names.AttrFilter] = aws.ToString(a.Filter) + } + + return []interface{}{m} +} diff --git a/internal/service/oam/link_data_source.go b/internal/service/oam/link_data_source.go index af8b0ff3620..e4739a74bd0 100644 --- a/internal/service/oam/link_data_source.go +++ 
b/internal/service/oam/link_data_source.go @@ -26,22 +26,54 @@ func DataSourceLink() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "link_id": { + "label": { Type: schema.TypeString, Computed: true, }, - "link_identifier": { + "label_template": { Type: schema.TypeString, - Required: true, + Computed: true, }, - "label": { - Type: schema.TypeString, + "link_configuration": { + Type: schema.TypeList, Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_group_configuration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFilter: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "metric_configuration": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + names.AttrFilter: { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, }, - "label_template": { + "link_id": { Type: schema.TypeString, Computed: true, }, + "link_identifier": { + Type: schema.TypeString, + Required: true, + }, "resource_types": { Type: schema.TypeSet, Computed: true, @@ -76,9 +108,10 @@ func dataSourceLinkRead(ctx context.Context, d *schema.ResourceData, meta interf d.SetId(aws.ToString(out.Arn)) d.Set(names.AttrARN, out.Arn) - d.Set("link_id", out.Id) d.Set("label", out.Label) d.Set("label_template", out.LabelTemplate) + d.Set("link_configuration", flattenLinkConfiguration(out.LinkConfiguration)) + d.Set("link_id", out.Id) d.Set("resource_types", flex.FlattenStringValueList(out.ResourceTypes)) d.Set("sink_arn", out.SinkArn) diff --git a/internal/service/oam/link_data_source_test.go b/internal/service/oam/link_data_source_test.go index 0cd91e3462c..779424fcad3 100644 --- a/internal/service/oam/link_data_source_test.go +++ b/internal/service/oam/link_data_source_test.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func 
TestAccObservabilityAccessManagerLinkDataSource_basic(t *testing.T) { +func testAccObservabilityAccessManagerLinkDataSource_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -51,6 +51,90 @@ func TestAccObservabilityAccessManagerLinkDataSource_basic(t *testing.T) { }) } +func testAccObservabilityAccessManagerLinkDataSource_logGroupConfiguration(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_oam_link.test" + filter := "LogGroupName LIKE 'aws/lambda/%' OR LogGroupName LIKE 'AWSLogs%'" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + acctest.PreCheckPartitionHasService(t, names.ObservabilityAccessManagerEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ObservabilityAccessManagerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccLinkDataSourceConfig_logGroupConfiguration(rName, filter), + Check: resource.ComposeTestCheckFunc( + acctest.MatchResourceAttrRegionalARN(dataSourceName, names.AttrARN, "oam", regexache.MustCompile(`link/+.`)), + resource.TestCheckResourceAttrSet(dataSourceName, "label"), + resource.TestCheckResourceAttr(dataSourceName, "label_template", "$AccountName"), + resource.TestCheckResourceAttr(dataSourceName, "link_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(dataSourceName, "link_configuration.0.log_group_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(dataSourceName, "link_configuration.0.log_group_configuration.0.filter", filter), + resource.TestCheckResourceAttr(dataSourceName, "link_configuration.0.metric_configuration.#", acctest.Ct0), + resource.TestCheckResourceAttrSet(dataSourceName, "link_id"), + 
resource.TestCheckResourceAttr(dataSourceName, "resource_types.#", acctest.Ct1), + resource.TestCheckResourceAttr(dataSourceName, "resource_types.0", "AWS::Logs::LogGroup"), + resource.TestCheckResourceAttrSet(dataSourceName, "sink_arn"), + resource.TestCheckResourceAttr(dataSourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(dataSourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + }, + }) +} + +func testAccObservabilityAccessManagerLinkDataSource_metricConfiguration(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_oam_link.test" + filter := "Namespace IN ('AWS/EC2', 'AWS/ELB', 'AWS/S3')" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + acctest.PreCheckPartitionHasService(t, names.ObservabilityAccessManagerEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ObservabilityAccessManagerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + Steps: []resource.TestStep{ + { + Config: testAccLinkDataSourceConfig_metricConfiguration(rName, filter), + Check: resource.ComposeTestCheckFunc( + acctest.MatchResourceAttrRegionalARN(dataSourceName, names.AttrARN, "oam", regexache.MustCompile(`link/+.`)), + resource.TestCheckResourceAttrSet(dataSourceName, "label"), + resource.TestCheckResourceAttr(dataSourceName, "label_template", "$AccountName"), + resource.TestCheckResourceAttr(dataSourceName, "link_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(dataSourceName, "link_configuration.0.log_group_configuration.#", acctest.Ct0), + resource.TestCheckResourceAttr(dataSourceName, "link_configuration.0.metric_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(dataSourceName, 
"link_configuration.0.metric_configuration.0.filter", filter), + resource.TestCheckResourceAttrSet(dataSourceName, "link_id"), + resource.TestCheckResourceAttr(dataSourceName, "resource_types.#", acctest.Ct1), + resource.TestCheckResourceAttr(dataSourceName, "resource_types.0", "AWS::CloudWatch::Metric"), + resource.TestCheckResourceAttrSet(dataSourceName, "sink_arn"), + resource.TestCheckResourceAttr(dataSourceName, acctest.CtTagsPercent, acctest.Ct1), + resource.TestCheckResourceAttr(dataSourceName, acctest.CtTagsKey1, acctest.CtValue1), + ), + }, + }, + }) +} + func testAccLinkDataSourceConfig_basic(rName string) string { return acctest.ConfigCompose( acctest.ConfigAlternateAccountProvider(), @@ -110,3 +194,133 @@ data aws_oam_link "test" { } `, rName)) } + +func testAccLinkDataSourceConfig_logGroupConfiguration(rName, filter string) string { + return acctest.ConfigCompose( + acctest.ConfigAlternateAccountProvider(), + fmt.Sprintf(` +data "aws_caller_identity" "source" {} +data "aws_partition" "source" {} + +data "aws_caller_identity" "monitoring" { + provider = "awsalternate" +} +data "aws_partition" "monitoring" { + provider = "awsalternate" +} + +resource "aws_oam_sink" "test" { + provider = "awsalternate" + + name = %[1]q +} + +resource "aws_oam_sink_policy" "test" { + provider = "awsalternate" + + sink_identifier = aws_oam_sink.test.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = ["oam:CreateLink", "oam:UpdateLink"] + Effect = "Allow" + Resource = "*" + Principal = { + "AWS" = "arn:${data.aws_partition.source.partition}:iam::${data.aws_caller_identity.source.account_id}:root" + } + Condition = { + "ForAnyValue:StringEquals" = { + "oam:ResourceTypes" = ["AWS::CloudWatch::Metric", "AWS::Logs::LogGroup"] + } + } + } + ] + }) +} + +resource "aws_oam_link" "test" { + label_template = "$AccountName" + link_configuration { + log_group_configuration { + filter = %[2]q + } + } + resource_types = ["AWS::Logs::LogGroup"] + 
sink_identifier = aws_oam_sink.test.id + + tags = { + key1 = "value1" + } +} + +data aws_oam_link "test" { + link_identifier = aws_oam_link.test.id +} +`, rName, filter)) +} + +func testAccLinkDataSourceConfig_metricConfiguration(rName, filter string) string { + return acctest.ConfigCompose( + acctest.ConfigAlternateAccountProvider(), + fmt.Sprintf(` +data "aws_caller_identity" "source" {} +data "aws_partition" "source" {} + +data "aws_caller_identity" "monitoring" { + provider = "awsalternate" +} +data "aws_partition" "monitoring" { + provider = "awsalternate" +} + +resource "aws_oam_sink" "test" { + provider = "awsalternate" + + name = %[1]q +} + +resource "aws_oam_sink_policy" "test" { + provider = "awsalternate" + + sink_identifier = aws_oam_sink.test.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = ["oam:CreateLink", "oam:UpdateLink"] + Effect = "Allow" + Resource = "*" + Principal = { + "AWS" = "arn:${data.aws_partition.source.partition}:iam::${data.aws_caller_identity.source.account_id}:root" + } + Condition = { + "ForAnyValue:StringEquals" = { + "oam:ResourceTypes" = ["AWS::CloudWatch::Metric", "AWS::Logs::LogGroup"] + } + } + } + ] + }) +} + +resource "aws_oam_link" "test" { + label_template = "$AccountName" + link_configuration { + metric_configuration { + filter = %[2]q + } + } + resource_types = ["AWS::CloudWatch::Metric"] + sink_identifier = aws_oam_sink.test.id + + tags = { + key1 = "value1" + } +} + +data aws_oam_link "test" { + link_identifier = aws_oam_link.test.id +} +`, rName, filter)) +} diff --git a/internal/service/oam/link_test.go b/internal/service/oam/link_test.go index 8314997face..673748efb99 100644 --- a/internal/service/oam/link_test.go +++ b/internal/service/oam/link_test.go @@ -23,7 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccObservabilityAccessManagerLink_basic(t *testing.T) { +func testAccObservabilityAccessManagerLink_basic(t *testing.T) { ctx := 
acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -67,7 +67,7 @@ func TestAccObservabilityAccessManagerLink_basic(t *testing.T) { }) } -func TestAccObservabilityAccessManagerLink_disappears(t *testing.T) { +func testAccObservabilityAccessManagerLink_disappears(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -100,7 +100,7 @@ func TestAccObservabilityAccessManagerLink_disappears(t *testing.T) { }) } -func TestAccObservabilityAccessManagerLink_update(t *testing.T) { +func testAccObservabilityAccessManagerLink_update(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -159,7 +159,7 @@ func TestAccObservabilityAccessManagerLink_update(t *testing.T) { }) } -func TestAccObservabilityAccessManagerLink_tags(t *testing.T) { +func testAccObservabilityAccessManagerLink_tags(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -214,6 +214,110 @@ func TestAccObservabilityAccessManagerLink_tags(t *testing.T) { }) } +func testAccObservabilityAccessManagerLink_logGroupConfiguration(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var link oam.GetLinkOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_oam_link.test" + filter1 := "LogGroupName LIKE 'aws/lambda/%' OR LogGroupName LIKE 'AWSLogs%'" + filter2 := "LogGroupName NOT IN ('Private-Log-Group', 'Private-Log-Group-2')" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + acctest.PreCheckPartitionHasService(t, names.ObservabilityAccessManagerEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ObservabilityAccessManagerServiceID), + ProtoV5ProviderFactories: 
acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLinkConfig_logGroupConfiguration(rName, filter1), + Check: resource.ComposeTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName, &link), + resource.TestCheckResourceAttr(resourceName, "link_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.log_group_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.metric_configuration.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.log_group_configuration.0.filter", filter1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccLinkConfig_logGroupConfiguration(rName, filter2), + Check: resource.ComposeTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName, &link), + resource.TestCheckResourceAttr(resourceName, "link_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.log_group_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.metric_configuration.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.log_group_configuration.0.filter", filter2), + ), + }, + }, + }) +} + +func testAccObservabilityAccessManagerLink_metricConfiguration(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var link oam.GetLinkOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_oam_link.test" + filter1 := "Namespace IN ('AWS/EC2', 'AWS/ELB', 'AWS/S3')" + filter2 := "Namespace NOT LIKE 'AWS/%'" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckAlternateAccount(t) + acctest.PreCheckPartitionHasService(t, 
names.ObservabilityAccessManagerEndpointID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.ObservabilityAccessManagerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), + CheckDestroy: testAccCheckLinkDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccLinkConfig_metricConfiguration(rName, filter1), + Check: resource.ComposeTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName, &link), + resource.TestCheckResourceAttr(resourceName, "link_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.log_group_configuration.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.metric_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.metric_configuration.0.filter", filter1), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccLinkConfig_metricConfiguration(rName, filter2), + Check: resource.ComposeTestCheckFunc( + testAccCheckLinkExists(ctx, resourceName, &link), + resource.TestCheckResourceAttr(resourceName, "link_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.log_group_configuration.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.metric_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "link_configuration.0.metric_configuration.0.filter", filter2), + ), + }, + }, + }) +} + func testAccCheckLinkDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).ObservabilityAccessManagerClient(ctx) @@ -484,3 +588,117 @@ resource "aws_oam_link" "test" { } `, rName, tag1Key, tag1Value, tag2Key, tag2Value)) } + +func testAccLinkConfig_logGroupConfiguration(rName, filter string) string { + return 
acctest.ConfigCompose( + acctest.ConfigAlternateAccountProvider(), + fmt.Sprintf(` +data "aws_caller_identity" "source" {} +data "aws_partition" "source" {} + +data "aws_caller_identity" "monitoring" { + provider = "awsalternate" +} +data "aws_partition" "monitoring" { + provider = "awsalternate" +} + +resource "aws_oam_sink" "test" { + provider = "awsalternate" + + name = %[1]q +} + +resource "aws_oam_sink_policy" "test" { + provider = "awsalternate" + + sink_identifier = aws_oam_sink.test.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = ["oam:CreateLink", "oam:UpdateLink"] + Effect = "Allow" + Resource = "*" + Principal = { + "AWS" = "arn:${data.aws_partition.source.partition}:iam::${data.aws_caller_identity.source.account_id}:root" + } + Condition = { + "ForAnyValue:StringEquals" = { + "oam:ResourceTypes" = ["AWS::CloudWatch::Metric", "AWS::Logs::LogGroup"] + } + } + } + ] + }) +} + +resource "aws_oam_link" "test" { + label_template = "$AccountName" + link_configuration { + log_group_configuration { + filter = %[2]q + } + } + resource_types = ["AWS::Logs::LogGroup"] + sink_identifier = aws_oam_sink.test.id +} +`, rName, filter)) +} + +func testAccLinkConfig_metricConfiguration(rName, filter string) string { + return acctest.ConfigCompose( + acctest.ConfigAlternateAccountProvider(), + fmt.Sprintf(` +data "aws_caller_identity" "source" {} +data "aws_partition" "source" {} + +data "aws_caller_identity" "monitoring" { + provider = "awsalternate" +} +data "aws_partition" "monitoring" { + provider = "awsalternate" +} + +resource "aws_oam_sink" "test" { + provider = "awsalternate" + + name = %[1]q +} + +resource "aws_oam_sink_policy" "test" { + provider = "awsalternate" + + sink_identifier = aws_oam_sink.test.id + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = ["oam:CreateLink", "oam:UpdateLink"] + Effect = "Allow" + Resource = "*" + Principal = { + "AWS" = 
"arn:${data.aws_partition.source.partition}:iam::${data.aws_caller_identity.source.account_id}:root" + } + Condition = { + "ForAnyValue:StringEquals" = { + "oam:ResourceTypes" = ["AWS::CloudWatch::Metric", "AWS::Logs::LogGroup"] + } + } + } + ] + }) +} + +resource "aws_oam_link" "test" { + label_template = "$AccountName" + link_configuration { + metric_configuration { + filter = %[2]q + } + } + resource_types = ["AWS::CloudWatch::Metric"] + sink_identifier = aws_oam_sink.test.id +} +`, rName, filter)) +} diff --git a/internal/service/oam/links_data_source_test.go b/internal/service/oam/links_data_source_test.go index b8ff6e59582..8381ed46512 100644 --- a/internal/service/oam/links_data_source_test.go +++ b/internal/service/oam/links_data_source_test.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccObservabilityAccessManagerLinksDataSource_basic(t *testing.T) { +func testAccObservabilityAccessManagerLinksDataSource_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } diff --git a/internal/service/oam/oam_test.go b/internal/service/oam/oam_test.go new file mode 100644 index 00000000000..2a599ed9664 --- /dev/null +++ b/internal/service/oam/oam_test.go @@ -0,0 +1,50 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package oam_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-aws/internal/acctest" +) + +func TestAccObservabilityAccessManager_serial(t *testing.T) { + t.Parallel() + + testCases := map[string]map[string]func(t *testing.T){ + "Link": { + acctest.CtBasic: testAccObservabilityAccessManagerLink_basic, + acctest.CtDisappears: testAccObservabilityAccessManagerLink_disappears, + "update": testAccObservabilityAccessManagerLink_update, + "tags": testAccObservabilityAccessManagerLink_tags, + "logGroupConfiguration": testAccObservabilityAccessManagerLink_logGroupConfiguration, + "metricConfiguration": testAccObservabilityAccessManagerLink_metricConfiguration, + }, + "LinkDataSource": { + acctest.CtBasic: testAccObservabilityAccessManagerLinkDataSource_basic, + "logGroupConfiguration": testAccObservabilityAccessManagerLinkDataSource_logGroupConfiguration, + "metricConfiguration": testAccObservabilityAccessManagerLinkDataSource_metricConfiguration, + }, + "LinksDataSource": { + acctest.CtBasic: testAccObservabilityAccessManagerLinksDataSource_basic, + }, + "Sink": { + acctest.CtBasic: testAccObservabilityAccessManagerSink_basic, + acctest.CtDisappears: testAccObservabilityAccessManagerSink_disappears, + "tags": testAccObservabilityAccessManagerSink_tags, + }, + "SinkDataSource": { + acctest.CtBasic: testAccObservabilityAccessManagerSinkDataSource_basic, + }, + "SinkPolicy": { + acctest.CtBasic: testAccObservabilityAccessManagerSinkPolicy_basic, + "update": testAccObservabilityAccessManagerSinkPolicy_update, + }, + "SinksDataSource": { + acctest.CtBasic: testAccObservabilityAccessManagerSinksDataSource_basic, + }, + } + + acctest.RunSerialTests2Levels(t, testCases, 0) +} diff --git a/internal/service/oam/service_endpoint_resolver_gen.go b/internal/service/oam/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..06de202246c --- /dev/null +++ 
b/internal/service/oam/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package oam + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + oam_sdkv2 "github.com/aws/aws-sdk-go-v2/service/oam" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ oam_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver oam_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: oam_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params oam_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking 
up oam endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*oam_sdkv2.Options) { + return func(o *oam_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/oam/service_endpoints_gen_test.go b/internal/service/oam/service_endpoints_gen_test.go index c34ed793937..8761c852c4f 100644 --- a/internal/service/oam/service_endpoints_gen_test.go +++ b/internal/service/oam/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := oam_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), oam_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := oam_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), oam_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/oam/service_package_gen.go b/internal/service/oam/service_package_gen.go index f6a5e91ab4c..2778bea7d35 100644 --- a/internal/service/oam/service_package_gen.go +++ b/internal/service/oam/service_package_gen.go @@ -1,4 +1,4 @@ -// 
Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package oam @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" oam_sdkv2 "github.com/aws/aws-sdk-go-v2/service/oam" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -77,19 +76,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*oam_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return oam_sdkv2.NewFromConfig(cfg, func(o *oam_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return oam_sdkv2.NewFromConfig(cfg, + oam_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/oam/sink_data_source_test.go b/internal/service/oam/sink_data_source_test.go index fd0dafe4826..870eaaa01e1 100644 --- a/internal/service/oam/sink_data_source_test.go +++ b/internal/service/oam/sink_data_source_test.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccObservabilityAccessManagerSinkDataSource_basic(t *testing.T) { +func testAccObservabilityAccessManagerSinkDataSource_basic(t *testing.T) { if 
testing.Short() { t.Skip("skipping long-running test in short mode") } diff --git a/internal/service/oam/sink_policy_test.go b/internal/service/oam/sink_policy_test.go index 87230425a45..eb4136cc23c 100644 --- a/internal/service/oam/sink_policy_test.go +++ b/internal/service/oam/sink_policy_test.go @@ -24,7 +24,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccObservabilityAccessManagerSinkPolicy_basic(t *testing.T) { +func testAccObservabilityAccessManagerSinkPolicy_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -83,7 +83,7 @@ func TestAccObservabilityAccessManagerSinkPolicy_basic(t *testing.T) { }) } -func TestAccObservabilityAccessManagerSinkPolicy_update(t *testing.T) { +func testAccObservabilityAccessManagerSinkPolicy_update(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } diff --git a/internal/service/oam/sink_test.go b/internal/service/oam/sink_test.go index 3ff84579dde..7d0ce3854bf 100644 --- a/internal/service/oam/sink_test.go +++ b/internal/service/oam/sink_test.go @@ -23,7 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccObservabilityAccessManagerSink_basic(t *testing.T) { +func testAccObservabilityAccessManagerSink_basic(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -62,7 +62,7 @@ func TestAccObservabilityAccessManagerSink_basic(t *testing.T) { }) } -func TestAccObservabilityAccessManagerSink_disappears(t *testing.T) { +func testAccObservabilityAccessManagerSink_disappears(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -94,7 +94,7 @@ func TestAccObservabilityAccessManagerSink_disappears(t *testing.T) { }) } -func TestAccObservabilityAccessManagerSink_tags(t *testing.T) { +func testAccObservabilityAccessManagerSink_tags(t *testing.T) { ctx := 
acctest.Context(t) if testing.Short() { t.Skip("skipping long-running test in short mode") diff --git a/internal/service/oam/sinks_data_source_test.go b/internal/service/oam/sinks_data_source_test.go index a2f3e26a973..48fb5725652 100644 --- a/internal/service/oam/sinks_data_source_test.go +++ b/internal/service/oam/sinks_data_source_test.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -func TestAccObservabilityAccessManagerSinksDataSource_basic(t *testing.T) { +func testAccObservabilityAccessManagerSinksDataSource_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } diff --git a/internal/service/opensearch/domain_test.go b/internal/service/opensearch/domain_test.go index c02752e1ae0..a216cdda0b7 100644 --- a/internal/service/opensearch/domain_test.go +++ b/internal/service/opensearch/domain_test.go @@ -11,7 +11,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cognitoidentityprovider" "github.com/aws/aws-sdk-go/service/opensearchservice" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -1189,7 +1188,7 @@ func TestAccOpenSearchDomain_CognitoOptions_createAndRemove(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheckCognitoIdentityProvider(ctx, t) + acctest.PreCheckCognitoIdentityProvider(ctx, t) testAccPreCheckIAMServiceLinkedRole(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.OpenSearchServiceID), @@ -1233,7 +1232,7 @@ func TestAccOpenSearchDomain_CognitoOptions_update(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - testAccPreCheckCognitoIdentityProvider(ctx, t) + acctest.PreCheckCognitoIdentityProvider(ctx, t) testAccPreCheckIAMServiceLinkedRole(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
names.OpenSearchServiceID), @@ -2261,24 +2260,6 @@ func testAccPreCheckIAMServiceLinkedRole(ctx context.Context, t *testing.T) { acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/opensearchservice") } -func testAccPreCheckCognitoIdentityProvider(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).CognitoIDPConn(ctx) - - input := &cognitoidentityprovider.ListUserPoolsInput{ - MaxResults: aws.Int64(1), - } - - _, err := conn.ListUserPoolsWithContext(ctx, input) - - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } - - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } -} - func testAccDomainConfig_basic(rName string) string { return fmt.Sprintf(` resource "aws_opensearch_domain" "test" { diff --git a/internal/service/opensearch/service_endpoint_resolver_gen.go b/internal/service/opensearch/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..3e4e71bc0d9 --- /dev/null +++ b/internal/service/opensearch/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package opensearch + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) 
+ + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/opensearch/service_endpoints_gen_test.go b/internal/service/opensearch/service_endpoints_gen_test.go index 1df5c4b1f01..af5f1d5543b 100644 --- a/internal/service/opensearch/service_endpoints_gen_test.go +++ b/internal/service/opensearch/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -88,7 +89,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -271,7 +272,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -292,12 +293,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(opensearchservice_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -306,17 +307,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(opensearchservice_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ 
-325,7 +326,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -396,16 +397,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/opensearch/service_package_gen.go b/internal/service/opensearch/service_package_gen.go index 4dd8ae4b597..b4b1a8103d4 100644 --- a/internal/service/opensearch/service_package_gen.go +++ b/internal/service/opensearch/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package opensearch @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" opensearchservice_sdkv1 "github.com/aws/aws-sdk-go/service/opensearchservice" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -90,11 +89,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*o "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return opensearchservice_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/opensearchserverless/access_policy.go b/internal/service/opensearchserverless/access_policy.go index 6ad6703b567..23b66868e3c 100644 --- a/internal/service/opensearchserverless/access_policy.go +++ b/internal/service/opensearchserverless/access_policy.go @@ -157,6 +157,14 @@ func (r *resourceAccessPolicy) Read(ctx context.Context, req resource.ReadReques return } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionReading, ResNameAccessPolicy, state.ID.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) 
if resp.Diagnostics.HasError() { diff --git a/internal/service/opensearchserverless/collection.go b/internal/service/opensearchserverless/collection.go index ecec56b1298..44577f90261 100644 --- a/internal/service/opensearchserverless/collection.go +++ b/internal/service/opensearchserverless/collection.go @@ -214,6 +214,14 @@ func (r *resourceCollection) Read(ctx context.Context, req resource.ReadRequest, return } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionReading, ResNameCollection, state.ID.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(flex.Flatten(ctx, out, &state)...) if resp.Diagnostics.HasError() { diff --git a/internal/service/opensearchserverless/security_config.go b/internal/service/opensearchserverless/security_config.go index d9adcd5afe3..9ef87101ced 100644 --- a/internal/service/opensearchserverless/security_config.go +++ b/internal/service/opensearchserverless/security_config.go @@ -152,7 +152,7 @@ func (r *resourceSecurityConfig) Create(ctx context.Context, req resource.Create if out == nil || out.SecurityConfigDetail == nil { resp.Diagnostics.AddError( create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionCreating, ResNameSecurityConfig, plan.Name.String(), nil), - err.Error(), + "Empty response.", ) return } @@ -178,6 +178,14 @@ func (r *resourceSecurityConfig) Read(ctx context.Context, req resource.ReadRequ return } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionReading, ResNameSecurityConfig, state.ID.ValueString(), err), + err.Error(), + ) + return + } + state.refreshFromOutput(ctx, out) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
} diff --git a/internal/service/opensearchserverless/security_policy.go b/internal/service/opensearchserverless/security_policy.go index 967b274a3b7..8b144cc0b11 100644 --- a/internal/service/opensearchserverless/security_policy.go +++ b/internal/service/opensearchserverless/security_policy.go @@ -149,6 +149,14 @@ func (r *resourceSecurityPolicy) Read(ctx context.Context, req resource.ReadRequ return } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionReading, ResNameSecurityPolicy, state.ID.ValueString(), err), + err.Error(), + ) + return + } + resp.Diagnostics.Append(state.refreshFromOutput(ctx, out)...) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } diff --git a/internal/service/opensearchserverless/service_endpoint_resolver_gen.go b/internal/service/opensearchserverless/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..5d8f1b501c6 --- /dev/null +++ b/internal/service/opensearchserverless/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package opensearchserverless + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + opensearchserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ opensearchserverless_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver opensearchserverless_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: opensearchserverless_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params opensearchserverless_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up opensearchserverless 
endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*opensearchserverless_sdkv2.Options) { + return func(o *opensearchserverless_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/opensearchserverless/service_endpoints_gen_test.go b/internal/service/opensearchserverless/service_endpoints_gen_test.go index 23e72508d77..9ebb72f97b5 100644 --- a/internal/service/opensearchserverless/service_endpoints_gen_test.go +++ b/internal/service/opensearchserverless/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := opensearchserverless_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), opensearchserverless_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - 
return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := opensearchserverless_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), opensearchserverless_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/opensearchserverless/service_package_gen.go 
b/internal/service/opensearchserverless/service_package_gen.go index 8857ae29747..d90ff9867a4 100644 --- a/internal/service/opensearchserverless/service_package_gen.go +++ b/internal/service/opensearchserverless/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package opensearchserverless @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" opensearchserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -89,19 +88,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*opensearchserverless_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return opensearchserverless_sdkv2.NewFromConfig(cfg, func(o *opensearchserverless_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return opensearchserverless_sdkv2.NewFromConfig(cfg, + opensearchserverless_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/opensearchserverless/vpc_endpoint.go 
b/internal/service/opensearchserverless/vpc_endpoint.go index 2cc464ecdf4..5351898fceb 100644 --- a/internal/service/opensearchserverless/vpc_endpoint.go +++ b/internal/service/opensearchserverless/vpc_endpoint.go @@ -184,6 +184,14 @@ func (r *resourceVpcEndpoint) Read(ctx context.Context, req resource.ReadRequest return } + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.OpenSearchServerless, create.ErrActionReading, ResNameVPCEndpoint, state.ID.ValueString(), err), + err.Error(), + ) + return + } + state.refreshFromOutput(ctx, out) resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } diff --git a/internal/service/opsworks/service_endpoint_resolver_gen.go b/internal/service/opsworks/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..6f09d847eec --- /dev/null +++ b/internal/service/opsworks/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package opsworks + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up opsworks endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/opsworks/service_endpoints_gen_test.go b/internal/service/opsworks/service_endpoints_gen_test.go index 1eed4760957..09f8ed6291a 100644 --- a/internal/service/opsworks/service_endpoints_gen_test.go +++ b/internal/service/opsworks/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t
*testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(opsworks_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(opsworks_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/opsworks/service_package_gen.go b/internal/service/opsworks/service_package_gen.go index 17196336b0e..e218ee36fc1 100644 --- a/internal/service/opsworks/service_package_gen.go +++ b/internal/service/opsworks/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package opsworks @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" opsworks_sdkv1 "github.com/aws/aws-sdk-go/service/opsworks" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -163,11 +162,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*o "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return opsworks_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/opsworks/stack.go b/internal/service/opsworks/stack.go index 32544fe5337..a758336f19e 100644 --- a/internal/service/opsworks/stack.go +++ b/internal/service/opsworks/stack.go @@ -329,7 +329,7 @@ func resourceStackRead(ctx context.Context, d *schema.ResourceData, meta interfa stack, err := FindStackByID(ctx, conn, d.Id()) - if tfresource.NotFound(err) { + if 
tfresource.NotFound(err) { // nosemgrep:ci.semgrep.errors.notfound-without-err-checks // If it's not found in the default region we're in, we check us-east-1 // in the event this stack was created with Terraform before version 0.9. // See https://github.com/hashicorp/terraform/issues/12842. diff --git a/internal/service/organizations/service_endpoint_resolver_gen.go b/internal/service/organizations/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..6d88c9fd916 --- /dev/null +++ b/internal/service/organizations/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package organizations + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + organizations_sdkv2 "github.com/aws/aws-sdk-go-v2/service/organizations" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ organizations_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver organizations_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: organizations_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params organizations_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + 
endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up organizations endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*organizations_sdkv2.Options) { + return func(o *organizations_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/organizations/service_endpoints_gen_test.go b/internal/service/organizations/service_endpoints_gen_test.go index f35ae8f69a1..3941633f604 100644 --- a/internal/service/organizations/service_endpoints_gen_test.go +++ b/internal/service/organizations/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips 
config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := organizations_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), organizations_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := organizations_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), organizations_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: 
%s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/organizations/service_package.go b/internal/service/organizations/service_package.go index b6a95ab8a27..62b07ffb4dc 100644 --- a/internal/service/organizations/service_package.go +++ b/internal/service/organizations/service_package.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/organizations" awstypes "github.com/aws/aws-sdk-go-v2/service/organizations/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,24 +19,16 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*organizations.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return organizations.NewFromConfig(cfg, func(o *organizations.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if 
errs.IsAErrorMessageContains[*awstypes.ConcurrentModificationException](err, "Try again later") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) - }), nil + return organizations.NewFromConfig(cfg, + organizations.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *organizations.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.ConcurrentModificationException](err, "Try again later") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + })) + }, + ), nil } diff --git a/internal/service/organizations/service_package_gen.go b/internal/service/organizations/service_package_gen.go index 01c8fccdb9a..5aba3d9eedf 100644 --- a/internal/service/organizations/service_package_gen.go +++ b/internal/service/organizations/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package organizations diff --git a/internal/service/osis/service_endpoint_resolver_gen.go b/internal/service/osis/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..5f57e01330e --- /dev/null +++ b/internal/service/osis/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package osis + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + osis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/osis" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ osis_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver osis_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: osis_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params osis_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up osis endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*osis_sdkv2.Options) { + return func(o *osis_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/osis/service_endpoints_gen_test.go b/internal/service/osis/service_endpoints_gen_test.go index 548247185db..940518484c6 100644 --- a/internal/service/osis/service_endpoints_gen_test.go +++ b/internal/service/osis/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := osis_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), osis_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := osis_sdkv2.NewDefaultEndpointResolverV2() ep, err := 
r.ResolveEndpoint(context.Background(), osis_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/osis/service_package_gen.go b/internal/service/osis/service_package_gen.go index 7b183a2c152..c290f3ba01a 100644 --- a/internal/service/osis/service_package_gen.go +++ b/internal/service/osis/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package osis @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" osis_sdkv2 "github.com/aws/aws-sdk-go-v2/service/osis" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -47,19 +46,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*osis_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return osis_sdkv2.NewFromConfig(cfg, func(o *osis_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return osis_sdkv2.NewFromConfig(cfg, + osis_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/outposts/service_endpoint_resolver_gen.go b/internal/service/outposts/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..e2e9cbe7d39 --- /dev/null +++ b/internal/service/outposts/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+
+package outposts
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/url"
+
+	endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/hashicorp/terraform-provider-aws/internal/errs"
+)
+
+var _ endpoints_sdkv1.Resolver = resolverSDKv1{}
+
+type resolverSDKv1 struct {
+	ctx context.Context
+}
+
+func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 {
+	return resolverSDKv1{
+		ctx: ctx,
+	}
+}
+
+func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) {
+	ctx := r.ctx
+
+	var opt endpoints_sdkv1.Options
+	opt.Set(opts...)
+
+	useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled
+
+	defaultResolver := endpoints_sdkv1.DefaultResolver()
+
+	if useFIPS {
+		ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS)
+
+		endpoint, err = defaultResolver.EndpointFor(service, region, opts...)
+		if err != nil {
+			return endpoint, err
+		}
+
+		tflog.Debug(ctx, "endpoint resolved", map[string]any{
+			"tf_aws.endpoint": endpoint.URL,
+		})
+
+		var endpointURL *url.URL
+		endpointURL, err = url.Parse(endpoint.URL)
+		if err != nil {
+			return endpoint, err
+		}
+
+		hostname := endpointURL.Hostname()
+		_, err = net.LookupHost(hostname)
+		if err != nil {
+			if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound {
+				tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{
+					"tf_aws.hostname": hostname,
+				})
+				opts = append(opts, func(o *endpoints_sdkv1.Options) {
+					o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled
+				})
+			} else {
+				err = fmt.Errorf("looking up outposts endpoint %q: %s", hostname, err)
+				return
+			}
+		} else {
+			return endpoint, err
+		}
+	}
+
+	return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/outposts/service_endpoints_gen_test.go b/internal/service/outposts/service_endpoints_gen_test.go index a5675a6f43a..4da82a857ff 100644 --- a/internal/service/outposts/service_endpoints_gen_test.go +++ b/internal/service/outposts/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(outposts_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(outposts_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func 
defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/outposts/service_package_gen.go b/internal/service/outposts/service_package_gen.go index 43230cdddb3..41b43754f56 100644 --- a/internal/service/outposts/service_package_gen.go +++ b/internal/service/outposts/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package outposts @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" outposts_sdkv1 "github.com/aws/aws-sdk-go/service/outposts" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -81,11 +80,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*o "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return outposts_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/paymentcryptography/service_endpoint_resolver_gen.go b/internal/service/paymentcryptography/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..fc57ac8a514 --- /dev/null +++ b/internal/service/paymentcryptography/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+
+package paymentcryptography
+
+import (
+	"context"
+	"fmt"
+	"net"
+
+	aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws"
+	paymentcryptography_sdkv2 "github.com/aws/aws-sdk-go-v2/service/paymentcryptography"
+	smithyendpoints "github.com/aws/smithy-go/endpoints"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/hashicorp/terraform-provider-aws/internal/errs"
+)
+
+var _ paymentcryptography_sdkv2.EndpointResolverV2 = resolverSDKv2{}
+
+type resolverSDKv2 struct {
+	defaultResolver paymentcryptography_sdkv2.EndpointResolverV2
+}
+
+func newEndpointResolverSDKv2() resolverSDKv2 {
+	return resolverSDKv2{
+		defaultResolver: paymentcryptography_sdkv2.NewDefaultEndpointResolverV2(),
+	}
+}
+
+func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params paymentcryptography_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) {
+	params = params.WithDefaults()
+	useFIPS := aws_sdkv2.ToBool(params.UseFIPS)
+
+	if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" {
+		tflog.Debug(ctx, "setting endpoint", map[string]any{
+			"tf_aws.endpoint": aws_sdkv2.ToString(eps),
+		})
+
+		if useFIPS {
+			tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting")
+			params.UseFIPS = aws_sdkv2.Bool(false)
+		}
+
+		return r.defaultResolver.ResolveEndpoint(ctx, params)
+	} else if useFIPS {
+		ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS)
+
+		endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params)
+		if err != nil {
+			return endpoint, err
+		}
+
+		tflog.Debug(ctx, "endpoint resolved", map[string]any{
+			"tf_aws.endpoint": endpoint.URI.String(),
+		})
+
+		hostname := endpoint.URI.Hostname()
+		_, err = net.LookupHost(hostname)
+		if err != nil {
+			if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound {
+				tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{
+					"tf_aws.hostname": hostname,
+				})
+				params.UseFIPS = aws_sdkv2.Bool(false)
+			} else {
+				err = fmt.Errorf("looking up paymentcryptography endpoint %q: 
%s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*paymentcryptography_sdkv2.Options) { + return func(o *paymentcryptography_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/paymentcryptography/service_package_gen.go b/internal/service/paymentcryptography/service_package_gen.go index 12aaf10726d..ef747eda518 100644 --- a/internal/service/paymentcryptography/service_package_gen.go +++ b/internal/service/paymentcryptography/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package paymentcryptography @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" paymentcryptography_sdkv2 "github.com/aws/aws-sdk-go-v2/service/paymentcryptography" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -51,19 +50,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*paymentcryptography_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return paymentcryptography_sdkv2.NewFromConfig(cfg, func(o *paymentcryptography_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = 
aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return paymentcryptography_sdkv2.NewFromConfig(cfg, + paymentcryptography_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/pcaconnectorad/service_endpoint_resolver_gen.go b/internal/service/pcaconnectorad/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..dc488d05723 --- /dev/null +++ b/internal/service/pcaconnectorad/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package pcaconnectorad + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + pcaconnectorad_sdkv2 "github.com/aws/aws-sdk-go-v2/service/pcaconnectorad" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ pcaconnectorad_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver pcaconnectorad_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: pcaconnectorad_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params pcaconnectorad_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = 
tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up pcaconnectorad endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*pcaconnectorad_sdkv2.Options) { + return func(o *pcaconnectorad_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/pcaconnectorad/service_endpoints_gen_test.go b/internal/service/pcaconnectorad/service_endpoints_gen_test.go index b767a7e618d..fd53a886ea9 100644 --- a/internal/service/pcaconnectorad/service_endpoints_gen_test.go +++ b/internal/service/pcaconnectorad/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: 
expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := pcaconnectorad_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), pcaconnectorad_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := pcaconnectorad_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), pcaconnectorad_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + 
if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/pcaconnectorad/service_package_gen.go b/internal/service/pcaconnectorad/service_package_gen.go index 8b9d067a6b8..2723b987d99 100644 --- a/internal/service/pcaconnectorad/service_package_gen.go +++ b/internal/service/pcaconnectorad/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package pcaconnectorad @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" pcaconnectorad_sdkv2 "github.com/aws/aws-sdk-go-v2/service/pcaconnectorad" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*pcaconnectorad_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return pcaconnectorad_sdkv2.NewFromConfig(cfg, func(o *pcaconnectorad_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, 
"endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return pcaconnectorad_sdkv2.NewFromConfig(cfg, + pcaconnectorad_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/pinpoint/app.go b/internal/service/pinpoint/app.go index 944572ad808..b354a97cdd3 100644 --- a/internal/service/pinpoint/app.go +++ b/internal/service/pinpoint/app.go @@ -313,7 +313,7 @@ func findAppSettingsByID(ctx context.Context, conn *pinpoint.Pinpoint, id string } func expandCampaignHook(configs []interface{}) *pinpoint.CampaignHook { - if len(configs) == 0 { + if len(configs) == 0 || configs[0] == nil { return nil } @@ -351,7 +351,7 @@ func flattenCampaignHook(ch *pinpoint.CampaignHook) []interface{} { } func expandCampaignLimits(configs []interface{}) *pinpoint.CampaignLimits { - if len(configs) == 0 { + if len(configs) == 0 || configs[0] == nil { return nil } @@ -394,7 +394,7 @@ func flattenCampaignLimits(cl *pinpoint.CampaignLimits) []interface{} { } func expandQuietTime(configs []interface{}) *pinpoint.QuietTime { - if len(configs) == 0 { + if len(configs) == 0 || configs[0] == nil { return nil } diff --git a/internal/service/pinpoint/app_test.go b/internal/service/pinpoint/app_test.go index 07986bb497c..0077b962b4e 100644 --- a/internal/service/pinpoint/app_test.go +++ b/internal/service/pinpoint/app_test.go @@ -12,7 +12,10 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/statecheck" "github.com/hashicorp/terraform-plugin-testing/terraform" + 
"github.com/hashicorp/terraform-plugin-testing/tfjsonpath" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfpinpoint "github.com/hashicorp/terraform-provider-aws/internal/service/pinpoint" @@ -38,13 +41,34 @@ func TestAccPinpointApp_basic(t *testing.T) { testAccCheckAppExists(ctx, resourceName, &application), resource.TestCheckResourceAttrSet(resourceName, names.AttrApplicationID), resource.TestCheckResourceAttrSet(resourceName, names.AttrARN), - resource.TestCheckResourceAttr(resourceName, "campaign_hook.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "limits.#", acctest.Ct1), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, ""), - resource.TestCheckResourceAttr(resourceName, "quiet_time.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, acctest.CtTagsPercent, acctest.Ct0), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("campaign_hook"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "lambda_function_name": knownvalue.StringExact(""), + names.AttrMode: knownvalue.StringExact(""), + "web_url": knownvalue.StringExact(""), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("limits"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "daily": knownvalue.Int64Exact(0), + "maximum_duration": knownvalue.Int64Exact(0), + "messages_per_second": knownvalue.Int64Exact(0), + "total": knownvalue.Int64Exact(0), + }), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("quiet_time"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "end": knownvalue.StringExact(""), + "start": knownvalue.StringExact(""), + }), + })), + 
statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, }, { ResourceName: resourceName, @@ -187,9 +211,53 @@ func TestAccPinpointApp_campaignHookLambda(t *testing.T) { Config: testAccAppConfig_campaignHookLambda(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAppExists(ctx, resourceName, &application), - resource.TestCheckResourceAttr(resourceName, "campaign_hook.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "campaign_hook.0.mode", "DELIVERY"), + resource.TestCheckResourceAttrPair(resourceName, "campaign_hook.0.lambda_function_name", "aws_lambda_function.test", names.AttrARN), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("campaign_hook"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "lambda_function_name": knownvalue.NotNull(), // Should be a Pair function, waiting on https://github.com/hashicorp/terraform-plugin-testing/pull/330 + names.AttrMode: knownvalue.StringExact(pinpoint.ModeDelivery), + "web_url": knownvalue.StringExact(""), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPinpointApp_campaignHookEmpty(t *testing.T) { + ctx := acctest.Context(t) + var application pinpoint.ApplicationResponse + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pinpoint_app.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckApp(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.PinpointServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAppDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccAppConfig_campaignHookEmpty(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppExists(ctx, resourceName, &application), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("campaign_hook"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "lambda_function_name": knownvalue.StringExact(""), + names.AttrMode: knownvalue.StringExact(""), + "web_url": knownvalue.StringExact(""), + }), + })), + }, }, { ResourceName: resourceName, @@ -216,9 +284,17 @@ func TestAccPinpointApp_limits(t *testing.T) { Config: testAccAppConfig_limits(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAppExists(ctx, resourceName, &application), - resource.TestCheckResourceAttr(resourceName, "limits.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "limits.0.total", "100"), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("limits"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "daily": knownvalue.Int64Exact(3), + "maximum_duration": knownvalue.Int64Exact(600), + "messages_per_second": knownvalue.Int64Exact(50), + "total": knownvalue.Int64Exact(100), + }), + })), + }, }, { ResourceName: resourceName, @@ -229,7 +305,7 @@ func TestAccPinpointApp_limits(t *testing.T) { }) } -func TestAccPinpointApp_quietTime(t *testing.T) { +func TestAccPinpointApp_limitsEmpty(t *testing.T) { ctx := acctest.Context(t) var application pinpoint.ApplicationResponse rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -242,12 +318,20 @@ func TestAccPinpointApp_quietTime(t *testing.T) { CheckDestroy: testAccCheckAppDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccAppConfig_quietTime(rName), + Config: testAccAppConfig_limitsEmpty(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAppExists(ctx, resourceName, &application), - 
resource.TestCheckResourceAttr(resourceName, "quiet_time.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "quiet_time.0.start", "00:00"), ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("limits"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "daily": knownvalue.Int64Exact(0), + "maximum_duration": knownvalue.Int64Exact(0), + "messages_per_second": knownvalue.Int64Exact(0), + "total": knownvalue.Int64Exact(0), + }), + })), + }, }, { ResourceName: resourceName, @@ -258,20 +342,79 @@ func TestAccPinpointApp_quietTime(t *testing.T) { }) } -func testAccPreCheckApp(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).PinpointConn(ctx) +func TestAccPinpointApp_quietTime(t *testing.T) { + ctx := acctest.Context(t) + var application pinpoint.ApplicationResponse + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pinpoint_app.test" - input := &pinpoint.GetAppsInput{} + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckApp(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.PinpointServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAppDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAppConfig_quietTime(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppExists(ctx, resourceName, &application), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("quiet_time"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "end": knownvalue.StringExact("03:00"), + "start": knownvalue.StringExact("00:00"), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} - _, err := 
conn.GetAppsWithContext(ctx, input) +func TestAccPinpointApp_quietTimeEmpty(t *testing.T) { + ctx := acctest.Context(t) + var application pinpoint.ApplicationResponse + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_pinpoint_app.test" - if acctest.PreCheckSkipError(err) { - t.Skipf("skipping acceptance testing: %s", err) - } + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckApp(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.PinpointServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckAppDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccAppConfig_quietTimeEmpty(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAppExists(ctx, resourceName, &application), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New("quiet_time"), knownvalue.ListExact([]knownvalue.Check{ + knownvalue.ObjectExact(map[string]knownvalue.Check{ + "end": knownvalue.StringExact(""), + "start": knownvalue.StringExact(""), + }), + })), + }, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} - if err != nil { - t.Fatalf("unexpected PreCheck error: %s", err) - } +func testAccPreCheckApp(ctx context.Context, t *testing.T) { + t.Helper() + acctest.PreCheckPinpointApp(ctx, t) } func testAccCheckAppDestroy(ctx context.Context) resource.TestCheckFunc { @@ -426,6 +569,16 @@ resource "aws_lambda_permission" "test" { `, rName) } +func testAccAppConfig_campaignHookEmpty(rName string) string { + return fmt.Sprintf(` +resource "aws_pinpoint_app" "test" { + name = %[1]q + + campaign_hook {} +} +`, rName) +} + func testAccAppConfig_limits(rName string) string { return fmt.Sprintf(` resource "aws_pinpoint_app" "test" { @@ -441,6 +594,16 @@ resource "aws_pinpoint_app" "test" { `, rName) } +func testAccAppConfig_limitsEmpty(rName 
string) string { + return fmt.Sprintf(` +resource "aws_pinpoint_app" "test" { + name = %[1]q + + limits {} +} +`, rName) +} + func testAccAppConfig_quietTime(rName string) string { return fmt.Sprintf(` resource "aws_pinpoint_app" "test" { @@ -453,3 +616,13 @@ resource "aws_pinpoint_app" "test" { } `, rName) } + +func testAccAppConfig_quietTimeEmpty(rName string) string { + return fmt.Sprintf(` +resource "aws_pinpoint_app" "test" { + name = %[1]q + + quiet_time {} +} +`, rName) +} diff --git a/internal/service/pinpoint/service_endpoint_resolver_gen.go b/internal/service/pinpoint/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..59d160d67cb --- /dev/null +++ b/internal/service/pinpoint/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package pinpoint + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/pinpoint/service_endpoints_gen_test.go b/internal/service/pinpoint/service_endpoints_gen_test.go index d777caf603f..5bf7bed558c 100644 --- a/internal/service/pinpoint/service_endpoints_gen_test.go +++ b/internal/service/pinpoint/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t 
*testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(pinpoint_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(pinpoint_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/pinpoint/service_package_gen.go b/internal/service/pinpoint/service_package_gen.go index 7d2b3d4b77f..08d563cff4a 100644 --- a/internal/service/pinpoint/service_package_gen.go +++ b/internal/service/pinpoint/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package pinpoint @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" pinpoint_sdkv1 "github.com/aws/aws-sdk-go/service/pinpoint" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -97,11 +96,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*p "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return pinpoint_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/pipes/service_endpoint_resolver_gen.go b/internal/service/pipes/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..81c0d5f49b1 --- /dev/null +++ b/internal/service/pipes/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package pipes + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + pipes_sdkv2 "github.com/aws/aws-sdk-go-v2/service/pipes" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ pipes_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver pipes_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: pipes_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params pipes_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up pipes endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*pipes_sdkv2.Options) { + return func(o *pipes_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/pipes/service_endpoints_gen_test.go b/internal/service/pipes/service_endpoints_gen_test.go index d8d4b91752f..6996fb58bee 100644 --- a/internal/service/pipes/service_endpoints_gen_test.go +++ b/internal/service/pipes/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := pipes_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), pipes_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := pipes_sdkv2.NewDefaultEndpointResolverV2() ep, err := 
r.ResolveEndpoint(context.Background(), pipes_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/pipes/service_package_gen.go b/internal/service/pipes/service_package_gen.go index f071123258e..c268b1f80d5 100644 --- a/internal/service/pipes/service_package_gen.go +++ b/internal/service/pipes/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package pipes @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" pipes_sdkv2 "github.com/aws/aws-sdk-go-v2/service/pipes" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*pipes_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return pipes_sdkv2.NewFromConfig(cfg, func(o *pipes_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return pipes_sdkv2.NewFromConfig(cfg, + pipes_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/polly/service_endpoint_resolver_gen.go b/internal/service/polly/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..cd3672f6520 --- /dev/null +++ b/internal/service/polly/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package polly + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + polly_sdkv2 "github.com/aws/aws-sdk-go-v2/service/polly" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ polly_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver polly_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: polly_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params polly_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up polly endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*polly_sdkv2.Options) { + return func(o *polly_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/polly/service_endpoints_gen_test.go b/internal/service/polly/service_endpoints_gen_test.go index 366f5bb424e..1b12cee4ca5 100644 --- a/internal/service/polly/service_endpoints_gen_test.go +++ b/internal/service/polly/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := polly_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), polly_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := polly_sdkv2.NewDefaultEndpointResolverV2() ep, err := 
r.ResolveEndpoint(context.Background(), polly_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/polly/service_package_gen.go b/internal/service/polly/service_package_gen.go index 019d6190e78..e3fa6f7dd0c 100644 --- a/internal/service/polly/service_package_gen.go +++ b/internal/service/polly/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package polly @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" polly_sdkv2 "github.com/aws/aws-sdk-go-v2/service/polly" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -44,19 +43,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*polly_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return polly_sdkv2.NewFromConfig(cfg, func(o *polly_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return polly_sdkv2.NewFromConfig(cfg, + polly_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/pricing/service_endpoint_resolver_gen.go b/internal/service/pricing/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..15b32059979 --- /dev/null +++ b/internal/service/pricing/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package pricing + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + pricing_sdkv2 "github.com/aws/aws-sdk-go-v2/service/pricing" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ pricing_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver pricing_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: pricing_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params pricing_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up pricing endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*pricing_sdkv2.Options) { + return func(o *pricing_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/pricing/service_endpoints_gen_test.go b/internal/service/pricing/service_endpoints_gen_test.go index 24b6be6fb89..00c87d52019 100644 --- a/internal/service/pricing/service_endpoints_gen_test.go +++ b/internal/service/pricing/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := pricing_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), pricing_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
pricing_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), pricing_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/pricing/service_package_gen.go b/internal/service/pricing/service_package_gen.go index 62d1e64ba29..2fed2364644 100644 --- a/internal/service/pricing/service_package_gen.go +++ b/internal/service/pricing/service_package_gen.go @@ -1,4 +1,4 @@ -// Code 
generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package pricing @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" pricing_sdkv2 "github.com/aws/aws-sdk-go-v2/service/pricing" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -44,19 +43,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*pricing_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return pricing_sdkv2.NewFromConfig(cfg, func(o *pricing_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return pricing_sdkv2.NewFromConfig(cfg, + pricing_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/qbusiness/service_endpoint_resolver_gen.go b/internal/service/qbusiness/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..1d24494ea9a --- /dev/null +++ b/internal/service/qbusiness/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package qbusiness + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + qbusiness_sdkv2 "github.com/aws/aws-sdk-go-v2/service/qbusiness" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ qbusiness_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver qbusiness_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: qbusiness_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params qbusiness_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up qbusiness endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + 
return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*qbusiness_sdkv2.Options) { + return func(o *qbusiness_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/qbusiness/service_endpoints_gen_test.go b/internal/service/qbusiness/service_endpoints_gen_test.go index e7d0fb408e7..dd995ba0375 100644 --- a/internal/service/qbusiness/service_endpoints_gen_test.go +++ b/internal/service/qbusiness/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := qbusiness_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), qbusiness_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
qbusiness_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), qbusiness_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/qbusiness/service_package_gen.go b/internal/service/qbusiness/service_package_gen.go index 60138df50c3..b02a4b2b74e 100644 --- a/internal/service/qbusiness/service_package_gen.go +++ b/internal/service/qbusiness/service_package_gen.go @@ -1,4 +1,4 @@ -// 
Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package qbusiness @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" qbusiness_sdkv2 "github.com/aws/aws-sdk-go-v2/service/qbusiness" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*qbusiness_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return qbusiness_sdkv2.NewFromConfig(cfg, func(o *qbusiness_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return qbusiness_sdkv2.NewFromConfig(cfg, + qbusiness_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/qldb/service_endpoint_resolver_gen.go b/internal/service/qldb/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..1db7b08627a --- /dev/null +++ b/internal/service/qldb/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package qldb + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + qldb_sdkv2 "github.com/aws/aws-sdk-go-v2/service/qldb" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ qldb_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver qldb_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: qldb_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params qldb_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up qldb endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*qldb_sdkv2.Options) { + return func(o *qldb_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/qldb/service_endpoints_gen_test.go b/internal/service/qldb/service_endpoints_gen_test.go index 563115948c2..c3ed46c410c 100644 --- a/internal/service/qldb/service_endpoints_gen_test.go +++ b/internal/service/qldb/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := qldb_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), qldb_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := qldb_sdkv2.NewDefaultEndpointResolverV2() ep, err := 
r.ResolveEndpoint(context.Background(), qldb_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/qldb/service_package_gen.go b/internal/service/qldb/service_package_gen.go index 4dc5155ea09..2af8b39f05e 100644 --- a/internal/service/qldb/service_package_gen.go +++ b/internal/service/qldb/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package qldb @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" qldb_sdkv2 "github.com/aws/aws-sdk-go-v2/service/qldb" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -61,19 +60,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*qldb_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return qldb_sdkv2.NewFromConfig(cfg, func(o *qldb_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return qldb_sdkv2.NewFromConfig(cfg, + qldb_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/quicksight/account_subscription.go b/internal/service/quicksight/account_subscription.go index 8dee6e088dd..3b64c93c645 100644 --- a/internal/service/quicksight/account_subscription.go +++ b/internal/service/quicksight/account_subscription.go @@ -107,6 +107,11 @@ func ResourceAccountSubscription() *schema.Resource { Optional: true, ForceNew: true, }, + "iam_identity_center_instance_arn": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, "last_name": { Type: schema.TypeString, Optional: true, @@ -187,6 
+192,10 @@ func resourceAccountSubscriptionCreate(ctx context.Context, d *schema.ResourceDa in.FirstName = aws.String(v.(string)) } + if v, ok := d.GetOk("iam_identity_center_instance_arn"); ok { + in.IAMIdentityCenterInstanceArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("last_name"); ok { in.LastName = aws.String(v.(string)) } @@ -239,6 +248,7 @@ func resourceAccountSubscriptionRead(ctx context.Context, d *schema.ResourceData d.Set("edition", out.Edition) d.Set("notification_email", out.NotificationEmail) d.Set("account_subscription_status", out.AccountSubscriptionStatus) + d.Set("iam_identity_center_instance_arn", out.IAMIdentityCenterInstanceArn) return diags } diff --git a/internal/service/quicksight/service_endpoint_resolver_gen.go b/internal/service/quicksight/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..aa39a967cdd --- /dev/null +++ b/internal/service/quicksight/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package quicksight + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) 
+ + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/quicksight/service_endpoints_gen_test.go b/internal/service/quicksight/service_endpoints_gen_test.go index 3f1e3a462ca..c0468f9f4be 100644 --- a/internal/service/quicksight/service_endpoints_gen_test.go +++ b/internal/service/quicksight/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(quicksight_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(quicksight_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ 
func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -325,16 +326,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/quicksight/service_package_gen.go b/internal/service/quicksight/service_package_gen.go index c4823c3a621..923d0e41fa6 100644 --- a/internal/service/quicksight/service_package_gen.go +++ b/internal/service/quicksight/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package quicksight @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" quicksight_sdkv1 "github.com/aws/aws-sdk-go/service/quicksight" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -181,11 +180,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*q "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return quicksight_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/ram/principal_association_test.go b/internal/service/ram/principal_association_test.go index b642ac38495..440c3d8c87f 100644 --- a/internal/service/ram/principal_association_test.go +++ b/internal/service/ram/principal_association_test.go @@ -138,6 +138,10 @@ func testAccPreCheckSharingWithOrganizationEnabled(ctx context.Context, t *testi if tfresource.NotFound(err) { t.Skipf("Sharing with AWS Organization not found, skipping acceptance test: %s", err) } + + if err != nil { + t.Fatalf("unexpected PreCheck error: %s", err) + } } func testAccCheckPrincipalAssociationExists(ctx context.Context, n string, v *awstypes.ResourceShareAssociation) resource.TestCheckFunc { diff --git a/internal/service/ram/service_endpoint_resolver_gen.go b/internal/service/ram/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..850db742de4 --- /dev/null +++ b/internal/service/ram/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package ram + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ram_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ram" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ ram_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver ram_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: ram_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params ram_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up ram endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*ram_sdkv2.Options) { + return func(o *ram_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ram/service_endpoints_gen_test.go b/internal/service/ram/service_endpoints_gen_test.go index c18e23add22..562edb8339c 100644 --- a/internal/service/ram/service_endpoints_gen_test.go +++ b/internal/service/ram/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := ram_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ram_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := ram_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
ram_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ram/service_package_gen.go b/internal/service/ram/service_package_gen.go index 6f0bf06dd97..dc830590c4c 100644 --- a/internal/service/ram/service_package_gen.go +++ b/internal/service/ram/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ram @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" ram_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ram" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -75,19 +74,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*ram_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return ram_sdkv2.NewFromConfig(cfg, func(o *ram_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return ram_sdkv2.NewFromConfig(cfg, + ram_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/rbin/service_endpoint_resolver_gen.go b/internal/service/rbin/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..c2030262e0e --- /dev/null +++ b/internal/service/rbin/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package rbin + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + rbin_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rbin" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ rbin_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver rbin_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: rbin_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params rbin_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up rbin endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*rbin_sdkv2.Options) { + return func(o *rbin_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/rbin/service_endpoints_gen_test.go b/internal/service/rbin/service_endpoints_gen_test.go index 28c5ca0b63d..c8dcffc3425 100644 --- a/internal/service/rbin/service_endpoints_gen_test.go +++ b/internal/service/rbin/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -92,7 +94,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -275,7 +277,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -296,24 +298,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := rbin_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), rbin_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := rbin_sdkv2.NewDefaultEndpointResolverV2() ep, err := 
r.ResolveEndpoint(context.Background(), rbin_sdkv2.EndpointParameters{ @@ -321,14 +323,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -411,16 +413,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/rbin/service_package_gen.go b/internal/service/rbin/service_package_gen.go index 7b80b7e1a00..063072945e6 100644 --- a/internal/service/rbin/service_package_gen.go +++ b/internal/service/rbin/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package rbin @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" rbin_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rbin" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -48,19 +47,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*rbin_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return rbin_sdkv2.NewFromConfig(cfg, func(o *rbin_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return rbin_sdkv2.NewFromConfig(cfg, + rbin_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/rds/certificate.go b/internal/service/rds/certificate.go new file mode 100644 index 00000000000..2b44984e256 --- /dev/null +++ b/internal/service/rds/certificate.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package rds + +import ( + "context" + "log" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rds" + "github.com/aws/aws-sdk-go-v2/service/rds/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +// @SDKResource("aws_rds_certificate", name="Default Certificate") +func resourceCertificate() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceCertificatePut, + ReadWithoutTimeout: resourceCertificateRead, + UpdateWithoutTimeout: resourceCertificatePut, + DeleteWithoutTimeout: resourceCertificateDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Schema: map[string]*schema.Schema{ + "certificate_identifier": { + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceCertificatePut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).RDSClient(ctx) + + certificateID := d.Get("certificate_identifier").(string) + input := &rds.ModifyCertificatesInput{ + CertificateIdentifier: aws.String(certificateID), + } + + _, err := conn.ModifyCertificates(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "setting RDS Default Certificate (%s): %s", certificateID, err) + } + + if d.IsNewResource() { + d.SetId(meta.(*conns.AWSClient).Region) + } + + return append(diags, resourceCertificateRead(ctx, d, meta)...) 
+} + +func resourceCertificateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).RDSClient(ctx) + + output, err := findDefaultCertificate(ctx, conn) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] RDS Default Certificate (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading RDS Default Certificate (%s): %s", d.Id(), err) + } + + d.Set("certificate_identifier", output.CertificateIdentifier) + + return diags +} + +func resourceCertificateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).RDSClient(ctx) + + log.Printf("[DEBUG] Deleting RDS Default Certificate: %s", d.Id()) + _, err := conn.ModifyCertificates(ctx, &rds.ModifyCertificatesInput{ + RemoveCustomerOverride: aws.Bool(true), + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "removing RDS Default Certificate (%s): %s", d.Id(), err) + } + + return diags +} + +func findCertificate(ctx context.Context, conn *rds.Client, input *rds.DescribeCertificatesInput, filter tfslices.Predicate[*types.Certificate]) (*types.Certificate, error) { + output, err := findCertificates(ctx, conn, input, filter) + + if err != nil { + return nil, err + } + + return tfresource.AssertSingleValueResult(output) +} + +func findCertificates(ctx context.Context, conn *rds.Client, input *rds.DescribeCertificatesInput, filter tfslices.Predicate[*types.Certificate]) ([]types.Certificate, error) { + var output []types.Certificate + + pages := rds.NewDescribeCertificatesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*types.CertificateNotFoundFault](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return 
nil, err + } + + for _, v := range page.Certificates { + if filter(&v) { + output = append(output, v) + } + } + } + + return output, nil +} + +func findDefaultCertificate(ctx context.Context, conn *rds.Client) (*types.Certificate, error) { + input := &rds.DescribeCertificatesInput{} + + return findCertificate(ctx, conn, input, func(v *types.Certificate) bool { + return aws.ToBool(v.CustomerOverride) + }) +} diff --git a/internal/service/rds/certificate_test.go b/internal/service/rds/certificate_test.go new file mode 100644 index 00000000000..bc30d2b99df --- /dev/null +++ b/internal/service/rds/certificate_test.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package rds_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/rds/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tfrds "github.com/hashicorp/terraform-provider-aws/internal/service/rds" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccRDSCertificate_serial(t *testing.T) { + t.Parallel() + + testCases := map[string]func(t *testing.T){ + acctest.CtBasic: testAccCertificate_basic, + acctest.CtDisappears: testAccCertificate_disappears, + } + + acctest.RunSerialTests1Level(t, testCases, 0) +} + +func testAccCertificate_basic(t *testing.T) { + ctx := acctest.Context(t) + var v types.Certificate + resourceName := "aws_rds_certificate.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCertificateDestroy(ctx), + Steps: []resource.TestStep{ + { + 
Config: testAccCertificateConfig_basic("rds-ca-rsa4096-g1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "certificate_identifier", "rds-ca-rsa4096-g1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCertificateConfig_basic("rds-ca-ecc384-g1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "certificate_identifier", "rds-ca-ecc384-g1"), + ), + }, + }, + }) +} + +func testAccCertificate_disappears(t *testing.T) { + ctx := acctest.Context(t) + var v types.Certificate + resourceName := "aws_rds_certificate.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckCertificateDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccCertificateConfig_basic("rds-ca-rsa4096-g1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckCertificateExists(ctx, resourceName, &v), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfrds.ResourceCertificate(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckCertificateDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_rds_certificate" { + continue + } + + _, err := tfrds.FindDefaultCertificate(ctx, conn) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("RDS Default Certificate %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckCertificateExists(ctx context.Context, n 
string, v *types.Certificate) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).RDSClient(ctx) + + output, err := tfrds.FindDefaultCertificate(ctx, conn) + + if err != nil { + return err + } + + *v = *output + + return nil + } +} + +func testAccCertificateConfig_basic(certificateID string) string { + return fmt.Sprintf(` +resource "aws_rds_certificate" "test" { + certificate_identifier = %[1]q +} +`, certificateID) +} diff --git a/internal/service/rds/cluster.go b/internal/service/rds/cluster.go index a7bc7b55197..2a2ad3bc96b 100644 --- a/internal/service/rds/cluster.go +++ b/internal/service/rds/cluster.go @@ -113,6 +113,14 @@ func ResourceCluster() *schema.Resource { ValidateFunc: validIdentifier, ConflictsWith: []string{"cluster_identifier_prefix"}, }, + "ca_certificate_identifier": { + Type: schema.TypeString, + Optional: true, + }, + "ca_certificate_valid_till": { + Type: schema.TypeString, + Computed: true, + }, "cluster_identifier_prefix": { Type: schema.TypeString, Optional: true, @@ -220,12 +228,18 @@ func ResourceCluster() *schema.Resource { validation.StringInSlice(ClusterEngine_Values(), false), ), }, + "engine_lifecycle_support": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(engineLifecycleSupport_Values(), false), + }, "engine_mode": { Type: schema.TypeString, Optional: true, ForceNew: true, - Default: EngineModeProvisioned, - ValidateFunc: validation.StringInSlice(EngineMode_Values(), false), + Default: engineModeProvisioned, + ValidateFunc: validation.StringInSlice(engineMode_Values(), false), }, names.AttrEngineVersion: { Type: schema.TypeString, @@ -640,6 +654,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int input.EnableCloudwatchLogsExports = flex.ExpandStringSet(v.(*schema.Set)) } + if 
v, ok := d.GetOk("engine_lifecycle_support"); ok { + input.EngineLifecycleSupport = aws.String(v.(string)) + } + if v, ok := d.GetOk(names.AttrEngineVersion); ok { input.EngineVersion = aws.String(v.(string)) } @@ -766,6 +784,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int input.EnableCloudwatchLogsExports = flex.ExpandStringSet(v.(*schema.Set)) } + if v, ok := d.GetOk("engine_lifecycle_support"); ok { + input.EngineLifecycleSupport = aws.String(v.(string)) + } + if v, ok := d.GetOk(names.AttrEngineVersion); ok { input.EngineVersion = aws.String(v.(string)) } @@ -984,6 +1006,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int input.BackupRetentionPeriod = aws.Int64(int64(v.(int))) } + if v := d.Get("ca_certificate_identifier"); v.(string) != "" { + input.CACertificateIdentifier = aws.String(v.(string)) + } + if v := d.Get(names.AttrDatabaseName); v.(string) != "" { input.DatabaseName = aws.String(v.(string)) } @@ -1028,6 +1054,10 @@ func resourceClusterCreate(ctx context.Context, d *schema.ResourceData, meta int input.EnableCloudwatchLogsExports = flex.ExpandStringSet(v.(*schema.Set)) } + if v, ok := d.GetOk("engine_lifecycle_support"); ok { + input.EngineLifecycleSupport = aws.String(v.(string)) + } + if v, ok := d.GetOk(names.AttrEngineVersion); ok { input.EngineVersion = aws.String(v.(string)) } @@ -1173,6 +1203,10 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set(names.AttrAvailabilityZones, aws.StringValueSlice(dbc.AvailabilityZones)) d.Set("backtrack_window", dbc.BacktrackWindow) d.Set("backup_retention_period", dbc.BackupRetentionPeriod) + if dbc.CertificateDetails != nil { + d.Set("ca_certificate_identifier", dbc.CertificateDetails.CAIdentifier) + d.Set("ca_certificate_valid_till", dbc.CertificateDetails.ValidTill.Format(time.RFC3339)) + } d.Set(names.AttrClusterIdentifier, dbc.DBClusterIdentifier) d.Set("cluster_identifier_prefix", 
create.NamePrefixFromName(aws.StringValue(dbc.DBClusterIdentifier))) var clusterMembers []string @@ -1206,6 +1240,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set("enable_http_endpoint", dbc.HttpEndpointEnabled) d.Set(names.AttrEndpoint, dbc.Endpoint) d.Set(names.AttrEngine, dbc.Engine) + d.Set("engine_lifecycle_support", dbc.EngineLifecycleSupport) d.Set("engine_mode", dbc.EngineMode) clusterSetResourceDataEngineVersionFromCluster(d, dbc) d.Set(names.AttrHostedZoneID, dbc.HostedZoneId) @@ -1268,7 +1303,7 @@ func resourceClusterRead(ctx context.Context, d *schema.ResourceData, meta inter // Fetch and save Global Cluster if engine mode global d.Set("global_cluster_identifier", "") - if aws.StringValue(dbc.EngineMode) == EngineModeGlobal || aws.StringValue(dbc.EngineMode) == EngineModeProvisioned { + if aws.StringValue(dbc.EngineMode) == engineModeGlobal || aws.StringValue(dbc.EngineMode) == engineModeProvisioned { globalCluster, err := FindGlobalClusterByDBClusterARN(ctx, conn, aws.StringValue(dbc.DBClusterArn)) if err == nil { @@ -1319,6 +1354,10 @@ func resourceClusterUpdate(ctx context.Context, d *schema.ResourceData, meta int input.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) } + if d.HasChange("ca_certificate_identifier") { + input.CACertificateIdentifier = aws.String(d.Get("ca_certificate_identifier").(string)) + } + if d.HasChange("copy_tags_to_snapshot") { input.CopyTagsToSnapshot = aws.Bool(d.Get("copy_tags_to_snapshot").(bool)) } diff --git a/internal/service/rds/cluster_migrate.go b/internal/service/rds/cluster_migrate.go index 89ecf9a30e7..66c8c7c24ad 100644 --- a/internal/service/rds/cluster_migrate.go +++ b/internal/service/rds/cluster_migrate.go @@ -143,7 +143,7 @@ func resourceClusterResourceV0() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - Default: EngineModeProvisioned, + Default: engineModeProvisioned, }, names.AttrEngineVersion: { Type: 
schema.TypeString, diff --git a/internal/service/rds/cluster_test.go b/internal/service/rds/cluster_test.go index 506b76dc2c7..8087b8a694b 100644 --- a/internal/service/rds/cluster_test.go +++ b/internal/service/rds/cluster_test.go @@ -77,6 +77,8 @@ func TestAccRDSCluster_basic(t *testing.T) { testAccCheckClusterExists(ctx, resourceName, &dbCluster), acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "rds", fmt.Sprintf("cluster:%s", rName)), resource.TestCheckResourceAttr(resourceName, "backtrack_window", acctest.Ct0), + resource.TestCheckNoResourceAttr(resourceName, "ca_certificate_identifier"), + resource.TestCheckNoResourceAttr(resourceName, "ca_certificate_valid_till"), resource.TestCheckResourceAttr(resourceName, names.AttrClusterIdentifier, rName), resource.TestCheckResourceAttr(resourceName, "cluster_identifier_prefix", ""), resource.TestCheckResourceAttrSet(resourceName, "cluster_resource_id"), @@ -481,6 +483,30 @@ func TestAccRDSCluster_availabilityZones(t *testing.T) { }) } +func TestAccRDSCluster_availabilityZones_caCertificateIdentifier(t *testing.T) { + ctx := acctest.Context(t) + var dbCluster rds.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_availabilityZones_caCertificateIdentifier(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + resource.TestCheckResourceAttrSet(resourceName, "ca_certificate_identifier"), + resource.TestCheckResourceAttrSet(resourceName, "ca_certificate_valid_till"), + ), + }, + }, + }) +} + func TestAccRDSCluster_storageTypeIo1(t *testing.T) { if testing.Short() { 
t.Skip("skipping long-running test in short mode") @@ -1361,6 +1387,7 @@ func TestAccRDSCluster_engineVersion(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName, &dbCluster), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, tfrds.ClusterEngineAuroraPostgreSQL), + resource.TestCheckResourceAttr(resourceName, "engine_lifecycle_support", "open-source-rds-extended-support"), resource.TestCheckResourceAttrPair(resourceName, names.AttrEngineVersion, dataSourceName, names.AttrVersion), ), }, @@ -2583,6 +2610,34 @@ func TestAccRDSCluster_NoDeleteAutomatedBackups(t *testing.T) { }) } +func TestAccRDSCluster_engineLifecycleSupport_disabled(t *testing.T) { + ctx := acctest.Context(t) + var dbCluster rds.DBCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccClusterConfig_engineLifecycleSupport_disabled(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckClusterExists(ctx, resourceName, &dbCluster), + acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "rds", fmt.Sprintf("cluster:%s", rName)), + resource.TestCheckResourceAttr(resourceName, "backtrack_window", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, names.AttrClusterIdentifier, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrEngine, tfrds.ClusterEngineAuroraPostgreSQL), + resource.TestCheckResourceAttr(resourceName, "engine_lifecycle_support", "open-source-rds-extended-support-disabled"), + ), + }, + testAccClusterImportStep(resourceName), + }, + }) +} + func testAccCheckClusterDestroy(ctx context.Context) resource.TestCheckFunc { 
return func(s *terraform.State) error { return testAccCheckClusterDestroyWithProvider(ctx)(s, acctest.Provider) @@ -3108,6 +3163,34 @@ resource "aws_rds_cluster" "test" { `, rName, tfrds.ClusterEngineAuroraMySQL)) } +func testAccClusterConfig_availabilityZones_caCertificateIdentifier(rName string) string { + return acctest.ConfigCompose(testAccConfig_ClusterSubnetGroup(rName), fmt.Sprintf(` +data "aws_rds_orderable_db_instance" "test" { + engine = %[2]q + engine_latest_version = true + preferred_instance_classes = [%[3]s] + supports_clusters = true + supports_iops = true +} + +resource "aws_rds_cluster" "test" { + apply_immediately = true + db_subnet_group_name = aws_db_subnet_group.test.name + ca_certificate_identifier = "rds-ca-2019" + cluster_identifier = %[1]q + engine = data.aws_rds_orderable_db_instance.test.engine + engine_version = data.aws_rds_orderable_db_instance.test.engine_version + storage_type = data.aws_rds_orderable_db_instance.test.storage_type + allocated_storage = 100 + iops = 1000 + db_cluster_instance_class = "db.r6gd.xlarge" + master_password = "avoid-plaintext-passwords" + master_username = "tfacctest" + skip_final_snapshot = true +} +`, rName, tfrds.ClusterEngineMySQL, mainInstanceClasses)) +} + func testAccClusterConfig_storageType(rName string, sType string) string { return acctest.ConfigCompose( testAccConfig_ClusterSubnetGroup(rName), @@ -5169,3 +5252,17 @@ resource "aws_rds_cluster" "test" { } `, rName) } + +func testAccClusterConfig_engineLifecycleSupport_disabled(rName string) string { + return fmt.Sprintf(` +resource "aws_rds_cluster" "test" { + cluster_identifier = %[1]q + database_name = "test" + engine = %[2]q + master_username = "tfacctest" + master_password = "avoid-plaintext-passwords" + skip_final_snapshot = true + engine_lifecycle_support = "open-source-rds-extended-support-disabled" +} +`, rName, tfrds.ClusterEngineAuroraPostgreSQL) +} diff --git a/internal/service/rds/consts.go b/internal/service/rds/consts.go index 
1d3d708af96..fe7e5365271 100644 --- a/internal/service/rds/consts.go +++ b/internal/service/rds/consts.go @@ -158,34 +158,46 @@ func ClusterInstanceEngine_Values() []string { } const ( - GlobalClusterEngineAurora = "aurora" - GlobalClusterEngineAuroraMySQL = "aurora-mysql" - GlobalClusterEngineAuroraPostgreSQL = "aurora-postgresql" + globalClusterEngineAurora = "aurora" + globalClusterEngineAuroraMySQL = "aurora-mysql" + globalClusterEngineAuroraPostgreSQL = "aurora-postgresql" ) -func GlobalClusterEngine_Values() []string { +func globalClusterEngine_Values() []string { return []string{ - GlobalClusterEngineAurora, - GlobalClusterEngineAuroraMySQL, - GlobalClusterEngineAuroraPostgreSQL, + globalClusterEngineAurora, + globalClusterEngineAuroraMySQL, + globalClusterEngineAuroraPostgreSQL, } } const ( - EngineModeGlobal = "global" - EngineModeMultiMaster = "multimaster" - EngineModeParallelQuery = "parallelquery" - EngineModeProvisioned = "provisioned" - EngineModeServerless = "serverless" + engineModeGlobal = "global" + engineModeMultiMaster = "multimaster" + engineModeParallelQuery = "parallelquery" + engineModeProvisioned = "provisioned" + engineModeServerless = "serverless" ) -func EngineMode_Values() []string { +func engineMode_Values() []string { return []string{ - EngineModeGlobal, - EngineModeMultiMaster, - EngineModeParallelQuery, - EngineModeProvisioned, - EngineModeServerless, + engineModeGlobal, + engineModeMultiMaster, + engineModeParallelQuery, + engineModeProvisioned, + engineModeServerless, + } +} + +const ( + engineLifecycleSupport = "open-source-rds-extended-support" + engineLifecycleSupportDisabled = "open-source-rds-extended-support-disabled" +) + +func engineLifecycleSupport_Values() []string { + return []string{ + engineLifecycleSupport, + engineLifecycleSupportDisabled, } } diff --git a/internal/service/rds/exports_test.go b/internal/service/rds/exports_test.go index aa1c21e76b5..2b042e2d7c5 100644 --- a/internal/service/rds/exports_test.go +++ 
b/internal/service/rds/exports_test.go @@ -5,6 +5,7 @@ package rds // Exports for use in tests only. var ( + ResourceCertificate = resourceCertificate ResourceEventSubscription = resourceEventSubscription ResourceProxy = resourceProxy ResourceProxyDefaultTargetGroup = resourceProxyDefaultTargetGroup @@ -17,6 +18,7 @@ var ( FindDBProxyEndpointByTwoPartKey = findDBProxyEndpointByTwoPartKey FindDBProxyTargetByFourPartKey = findDBProxyTargetByFourPartKey FindDBSubnetGroupByName = findDBSubnetGroupByName + FindDefaultCertificate = findDefaultCertificate FindDefaultDBProxyTargetGroupByDBProxyName = findDefaultDBProxyTargetGroupByDBProxyName FindEventSubscriptionByID = findEventSubscriptionByID ListTags = listTags diff --git a/internal/service/rds/global_cluster.go b/internal/service/rds/global_cluster.go index 26f9e54c0e8..56b08ce8039 100644 --- a/internal/service/rds/global_cluster.go +++ b/internal/service/rds/global_cluster.go @@ -64,7 +64,13 @@ func ResourceGlobalCluster() *schema.Resource { Computed: true, ForceNew: true, ConflictsWith: []string{"source_db_cluster_identifier"}, - ValidateFunc: validation.StringInSlice(GlobalClusterEngine_Values(), false), + ValidateFunc: validation.StringInSlice(globalClusterEngine_Values(), false), + }, + "engine_lifecycle_support": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(engineLifecycleSupport_Values(), false), }, names.AttrEngineVersion: { Type: schema.TypeString, @@ -144,6 +150,10 @@ func resourceGlobalClusterCreate(ctx context.Context, d *schema.ResourceData, me input.Engine = aws.String(v.(string)) } + if v, ok := d.GetOk("engine_lifecycle_support"); ok { + input.EngineLifecycleSupport = aws.String(v.(string)) + } + if v, ok := d.GetOk(names.AttrEngineVersion); ok { input.EngineVersion = aws.String(v.(string)) } @@ -160,7 +170,7 @@ func resourceGlobalClusterCreate(ctx context.Context, d *schema.ResourceData, me // since we cannot have Engine default after adding 
SourceDBClusterIdentifier: // InvalidParameterValue: When creating standalone global cluster, value for engineName should be specified if input.Engine == nil && input.SourceDBClusterIdentifier == nil { - input.Engine = aws.String(GlobalClusterEngineAurora) + input.Engine = aws.String(globalClusterEngineAurora) } output, err := conn.CreateGlobalClusterWithContext(ctx, input) @@ -198,6 +208,7 @@ func resourceGlobalClusterRead(ctx context.Context, d *schema.ResourceData, meta d.Set(names.AttrDatabaseName, globalCluster.DatabaseName) d.Set(names.AttrDeletionProtection, globalCluster.DeletionProtection) d.Set(names.AttrEngine, globalCluster.Engine) + d.Set("engine_lifecycle_support", globalCluster.EngineLifecycleSupport) d.Set("global_cluster_identifier", globalCluster.GlobalClusterIdentifier) if err := d.Set("global_cluster_members", flattenGlobalClusterMembers(globalCluster.GlobalClusterMembers)); err != nil { return sdkdiag.AppendErrorf(diags, "setting global_cluster_members: %s", err) diff --git a/internal/service/rds/global_cluster_test.go b/internal/service/rds/global_cluster_test.go index c9eb09b4653..6b131a36f91 100644 --- a/internal/service/rds/global_cluster_test.go +++ b/internal/service/rds/global_cluster_test.go @@ -119,6 +119,7 @@ func TestAccRDSGlobalCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrDatabaseName, ""), resource.TestCheckResourceAttr(resourceName, names.AttrDeletionProtection, acctest.CtFalse), resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "aurora-postgresql"), + resource.TestCheckResourceAttr(resourceName, "engine_lifecycle_support", "open-source-rds-extended-support"), resource.TestCheckResourceAttrSet(resourceName, names.AttrEngineVersion), resource.TestCheckResourceAttr(resourceName, "global_cluster_identifier", rName), resource.TestMatchResourceAttr(resourceName, "global_cluster_resource_id", regexache.MustCompile(`cluster-.+`)), @@ -230,6 +231,38 @@ func 
TestAccRDSGlobalCluster_deletionProtection(t *testing.T) { }) } +func TestAccRDSGlobalCluster_engineLifecycleSupport_disabled(t *testing.T) { + ctx := acctest.Context(t) + var globalCluster1 rds.GlobalCluster + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_rds_global_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheckGlobalCluster(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckGlobalClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccGlobalClusterConfig_engineLifecycleSupport_disabled(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGlobalClusterExists(ctx, resourceName, &globalCluster1), + acctest.CheckResourceAttrGlobalARN(resourceName, names.AttrARN, "rds", fmt.Sprintf("global-cluster:%s", rName)), + resource.TestCheckResourceAttr(resourceName, names.AttrDatabaseName, ""), + resource.TestCheckResourceAttr(resourceName, names.AttrEngine, "aurora-postgresql"), + resource.TestCheckResourceAttr(resourceName, "engine_lifecycle_support", "open-source-rds-extended-support-disabled"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrEngineVersion), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccRDSGlobalCluster_EngineVersion_updateMinor(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -675,6 +708,16 @@ resource "aws_rds_global_cluster" "test" { `, deletionProtection, rName) } +func testAccGlobalClusterConfig_engineLifecycleSupport_disabled(rName string) string { + return fmt.Sprintf(` +resource "aws_rds_global_cluster" "test" { + global_cluster_identifier = %[1]q + engine = "aurora-postgresql" + engine_lifecycle_support = "open-source-rds-extended-support-disabled" +} +`, rName) +} + func 
testAccGlobalClusterConfig_engineVersion(rName, engine string) string { return fmt.Sprintf(` data "aws_rds_engine_version" "default" { diff --git a/internal/service/rds/instance.go b/internal/service/rds/instance.go index 834de44196d..0c8f681b759 100644 --- a/internal/service/rds/instance.go +++ b/internal/service/rds/instance.go @@ -308,6 +308,12 @@ func ResourceInstance() *schema.Resource { return strings.ToLower(value) }, }, + "engine_lifecycle_support": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(engineLifecycleSupport_Values(), false), + }, names.AttrEngineVersion: { Type: schema.TypeString, Optional: true, @@ -866,18 +872,22 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in input.VpcSecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set)) } - outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, - func() (interface{}, error) { - return conn.CreateDBInstanceReadReplicaWithContext(ctx, input) - }, - errCodeInvalidParameterValue, "ENHANCED_MONITORING") + output, err := dbInstanceCreateReadReplica(ctx, conn, input) + + // Some engines (e.g. PostgreSQL) you cannot specify a custom parameter group for the read replica during creation. + // See https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html#USER_ReadRepl.XRgn.Cnsdr. 
+ if input.DBParameterGroupName != nil && tfawserr.ErrMessageContains(err, "InvalidParameterCombination", "A parameter group can't be specified during Read Replica creation for the following DB engine") { + input.DBParameterGroupName = nil + + output, err = dbInstanceCreateReadReplica(ctx, conn, input) + } + if err != nil { return sdkdiag.AppendErrorf(diags, "creating RDS DB Instance (read replica) (%s): %s", identifier, err) } - output := outputRaw.(*rds.CreateDBInstanceReadReplicaOutput) - resourceID = aws.StringValue(output.DBInstance.DbiResourceId) + d.SetId(resourceID) if v, ok := d.GetOk(names.AttrAllowMajorVersionUpgrade); ok { // Having allowing_major_version_upgrade by itself should not trigger ModifyDBInstance @@ -996,6 +1006,10 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in input.DedicatedLogVolume = aws.Bool(v.(bool)) } + if v, ok := d.GetOk("engine_lifecycle_support"); ok { + input.EngineLifecycleSupport = aws.String(v.(string)) + } + if v, ok := d.GetOk("iam_database_authentication_enabled"); ok { input.EnableIAMDatabaseAuthentication = aws.Bool(v.(bool)) } @@ -1106,10 +1120,10 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "creating RDS DB Instance (restore from S3) (%s): %s", identifier, err) } - if outputRaw != nil { - output := outputRaw.(*rds.RestoreDBInstanceFromS3Output) - resourceID = aws.StringValue(output.DBInstance.DbiResourceId) - } + output := outputRaw.(*rds.RestoreDBInstanceFromS3Output) + + resourceID = aws.StringValue(output.DBInstance.DbiResourceId) + d.SetId(resourceID) } else if v, ok := d.GetOk("snapshot_identifier"); ok { input := &rds.RestoreDBInstanceFromDBSnapshotInput{ AutoMinorVersionUpgrade: aws.Bool(d.Get(names.AttrAutoMinorVersionUpgrade).(bool)), @@ -1211,6 +1225,10 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in input.Engine = aws.String(engine) } + if v, ok := 
d.GetOk("engine_lifecycle_support"); ok { + input.EngineLifecycleSupport = aws.String(v.(string)) + } + if v, ok := d.GetOk(names.AttrEngineVersion); ok { modifyDbInstanceInput.EngineVersion = aws.String(v.(string)) requiresModifyDbInstance = true @@ -1339,6 +1357,12 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in }, ) + var output *rds.RestoreDBInstanceFromDBSnapshotOutput + + if err == nil { + output = outputRaw.(*rds.RestoreDBInstanceFromDBSnapshotOutput) + } + // When using SQL Server engine with MultiAZ enabled, its not // possible to immediately enable mirroring since // BackupRetentionPeriod is not available as a parameter to @@ -1351,17 +1375,15 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in input.MultiAZ = aws.Bool(false) modifyDbInstanceInput.MultiAZ = aws.Bool(true) requiresModifyDbInstance = true - _, err = conn.RestoreDBInstanceFromDBSnapshotWithContext(ctx, input) + output, err = conn.RestoreDBInstanceFromDBSnapshotWithContext(ctx, input) } if err != nil { return sdkdiag.AppendErrorf(diags, "creating RDS DB Instance (restore from snapshot) (%s): %s", identifier, err) } - if outputRaw != nil { - output := outputRaw.(*rds.RestoreDBInstanceFromDBSnapshotOutput) - resourceID = aws.StringValue(output.DBInstance.DbiResourceId) - } + resourceID = aws.StringValue(output.DBInstance.DbiResourceId) + d.SetId(resourceID) } else if v, ok := d.GetOk("restore_to_point_in_time"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { tfMap := v.([]interface{})[0].(map[string]interface{}) input := &rds.RestoreDBInstanceToPointInTimeInput{ @@ -1456,6 +1478,10 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in input.Engine = aws.String(v.(string)) } + if v, ok := d.GetOk("engine_lifecycle_support"); ok { + input.EngineLifecycleSupport = aws.String(v.(string)) + } + if v, ok := d.GetOk("iam_database_authentication_enabled"); ok { 
input.EnableIAMDatabaseAuthentication = aws.Bool(v.(bool)) } @@ -1541,14 +1567,15 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in return false, err }, ) + if err != nil { return sdkdiag.AppendErrorf(diags, "creating RDS DB Instance (restore to point-in-time) (%s): %s", identifier, err) } - if outputRaw != nil { - output := outputRaw.(*rds.RestoreDBInstanceToPointInTimeOutput) - resourceID = aws.StringValue(output.DBInstance.DbiResourceId) - } + output := outputRaw.(*rds.RestoreDBInstanceToPointInTimeOutput) + + resourceID = aws.StringValue(output.DBInstance.DbiResourceId) + d.SetId(resourceID) } else { if _, ok := d.GetOk(names.AttrAllocatedStorage); !ok { diags = sdkdiag.AppendErrorf(diags, `"allocated_storage": required field is not set`) @@ -1640,6 +1667,10 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in input.EnableCloudwatchLogsExports = flex.ExpandStringSet(v.(*schema.Set)) } + if v, ok := d.GetOk("engine_lifecycle_support"); ok { + input.EngineLifecycleSupport = aws.String(v.(string)) + } + if v, ok := d.GetOk("iam_database_authentication_enabled"); ok { input.EnableIAMDatabaseAuthentication = aws.Bool(v.(bool)) } @@ -1751,12 +1782,15 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in return false, err }, ) + if err != nil { return sdkdiag.AppendErrorf(diags, "creating RDS DB Instance (%s): %s", identifier, err) } output := outputRaw.(*rds.CreateDBInstanceOutput) + resourceID = aws.StringValue(output.DBInstance.DbiResourceId) + d.SetId(resourceID) // This is added here to avoid unnecessary modification when ca_cert_identifier is the default one if v, ok := d.GetOk("ca_cert_identifier"); ok && v.(string) != aws.StringValue(output.DBInstance.CACertificateIdentifier) { @@ -1775,7 +1809,9 @@ func resourceInstanceCreate(ctx context.Context, d *schema.ResourceData, meta in resourceID = aws.StringValue(instance.DbiResourceId) } - d.SetId(resourceID) + if d.Id() 
== "" { + d.SetId(resourceID) + } if requiresModifyDbInstance { modifyDbInstanceInput.DBInstanceIdentifier = aws.String(identifier) @@ -1819,9 +1855,10 @@ func resourceInstanceRead(ctx context.Context, d *schema.ResourceData, meta inte v, err = findDBInstanceByIDSDKv1(ctx, conn, d.Id()) } else { v, err = findDBInstanceByIDSDKv1(ctx, conn, d.Id()) - if tfresource.NotFound(err) { + if tfresource.NotFound(err) { // nosemgrep:ci.semgrep.errors.notfound-without-err-checks + // Retry with `identifier` v, err = findDBInstanceByIDSDKv1(ctx, conn, d.Get(names.AttrIdentifier).(string)) - if tfresource.NotFound(err) { + if tfresource.NotFound(err) { // nosemgrep:ci.semgrep.errors.notfound-without-err-checks log.Printf("[WARN] RDS DB Instance (%s) not found, removing from state", d.Get(names.AttrIdentifier).(string)) d.SetId("") return diags @@ -1871,6 +1908,7 @@ func resourceInstanceRead(ctx context.Context, d *schema.ResourceData, meta inte } d.Set("enabled_cloudwatch_logs_exports", aws.StringValueSlice(v.EnabledCloudwatchLogsExports)) d.Set(names.AttrEngine, v.Engine) + d.Set("engine_lifecycle_support", v.EngineLifecycleSupport) d.Set("iam_database_authentication_enabled", v.IAMDatabaseAuthenticationEnabled) d.Set(names.AttrIdentifier, v.DBInstanceIdentifier) d.Set("identifier_prefix", create.NamePrefixFromName(aws.StringValue(v.DBInstanceIdentifier))) @@ -2189,6 +2227,105 @@ func resourceInstanceUpdate(ctx context.Context, d *schema.ResourceData, meta in return append(diags, resourceInstanceRead(ctx, d, meta)...) 
} +func resourceInstanceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).RDSConn(ctx) + + input := &rds.DeleteDBInstanceInput{ + DBInstanceIdentifier: aws.String(d.Get(names.AttrIdentifier).(string)), + DeleteAutomatedBackups: aws.Bool(d.Get("delete_automated_backups").(bool)), + } + + if d.Get("skip_final_snapshot").(bool) { + input.SkipFinalSnapshot = aws.Bool(true) + } else { + input.SkipFinalSnapshot = aws.Bool(false) + + if v, ok := d.GetOk(names.AttrFinalSnapshotIdentifier); ok { + input.FinalDBSnapshotIdentifier = aws.String(v.(string)) + } else { + return sdkdiag.AppendErrorf(diags, "final_snapshot_identifier is required when skip_final_snapshot is false") + } + } + + log.Printf("[DEBUG] Deleting RDS DB Instance: %s", d.Get(names.AttrIdentifier).(string)) + _, err := conn.DeleteDBInstanceWithContext(ctx, input) + + if tfawserr.ErrMessageContains(err, errCodeInvalidParameterCombination, "disable deletion pro") { + if v, ok := d.GetOk(names.AttrDeletionProtection); (!ok || !v.(bool)) && d.Get(names.AttrApplyImmediately).(bool) { + _, ierr := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), + func() (interface{}, error) { + return conn.ModifyDBInstanceWithContext(ctx, &rds.ModifyDBInstanceInput{ + ApplyImmediately: aws.Bool(true), + DBInstanceIdentifier: aws.String(d.Get(names.AttrIdentifier).(string)), + DeletionProtection: aws.Bool(false), + }) + }, + func(err error) (bool, error) { + // Retry for IAM eventual consistency. + if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "IAM role ARN value is invalid or") { + return true, err + } + + // "InvalidDBInstanceState: RDS is configuring Enhanced Monitoring or Performance Insights for this DB instance. Try your request later." 
+ if tfawserr.ErrMessageContains(err, rds.ErrCodeInvalidDBInstanceStateFault, "your request later") { + return true, err + } + + return false, err + }, + ) + + if ierr != nil { + return sdkdiag.AppendErrorf(diags, "updating RDS DB Instance (%s): %s", d.Get(names.AttrIdentifier).(string), err) + } + + if _, ierr := waitDBInstanceAvailableSDKv1(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); ierr != nil { + return sdkdiag.AppendErrorf(diags, "waiting for RDS DB Instance (%s) update: %s", d.Get(names.AttrIdentifier).(string), ierr) + } + + _, err = conn.DeleteDBInstanceWithContext(ctx, input) + } + } + + if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBInstanceNotFoundFault) { + return diags + } + + if err != nil && !tfawserr.ErrMessageContains(err, rds.ErrCodeInvalidDBInstanceStateFault, "is already being deleted") { + return sdkdiag.AppendErrorf(diags, "deleting RDS DB Instance (%s): %s", d.Get(names.AttrIdentifier).(string), err) + } + + if _, err := waitDBInstanceDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for RDS DB Instance (%s) delete: %s", d.Get(names.AttrIdentifier).(string), err) + } + + return diags +} + +func resourceInstanceImport(_ context.Context, d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required. 
+ d.Set("skip_final_snapshot", true) + d.Set("delete_automated_backups", true) + return []*schema.ResourceData{d}, nil +} + +func dbInstanceCreateReadReplica(ctx context.Context, conn *rds.RDS, input *rds.CreateDBInstanceReadReplicaInput) (*rds.CreateDBInstanceReadReplicaOutput, error) { + outputRaw, err := tfresource.RetryWhenAWSErrMessageContains(ctx, propagationTimeout, + func() (interface{}, error) { + return conn.CreateDBInstanceReadReplicaWithContext(ctx, input) + }, + errCodeInvalidParameterValue, "ENHANCED_MONITORING") + + if err != nil { + return nil, err + } + + return outputRaw.(*rds.CreateDBInstanceReadReplicaOutput), nil +} + func dbInstancePopulateModify(input *rds_sdkv2.ModifyDBInstanceInput, d *schema.ResourceData) bool { needsModify := false @@ -2456,91 +2593,6 @@ func dbInstanceModify(ctx context.Context, conn *rds_sdkv2.Client, resourceID st return nil } -func resourceInstanceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RDSConn(ctx) - - input := &rds.DeleteDBInstanceInput{ - DBInstanceIdentifier: aws.String(d.Get(names.AttrIdentifier).(string)), - DeleteAutomatedBackups: aws.Bool(d.Get("delete_automated_backups").(bool)), - } - - if d.Get("skip_final_snapshot").(bool) { - input.SkipFinalSnapshot = aws.Bool(true) - } else { - input.SkipFinalSnapshot = aws.Bool(false) - - if v, ok := d.GetOk(names.AttrFinalSnapshotIdentifier); ok { - input.FinalDBSnapshotIdentifier = aws.String(v.(string)) - } else { - return sdkdiag.AppendErrorf(diags, "final_snapshot_identifier is required when skip_final_snapshot is false") - } - } - - log.Printf("[DEBUG] Deleting RDS DB Instance: %s", d.Get(names.AttrIdentifier).(string)) - _, err := conn.DeleteDBInstanceWithContext(ctx, input) - - if tfawserr.ErrMessageContains(err, errCodeInvalidParameterCombination, "disable deletion pro") { - if v, ok := d.GetOk(names.AttrDeletionProtection); (!ok || !v.(bool)) && 
d.Get(names.AttrApplyImmediately).(bool) { - _, ierr := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), - func() (interface{}, error) { - return conn.ModifyDBInstanceWithContext(ctx, &rds.ModifyDBInstanceInput{ - ApplyImmediately: aws.Bool(true), - DBInstanceIdentifier: aws.String(d.Get(names.AttrIdentifier).(string)), - DeletionProtection: aws.Bool(false), - }) - }, - func(err error) (bool, error) { - // Retry for IAM eventual consistency. - if tfawserr.ErrMessageContains(err, errCodeInvalidParameterValue, "IAM role ARN value is invalid or") { - return true, err - } - - // "InvalidDBInstanceState: RDS is configuring Enhanced Monitoring or Performance Insights for this DB instance. Try your request later." - if tfawserr.ErrMessageContains(err, rds.ErrCodeInvalidDBInstanceStateFault, "your request later") { - return true, err - } - - return false, err - }, - ) - - if ierr != nil { - return sdkdiag.AppendErrorf(diags, "updating RDS DB Instance (%s): %s", d.Get(names.AttrIdentifier).(string), err) - } - - if _, ierr := waitDBInstanceAvailableSDKv1(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); ierr != nil { - return sdkdiag.AppendErrorf(diags, "waiting for RDS DB Instance (%s) update: %s", d.Get(names.AttrIdentifier).(string), ierr) - } - - _, err = conn.DeleteDBInstanceWithContext(ctx, input) - } - } - - if tfawserr.ErrCodeEquals(err, rds.ErrCodeDBInstanceNotFoundFault) { - return diags - } - - if err != nil && !tfawserr.ErrMessageContains(err, rds.ErrCodeInvalidDBInstanceStateFault, "is already being deleted") { - return sdkdiag.AppendErrorf(diags, "deleting RDS DB Instance (%s): %s", d.Get(names.AttrIdentifier).(string), err) - } - - if _, err := waitDBInstanceDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for RDS DB Instance (%s) delete: %s", d.Get(names.AttrIdentifier).(string), err) - } - - return diags -} - -func resourceInstanceImport(_ context.Context, d 
*schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { - // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched - // from any API call, so we need to default skip_final_snapshot to true so - // that final_snapshot_identifier is not required. - d.Set("skip_final_snapshot", true) - d.Set("delete_automated_backups", true) - return []*schema.ResourceData{d}, nil -} - // See https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#gp3-storage. func isStorageTypeGP3BelowAllocatedStorageThreshold(d *schema.ResourceData) bool { if storageType := d.Get(names.AttrStorageType).(string); storageType != storageTypeGP3 { diff --git a/internal/service/rds/instance_test.go b/internal/service/rds/instance_test.go index afc3c14e1e5..e41a8e8f2b7 100644 --- a/internal/service/rds/instance_test.go +++ b/internal/service/rds/instance_test.go @@ -213,6 +213,50 @@ func TestAccRDSInstance_disappears(t *testing.T) { }) } +func TestAccRDSInstance_engineLifecycleSupport_disabled(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var v rds.DBInstance + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfig_engineLifecycleSupport_disabled(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExists(ctx, resourceName, &v), + testAccCheckInstanceAttributes(&v), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "rds", regexache.MustCompile(`db:.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrEngine, tfrds.InstanceEngineMySQL), + 
resource.TestCheckResourceAttr(resourceName, "engine_lifecycle_support", "open-source-rds-extended-support-disabled"), + resource.TestCheckResourceAttrSet(resourceName, names.AttrEngineVersion), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrApplyImmediately, + names.AttrFinalSnapshotIdentifier, + names.AttrPassword, + "manage_master_user_password", + "skip_final_snapshot", + "delete_automated_backups", + }, + }, + }, + }) +} + func TestAccRDSInstance_Versions_onlyMajor(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -2213,6 +2257,50 @@ func TestAccRDSInstance_ReplicateSourceDB_CrossRegion_parameterGroupNameEquivale }) } +func TestAccRDSInstance_ReplicateSourceDB_CrossRegion_parameterGroupNamePostgres(t *testing.T) { + ctx := acctest.Context(t) + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + var dbInstance, sourceDbInstance rds.DBInstance + var providers []*schema.Provider + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + sourceResourceName := "aws_db_instance.source" + resourceName := "aws_db_instance.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.RDSServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), + CheckDestroy: testAccCheckDBInstanceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccInstanceConfig_ReplicateSourceDB_CrossRegion_ParameterGroupName_postgres(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckDBInstanceExistsWithProvider(ctx, sourceResourceName, &sourceDbInstance, acctest.RegionProviderFunc(acctest.AlternateRegion(), &providers)), + resource.TestCheckResourceAttr(sourceResourceName, names.AttrParameterGroupName, fmt.Sprintf("%s-source", rName)), + testAccCheckDBInstanceExistsWithProvider(ctx, 
resourceName, &dbInstance, acctest.RegionProviderFunc(acctest.Region(), &providers)), + resource.TestCheckResourceAttrPair(resourceName, "replicate_source_db", sourceResourceName, names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, names.AttrParameterGroupName, "aws_db_parameter_group.test", names.AttrName), + testAccCheckInstanceParameterApplyStatusInSync(&dbInstance), + testAccCheckInstanceParameterApplyStatusInSync(&sourceDbInstance), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + names.AttrApplyImmediately, + names.AttrPassword, + }, + }, + }, + }) +} + func TestAccRDSInstance_ReplicateSourceDB_CrossRegion_characterSet(t *testing.T) { t.Skip("Skipping due to upstream error") ctx := acctest.Context(t) @@ -6696,6 +6784,31 @@ resource "aws_db_instance" "test" { `) } +func testAccInstanceConfig_engineLifecycleSupport_disabled(rName string) string { + return acctest.ConfigCompose( + testAccInstanceConfig_orderableClassMySQL(), + fmt.Sprintf(` +resource "aws_db_instance" "test" { + identifier = %[1]q + allocated_storage = 10 + backup_retention_period = 0 + engine = data.aws_rds_orderable_db_instance.test.engine + engine_version = data.aws_rds_orderable_db_instance.test.engine_version + engine_lifecycle_support = "open-source-rds-extended-support-disabled" + instance_class = data.aws_rds_orderable_db_instance.test.instance_class + db_name = "test" + parameter_group_name = "default.${data.aws_rds_engine_version.default.parameter_group_family}" + skip_final_snapshot = true + password = "avoid-plaintext-passwords" + username = "tfacctest" + # Maintenance Window is stored in lower case in the API, though not strictly + # documented. Terraform will downcase this to match (as opposed to throw a + # validation error). 
+ maintenance_window = "Fri:09:00-Fri:09:30" +} +`, rName)) +} + func testAccInstanceConfig_majorVersionOnly(rName string) string { return acctest.ConfigCompose( testAccInstanceConfig_orderableClassMySQL(), @@ -9961,6 +10074,66 @@ data "aws_rds_orderable_db_instance" "test" { `, rName, tfrds.InstanceEngineOracleEnterprise, strings.Replace(mainInstanceClasses, "db.t3.small", "frodo", 1), parameters)) } +func testAccInstanceConfig_ReplicateSourceDB_CrossRegion_ParameterGroupName_postgres(rName string) string { + parameters := ` +parameter { + name = "client_encoding" + value = "UTF8" + apply_method = "pending-reboot" +} +` + return acctest.ConfigCompose( + acctest.ConfigMultipleRegionProvider(2), + testAccInstanceConfig_orderableClassPostgres(), fmt.Sprintf(` +resource "aws_db_instance" "test" { + provider = "aws" + + identifier = %[1]q + replicate_source_db = aws_db_instance.source.arn + instance_class = data.aws_rds_orderable_db_instance.test.instance_class + skip_final_snapshot = true + apply_immediately = true + parameter_group_name = aws_db_parameter_group.test.name +} + +resource "aws_db_parameter_group" "test" { + provider = "aws" + + family = data.aws_rds_engine_version.default.parameter_group_family + name = %[1]q + + %[2]s +} + +resource "aws_db_instance" "source" { + provider = "awsalternate" + + identifier = "%[1]s-source" + allocated_storage = 20 + engine = data.aws_rds_orderable_db_instance.test.engine + engine_version = data.aws_rds_orderable_db_instance.test.engine_version + instance_class = data.aws_rds_orderable_db_instance.test.instance_class + storage_type = data.aws_rds_orderable_db_instance.test.storage_type + db_name = "MAINDB" + username = "oadmin" + password = "avoid-plaintext-passwords" + skip_final_snapshot = true + apply_immediately = true + backup_retention_period = 3 + parameter_group_name = aws_db_parameter_group.source.name +} + +resource "aws_db_parameter_group" "source" { + provider = "awsalternate" + + family = 
data.aws_rds_engine_version.default.parameter_group_family + name = "%[1]s-source" + + %[2]s +} +`, rName, parameters)) +} + func testAccInstanceConfig_ReplicateSourceDB_CrossRegion_CharacterSet(rName string) string { return acctest.ConfigCompose( acctest.ConfigMultipleRegionProvider(2), diff --git a/internal/service/rds/service_endpoint_resolver_gen.go b/internal/service/rds/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..423d25fb15b --- /dev/null +++ b/internal/service/rds/service_endpoint_resolver_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package rds + +import ( + "context" + "fmt" + "net" + "net/url" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + rds_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rds" + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} + +var _ rds_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver rds_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: rds_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params rds_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + 
"tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up rds endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*rds_sdkv2.Options) { + return func(o *rds_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/rds/service_endpoints_gen_test.go b/internal/service/rds/service_endpoints_gen_test.go index 7c1d9d5facf..3c640b715c8 100644 --- a/internal/service/rds/service_endpoints_gen_test.go +++ b/internal/service/rds/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -255,24 +257,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }) } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) 
(url.URL, error) { r := rds_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), rds_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := rds_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), rds_sdkv2.EndpointParameters{ @@ -280,14 +282,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -364,16 +366,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint 
%q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/rds/service_package_gen.go b/internal/service/rds/service_package_gen.go index ba69fdb4711..ce0328607ae 100644 --- a/internal/service/rds/service_package_gen.go +++ b/internal/service/rds/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package rds @@ -8,7 +8,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" rds_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rds" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" rds_sdkv1 "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -200,6 +199,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: names.AttrARN, }, }, + { + Factory: resourceCertificate, + TypeName: "aws_rds_certificate", + Name: "Default Certificate", + }, { Factory: ResourceCluster, TypeName: "aws_rds_cluster", @@ -278,11 +282,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*r "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return rds_sdkv1.New(sess.Copy(&cfg)), nil @@ -292,19 +293,10 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*r func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*rds_sdkv2.Client, error) { cfg := 
*(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return rds_sdkv2.NewFromConfig(cfg, func(o *rds_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return rds_sdkv2.NewFromConfig(cfg, + rds_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/rds/sweep.go b/internal/service/rds/sweep.go index 23218d07aa4..9a88e4e6609 100644 --- a/internal/service/rds/sweep.go +++ b/internal/service/rds/sweep.go @@ -238,7 +238,7 @@ func sweepClusters(region string) error { d.Set(names.AttrDeletionProtection, false) d.Set("skip_final_snapshot", true) - if engineMode := aws.StringValue(v.EngineMode); engineMode == EngineModeGlobal || engineMode == EngineModeProvisioned { + if engineMode := aws.StringValue(v.EngineMode); engineMode == engineModeGlobal || engineMode == engineModeProvisioned { globalCluster, err := FindGlobalClusterByDBClusterARN(ctx, conn, arn) if err != nil { if !tfresource.NotFound(err) { diff --git a/internal/service/redshift/service_endpoint_resolver_gen.go b/internal/service/redshift/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..236035e47c2 --- /dev/null +++ b/internal/service/redshift/service_endpoint_resolver_gen.go @@ -0,0 +1,146 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package redshift + +import ( + "context" + "fmt" + "net" + "net/url" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + redshift_sdkv2 "github.com/aws/aws-sdk-go-v2/service/redshift" + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up redshift endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} + +var _ redshift_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver redshift_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: redshift_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params redshift_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", 
map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up redshift endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*redshift_sdkv2.Options) { + return func(o *redshift_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/redshift/service_endpoints_gen_test.go b/internal/service/redshift/service_endpoints_gen_test.go index f7b82e45642..850d8658850 100644 --- a/internal/service/redshift/service_endpoints_gen_test.go +++ b/internal/service/redshift/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -255,24 +257,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }) } -func defaultEndpoint(region 
string) string { +func defaultEndpoint(region string) (url.URL, error) { r := redshift_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), redshift_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := redshift_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), redshift_sdkv2.EndpointParameters{ @@ -280,14 +282,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -364,16 +366,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) 
+ } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/redshift/service_package_gen.go b/internal/service/redshift/service_package_gen.go index 279b7e5fd6b..0363c278822 100644 --- a/internal/service/redshift/service_package_gen.go +++ b/internal/service/redshift/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package redshift @@ -8,7 +8,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" redshift_sdkv2 "github.com/aws/aws-sdk-go-v2/service/redshift" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" redshift_sdkv1 "github.com/aws/aws-sdk-go/service/redshift" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -225,11 +224,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*r "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return redshift_sdkv1.New(sess.Copy(&cfg)), nil @@ -239,19 +235,10 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*r func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*redshift_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return redshift_sdkv2.NewFromConfig(cfg, func(o *redshift_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); 
endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return redshift_sdkv2.NewFromConfig(cfg, + redshift_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/redshiftdata/service_endpoint_resolver_gen.go b/internal/service/redshiftdata/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..1dcefd43622 --- /dev/null +++ b/internal/service/redshiftdata/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package redshiftdata + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + redshiftdata_sdkv2 "github.com/aws/aws-sdk-go-v2/service/redshiftdata" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ redshiftdata_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver redshiftdata_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: redshiftdata_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params redshiftdata_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up redshiftdata endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*redshiftdata_sdkv2.Options) { + return func(o *redshiftdata_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/redshiftdata/service_endpoints_gen_test.go b/internal/service/redshiftdata/service_endpoints_gen_test.go index e015f374ca0..c0ee43c23b4 100644 --- a/internal/service/redshiftdata/service_endpoints_gen_test.go +++ b/internal/service/redshiftdata/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := redshiftdata_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), redshiftdata_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := redshiftdata_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), redshiftdata_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -410,16 +412,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/redshiftdata/service_package_gen.go b/internal/service/redshiftdata/service_package_gen.go index ccd809ec732..21d6ae2a271 100644 --- a/internal/service/redshiftdata/service_package_gen.go +++ 
b/internal/service/redshiftdata/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package redshiftdata @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" redshiftdata_sdkv2 "github.com/aws/aws-sdk-go-v2/service/redshiftdata" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -44,19 +43,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*redshiftdata_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return redshiftdata_sdkv2.NewFromConfig(cfg, func(o *redshiftdata_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return redshiftdata_sdkv2.NewFromConfig(cfg, + redshiftdata_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/redshiftserverless/service_endpoint_resolver_gen.go b/internal/service/redshiftserverless/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..a9f40a001da --- /dev/null +++ b/internal/service/redshiftserverless/service_endpoint_resolver_gen.go @@ -0,0 +1,146 @@ +// Code generated by 
internal/generate/servicepackage/main.go; DO NOT EDIT. + +package redshiftserverless + +import ( + "context" + "fmt" + "net" + "net/url" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + redshiftserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/redshiftserverless" + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up redshiftserverless endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} + +var _ redshiftserverless_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver redshiftserverless_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: redshiftserverless_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params redshiftserverless_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": aws_sdkv2.ToString(eps), + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + 
tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up redshiftserverless endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*redshiftserverless_sdkv2.Options) { + return func(o *redshiftserverless_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/redshiftserverless/service_endpoints_gen_test.go b/internal/service/redshiftserverless/service_endpoints_gen_test.go index 0f0671ff261..fc4ca9899a7 100644 --- a/internal/service/redshiftserverless/service_endpoints_gen_test.go +++ b/internal/service/redshiftserverless/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -255,24 +257,24 @@ func 
TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S }) } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := redshiftserverless_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), redshiftserverless_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := redshiftserverless_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), redshiftserverless_sdkv2.EndpointParameters{ @@ -280,14 +282,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callServiceV2(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -364,16 +366,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + 
_, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/redshiftserverless/service_package_gen.go b/internal/service/redshiftserverless/service_package_gen.go index 1caabc829a4..6cc96608022 100644 --- a/internal/service/redshiftserverless/service_package_gen.go +++ b/internal/service/redshiftserverless/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package redshiftserverless @@ -8,7 +8,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" redshiftserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/redshiftserverless" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" redshiftserverless_sdkv1 "github.com/aws/aws-sdk-go/service/redshiftserverless" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -108,11 +107,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*r "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return redshiftserverless_sdkv1.New(sess.Copy(&cfg)), nil @@ -122,19 +118,10 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*r func (p *servicePackage) NewClient(ctx context.Context, 
config map[string]any) (*redshiftserverless_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return redshiftserverless_sdkv2.NewFromConfig(cfg, func(o *redshiftserverless_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return redshiftserverless_sdkv2.NewFromConfig(cfg, + redshiftserverless_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/rekognition/service_endpoint_resolver_gen.go b/internal/service/rekognition/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..5a8fe0a7ff1 --- /dev/null +++ b/internal/service/rekognition/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package rekognition + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + rekognition_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rekognition" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ rekognition_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver rekognition_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: rekognition_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params rekognition_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up rekognition endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err 
+ } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*rekognition_sdkv2.Options) { + return func(o *rekognition_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/rekognition/service_endpoints_gen_test.go b/internal/service/rekognition/service_endpoints_gen_test.go index d4f0c9c1abb..22fde1ae7a8 100644 --- a/internal/service/rekognition/service_endpoints_gen_test.go +++ b/internal/service/rekognition/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := rekognition_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), rekognition_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) 
{ r := rekognition_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), rekognition_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/rekognition/service_package_gen.go b/internal/service/rekognition/service_package_gen.go index dc32dc866c4..eac0885db13 100644 --- a/internal/service/rekognition/service_package_gen.go +++ b/internal/service/rekognition/service_package_gen.go 
@@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package rekognition @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" rekognition_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rekognition" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -51,19 +50,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*rekognition_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return rekognition_sdkv2.NewFromConfig(cfg, func(o *rekognition_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return rekognition_sdkv2.NewFromConfig(cfg, + rekognition_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/resourceexplorer2/service_endpoint_resolver_gen.go b/internal/service/resourceexplorer2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f1bfe5122bf --- /dev/null +++ b/internal/service/resourceexplorer2/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package resourceexplorer2 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + resourceexplorer2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ resourceexplorer2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver resourceexplorer2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: resourceexplorer2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params resourceexplorer2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up resourceexplorer2 endpoint %q: %s", hostname, 
err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*resourceexplorer2_sdkv2.Options) { + return func(o *resourceexplorer2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/resourceexplorer2/service_endpoints_gen_test.go b/internal/service/resourceexplorer2/service_endpoints_gen_test.go index a8a62f8c937..12047d59f13 100644 --- a/internal/service/resourceexplorer2/service_endpoints_gen_test.go +++ b/internal/service/resourceexplorer2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := resourceexplorer2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), resourceexplorer2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func 
defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := resourceexplorer2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), resourceexplorer2_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/resourceexplorer2/service_package_gen.go b/internal/service/resourceexplorer2/service_package_gen.go index 
e37931dde0f..841b5454dab 100644 --- a/internal/service/resourceexplorer2/service_package_gen.go +++ b/internal/service/resourceexplorer2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package resourceexplorer2 @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" resourceexplorer2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -59,19 +58,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*resourceexplorer2_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return resourceexplorer2_sdkv2.NewFromConfig(cfg, func(o *resourceexplorer2_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return resourceexplorer2_sdkv2.NewFromConfig(cfg, + resourceexplorer2_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/resourcegroups/service_endpoint_resolver_gen.go b/internal/service/resourcegroups/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..5153c50eca9 --- 
/dev/null +++ b/internal/service/resourcegroups/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package resourcegroups + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + resourcegroups_sdkv2 "github.com/aws/aws-sdk-go-v2/service/resourcegroups" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ resourcegroups_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver resourcegroups_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: resourcegroups_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params resourcegroups_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + 
"tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up resourcegroups endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*resourcegroups_sdkv2.Options) { + return func(o *resourcegroups_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/resourcegroups/service_endpoints_gen_test.go b/internal/service/resourcegroups/service_endpoints_gen_test.go index 506daa7fbf1..c7511e0323f 100644 --- a/internal/service/resourcegroups/service_endpoints_gen_test.go +++ b/internal/service/resourcegroups/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := resourcegroups_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), resourcegroups_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return 
err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := resourcegroups_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), resourcegroups_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git 
a/internal/service/resourcegroups/service_package_gen.go b/internal/service/resourcegroups/service_package_gen.go index cfabb5d5ffd..e678b38a0ff 100644 --- a/internal/service/resourcegroups/service_package_gen.go +++ b/internal/service/resourcegroups/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package resourcegroups @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" resourcegroups_sdkv2 "github.com/aws/aws-sdk-go-v2/service/resourcegroups" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -53,19 +52,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*resourcegroups_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return resourcegroups_sdkv2.NewFromConfig(cfg, func(o *resourcegroups_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return resourcegroups_sdkv2.NewFromConfig(cfg, + resourcegroups_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/resourcegroupstaggingapi/service_endpoint_resolver_gen.go 
b/internal/service/resourcegroupstaggingapi/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..470727433f2 --- /dev/null +++ b/internal/service/resourcegroupstaggingapi/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package resourcegroupstaggingapi + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + resourcegroupstaggingapi_sdkv2 "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ resourcegroupstaggingapi_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver resourcegroupstaggingapi_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: resourcegroupstaggingapi_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params resourcegroupstaggingapi_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = 
net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up resourcegroupstaggingapi endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*resourcegroupstaggingapi_sdkv2.Options) { + return func(o *resourcegroupstaggingapi_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/resourcegroupstaggingapi/service_endpoints_gen_test.go b/internal/service/resourcegroupstaggingapi/service_endpoints_gen_test.go index 3c4003c92e9..3fd6010e792 100644 --- a/internal/service/resourcegroupstaggingapi/service_endpoints_gen_test.go +++ b/internal/service/resourcegroupstaggingapi/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) 
string { +func defaultEndpoint(region string) (url.URL, error) { r := resourcegroupstaggingapi_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), resourcegroupstaggingapi_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := resourcegroupstaggingapi_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), resourcegroupstaggingapi_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && 
dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/resourcegroupstaggingapi/service_package_gen.go b/internal/service/resourcegroupstaggingapi/service_package_gen.go index 404ef92684b..32348897fe6 100644 --- a/internal/service/resourcegroupstaggingapi/service_package_gen.go +++ b/internal/service/resourcegroupstaggingapi/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package resourcegroupstaggingapi @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" resourcegroupstaggingapi_sdkv2 "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -44,19 +43,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*resourcegroupstaggingapi_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return resourcegroupstaggingapi_sdkv2.NewFromConfig(cfg, func(o *resourcegroupstaggingapi_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = 
aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return resourcegroupstaggingapi_sdkv2.NewFromConfig(cfg, + resourcegroupstaggingapi_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/rolesanywhere/service_endpoint_resolver_gen.go b/internal/service/rolesanywhere/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..5d03433d57c --- /dev/null +++ b/internal/service/rolesanywhere/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package rolesanywhere + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + rolesanywhere_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rolesanywhere" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ rolesanywhere_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver rolesanywhere_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: rolesanywhere_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params rolesanywhere_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = 
tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up rolesanywhere endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*rolesanywhere_sdkv2.Options) { + return func(o *rolesanywhere_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/rolesanywhere/service_endpoints_gen_test.go b/internal/service/rolesanywhere/service_endpoints_gen_test.go index 65e9b3dd526..e8b6e4292de 100644 --- a/internal/service/rolesanywhere/service_endpoints_gen_test.go +++ b/internal/service/rolesanywhere/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: 
expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := rolesanywhere_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), rolesanywhere_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := rolesanywhere_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), rolesanywhere_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if 
err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/rolesanywhere/service_package_gen.go b/internal/service/rolesanywhere/service_package_gen.go index c5c97bb205a..6ed74afb30d 100644 --- a/internal/service/rolesanywhere/service_package_gen.go +++ b/internal/service/rolesanywhere/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package rolesanywhere @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" rolesanywhere_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rolesanywhere" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -56,19 +55,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*rolesanywhere_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return rolesanywhere_sdkv2.NewFromConfig(cfg, func(o *rolesanywhere_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint 
set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return rolesanywhere_sdkv2.NewFromConfig(cfg, + rolesanywhere_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/route53/service_endpoint_resolver_gen.go b/internal/service/route53/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..db1d4a68e6c --- /dev/null +++ b/internal/service/route53/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package route53 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + route53_sdkv2 "github.com/aws/aws-sdk-go-v2/service/route53" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ route53_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver route53_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: route53_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params route53_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = 
tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up route53 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*route53_sdkv2.Options) { + return func(o *route53_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/route53/service_endpoints_gen_test.go b/internal/service/route53/service_endpoints_gen_test.go index ac39336221c..5676b3df7f2 100644 --- a/internal/service/route53/service_endpoints_gen_test.go +++ b/internal/service/route53/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, 
"use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := route53_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), route53_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := route53_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), route53_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + 
+ hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/route53/service_package.go b/internal/service/route53/service_package.go index ab76a4d82c1..cb1a94c876f 100644 --- a/internal/service/route53/service_package.go +++ b/internal/service/route53/service_package.go @@ -16,33 +16,37 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*route53.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return route53.NewFromConfig(cfg, func(o *route53.Options) { - // Always override the service region - switch config["partition"].(string) { - case names.StandardPartitionID: - // https://docs.aws.amazon.com/general/latest/gr/r53.html Setting default to us-east-1. - o.Region = names.USEast1RegionID - case names.ChinaPartitionID: - // The AWS Go SDK is missing endpoint information for Route 53 in the AWS China partition. - // This can likely be removed in the future. - if aws.ToString(o.BaseEndpoint) == "" { - o.BaseEndpoint = aws.String("https://api.route53.cn") + return route53.NewFromConfig(cfg, + route53.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *route53.Options) { + // Always override the service region + switch config["partition"].(string) { + case names.StandardPartitionID: + // https://docs.aws.amazon.com/general/latest/gr/r53.html Setting default to us-east-1. 
+ if cfg.Region != names.USEast1RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USEast1RegionID, + }) + } + o.Region = names.USEast1RegionID + case names.ChinaPartitionID: + // The AWS Go SDK is missing endpoint information for Route 53 in the AWS China partition. + // This can likely be removed in the future. + if aws.ToString(o.BaseEndpoint) == "" { + o.BaseEndpoint = aws.String("https://api.route53.cn") + } + o.Region = names.CNNorthwest1RegionID + case names.USGovCloudPartitionID: + if cfg.Region != names.USGovWest1RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USGovWest1RegionID, + }) + } + o.Region = names.USGovWest1RegionID } - o.Region = names.CNNorthwest1RegionID - case names.USGovCloudPartitionID: - o.Region = names.USGovWest1RegionID - } - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - }), nil + }, + ), nil } diff --git a/internal/service/route53/service_package_gen.go b/internal/service/route53/service_package_gen.go index 89baf108686..505e473c857 100644 --- a/internal/service/route53/service_package_gen.go +++ b/internal/service/route53/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package route53 diff --git a/internal/service/route53/tags_gen.go b/internal/service/route53/tags_gen.go index daa0289520b..024513e279f 100644 --- a/internal/service/route53/tags_gen.go +++ b/internal/service/route53/tags_gen.go @@ -99,12 +99,12 @@ func setTagsOut(ctx context.Context, tags []awstypes.Tag) { } // createTags creates route53 service tags for new resources. -func createTags(ctx context.Context, conn *route53.Client, identifier, resourceType string, tags []awstypes.Tag) error { +func createTags(ctx context.Context, conn *route53.Client, identifier, resourceType string, tags []awstypes.Tag, optFns ...func(*route53.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, resourceType, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, resourceType, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates route53 service tags. diff --git a/internal/service/route53domains/service_endpoint_resolver_gen.go b/internal/service/route53domains/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..7ef9fe3f5af --- /dev/null +++ b/internal/service/route53domains/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package route53domains + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + route53domains_sdkv2 "github.com/aws/aws-sdk-go-v2/service/route53domains" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ route53domains_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver route53domains_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: route53domains_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params route53domains_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up route53domains endpoint %q: %s", hostname, err) + return + } + } else 
{ + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*route53domains_sdkv2.Options) { + return func(o *route53domains_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/route53domains/service_endpoints_gen_test.go b/internal/service/route53domains/service_endpoints_gen_test.go index a74f70ad80a..e19416d84c5 100644 --- a/internal/service/route53domains/service_endpoints_gen_test.go +++ b/internal/service/route53domains/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -88,7 +90,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -222,7 +224,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -243,24 +245,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := route53domains_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), route53domains_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func 
defaultFIPSEndpoint(region string) (url.URL, error) { r := route53domains_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), route53domains_sdkv2.EndpointParameters{ @@ -268,14 +270,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -337,16 +339,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving route53domains default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving route53domains FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up route53domains endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/route53domains/service_package.go b/internal/service/route53domains/service_package.go index 3a072ade9a0..864905dd906 100644 --- a/internal/service/route53domains/service_package.go +++ 
b/internal/service/route53domains/service_package.go @@ -16,22 +16,20 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*route53domains.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return route53domains.NewFromConfig(cfg, func(o *route53domains.Options) { - if config["partition"].(string) == names.StandardPartitionID { - // Route 53 Domains is only available in AWS Commercial us-east-1 Region. - o.Region = names.USEast1RegionID - } - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled + return route53domains.NewFromConfig(cfg, + route53domains.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *route53domains.Options) { + if config["partition"].(string) == names.StandardPartitionID { + // Route 53 Domains is only available in AWS Commercial us-east-1 Region. + if cfg.Region != names.USEast1RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USEast1RegionID, + }) + } + o.Region = names.USEast1RegionID } - } - }), nil + }, + ), nil } diff --git a/internal/service/route53domains/service_package_gen.go b/internal/service/route53domains/service_package_gen.go index af62cd4e913..46cefd6eb86 100644 --- a/internal/service/route53domains/service_package_gen.go +++ b/internal/service/route53domains/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package route53domains diff --git a/internal/service/route53profiles/service_endpoint_resolver_gen.go b/internal/service/route53profiles/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..d15c75fb9dc --- /dev/null +++ b/internal/service/route53profiles/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package route53profiles + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + route53profiles_sdkv2 "github.com/aws/aws-sdk-go-v2/service/route53profiles" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ route53profiles_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver route53profiles_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: route53profiles_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params route53profiles_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := 
endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up route53profiles endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*route53profiles_sdkv2.Options) { + return func(o *route53profiles_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/route53profiles/service_package_gen.go b/internal/service/route53profiles/service_package_gen.go index 1bfbbc08855..c7187e55b73 100644 --- a/internal/service/route53profiles/service_package_gen.go +++ b/internal/service/route53profiles/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package route53profiles @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" route53profiles_sdkv2 "github.com/aws/aws-sdk-go-v2/service/route53profiles" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -39,19 +38,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*route53profiles_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return route53profiles_sdkv2.NewFromConfig(cfg, func(o *route53profiles_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return route53profiles_sdkv2.NewFromConfig(cfg, + route53profiles_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/route53recoverycontrolconfig/service_endpoint_resolver_gen.go b/internal/service/route53recoverycontrolconfig/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..a217e0f1660 --- /dev/null +++ b/internal/service/route53recoverycontrolconfig/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package route53recoverycontrolconfig + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/route53recoverycontrolconfig/service_endpoints_gen_test.go b/internal/service/route53recoverycontrolconfig/service_endpoints_gen_test.go index d913d7d0e64..3ebc393f0df 100644 --- a/internal/service/route53recoverycontrolconfig/service_endpoints_gen_test.go +++ b/internal/service/route53recoverycontrolconfig/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(route53recoverycontrolconfig_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(route53recoverycontrolconfig_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if 
err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/route53recoverycontrolconfig/service_package.go b/internal/service/route53recoverycontrolconfig/service_package.go index ac38fc859e7..40b80654f04 100644 --- a/internal/service/route53recoverycontrolconfig/service_package.go +++ b/internal/service/route53recoverycontrolconfig/service_package.go @@ -6,36 +6,39 @@ package route53recoverycontrolconfig import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 
"github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - route53recoverycontrolconfig_sdkv1 "github.com/aws/aws-sdk-go/service/route53recoverycontrolconfig" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/route53recoverycontrolconfig" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/names" ) // NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*route53recoverycontrolconfig_sdkv1.Route53RecoveryControlConfig, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*route53recoverycontrolconfig.Route53RecoveryControlConfig, error) { + sess := config[names.AttrSession].(*session.Session) - cfg := aws_sdkv1.Config{} + cfg := aws.Config{} if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { tflog.Debug(ctx, "setting endpoint", map[string]any{ "tf_aws.endpoint": endpoint, }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + cfg.Endpoint = aws.String(endpoint) + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } // Force "global" services to correct Regions. 
- if config["partition"].(string) == endpoints_sdkv1.AwsPartitionID { - cfg.Region = aws_sdkv1.String(endpoints_sdkv1.UsWest2RegionID) + if config["partition"].(string) == endpoints.AwsPartitionID { + if aws.StringValue(cfg.Region) != endpoints.UsWest2RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": aws.StringValue(cfg.Region), + "override_region": endpoints.UsWest2RegionID, + }) + cfg.Region = aws.String(endpoints.UsWest2RegionID) + } } - return route53recoverycontrolconfig_sdkv1.New(sess.Copy(&cfg)), nil + return route53recoverycontrolconfig.New(sess.Copy(&cfg)), nil } diff --git a/internal/service/route53recoverycontrolconfig/service_package_gen.go b/internal/service/route53recoverycontrolconfig/service_package_gen.go index 5aeb51c243a..3fd3a00e12f 100644 --- a/internal/service/route53recoverycontrolconfig/service_package_gen.go +++ b/internal/service/route53recoverycontrolconfig/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package route53recoverycontrolconfig diff --git a/internal/service/route53recoveryreadiness/service_endpoint_resolver_gen.go b/internal/service/route53recoveryreadiness/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..6b345c4cc55 --- /dev/null +++ b/internal/service/route53recoveryreadiness/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package route53recoveryreadiness + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/route53recoveryreadiness/service_endpoints_gen_test.go b/internal/service/route53recoveryreadiness/service_endpoints_gen_test.go index 863b25c3276..293bc01bbeb 100644 --- a/internal/service/route53recoveryreadiness/service_endpoints_gen_test.go +++ b/internal/service/route53recoveryreadiness/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,14 +239,14 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(route53recoveryreadiness_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.ResolveUnknownService = true }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -254,10 +255,10 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(route53recoveryreadiness_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { @@ -265,7 +266,7 @@ 
func defaultFIPSEndpoint(region string) string { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -274,7 +275,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -326,16 +327,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/route53recoveryreadiness/service_package.go b/internal/service/route53recoveryreadiness/service_package.go index ef5216de999..bdf39e4607c 100644 --- a/internal/service/route53recoveryreadiness/service_package.go +++ b/internal/service/route53recoveryreadiness/service_package.go @@ -6,36 +6,39 @@ package 
// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API.
func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*route53recoveryreadiness.Route53RecoveryReadiness, error) {
	sess := config[names.AttrSession].(*session.Session)

	cfg := aws.Config{}

	if endpoint := config[names.AttrEndpoint].(string); endpoint != "" {
		// A custom endpoint overrides the default resolver entirely.
		tflog.Debug(ctx, "setting endpoint", map[string]any{
			"tf_aws.endpoint": endpoint,
		})
		cfg.Endpoint = aws.String(endpoint)
	} else {
		// Use the generated resolver for FIPS-fallback behavior.
		cfg.EndpointResolver = newEndpointResolverSDKv1(ctx)
	}

	// Force "global" services to correct Regions.
	if config["partition"].(string) == endpoints.AwsPartitionID {
		// NOTE(review): cfg is freshly zero-valued here, so cfg.Region is
		// always nil and this comparison always triggers the override;
		// presumably this was meant to consult the session's Region — confirm
		// against the generator template.
		if aws.StringValue(cfg.Region) != endpoints.UsWest2RegionID {
			tflog.Info(ctx, "overriding region", map[string]any{
				"original_region": aws.StringValue(cfg.Region),
				"override_region": endpoints.UsWest2RegionID,
			})
			cfg.Region = aws.String(endpoints.UsWest2RegionID)
		}
	}

	return route53recoveryreadiness.New(sess.Copy(&cfg)), nil
}
package route53recoveryreadiness diff --git a/internal/service/route53resolver/firewall_rule.go b/internal/service/route53resolver/firewall_rule.go index f7dd14bd412..34d9bd2898c 100644 --- a/internal/service/route53resolver/firewall_rule.go +++ b/internal/service/route53resolver/firewall_rule.go @@ -67,6 +67,12 @@ func ResourceFirewallRule() *schema.Resource { Required: true, ValidateFunc: validation.StringLenBetween(1, 64), }, + "firewall_domain_redirection_action": { + Type: schema.TypeString, + Optional: true, + Default: route53resolver.FirewallDomainRedirectionActionInspectRedirectionDomain, + ValidateFunc: validation.StringInSlice(route53resolver.FirewallDomainRedirectionAction_Values(), false), + }, "firewall_rule_group_id": { Type: schema.TypeString, ForceNew: true, @@ -82,6 +88,10 @@ func ResourceFirewallRule() *schema.Resource { Type: schema.TypeInt, Required: true, }, + "q_type": { + Type: schema.TypeString, + Optional: true, + }, }, } } @@ -95,12 +105,13 @@ func resourceFirewallRuleCreate(ctx context.Context, d *schema.ResourceData, met ruleID := FirewallRuleCreateResourceID(firewallRuleGroupID, firewallDomainListID) name := d.Get(names.AttrName).(string) input := &route53resolver.CreateFirewallRuleInput{ - Action: aws.String(d.Get(names.AttrAction).(string)), - CreatorRequestId: aws.String(id.PrefixedUniqueId("tf-r53-resolver-firewall-rule-")), - FirewallRuleGroupId: aws.String(firewallRuleGroupID), - FirewallDomainListId: aws.String(firewallDomainListID), - Name: aws.String(name), - Priority: aws.Int64(int64(d.Get(names.AttrPriority).(int))), + Action: aws.String(d.Get(names.AttrAction).(string)), + CreatorRequestId: aws.String(id.PrefixedUniqueId("tf-r53-resolver-firewall-rule-")), + FirewallRuleGroupId: aws.String(firewallRuleGroupID), + FirewallDomainListId: aws.String(firewallDomainListID), + FirewallDomainRedirectionAction: aws.String(d.Get("firewall_domain_redirection_action").(string)), + Name: aws.String(name), + Priority: 
aws.Int64(int64(d.Get(names.AttrPriority).(int))), } if v, ok := d.GetOk("block_override_dns_type"); ok { @@ -119,6 +130,10 @@ func resourceFirewallRuleCreate(ctx context.Context, d *schema.ResourceData, met input.BlockResponse = aws.String(v.(string)) } + if v, ok := d.GetOk("q_type"); ok { + input.Qtype = aws.String(v.(string)) + } + _, err := conn.CreateFirewallRuleWithContext(ctx, input) if err != nil { @@ -159,8 +174,10 @@ func resourceFirewallRuleRead(ctx context.Context, d *schema.ResourceData, meta d.Set("block_response", firewallRule.BlockResponse) d.Set("firewall_rule_group_id", firewallRule.FirewallRuleGroupId) d.Set("firewall_domain_list_id", firewallRule.FirewallDomainListId) + d.Set("firewall_domain_redirection_action", firewallRule.FirewallDomainRedirectionAction) d.Set(names.AttrName, firewallRule.Name) d.Set(names.AttrPriority, firewallRule.Priority) + d.Set("q_type", firewallRule.Qtype) return diags } @@ -199,6 +216,14 @@ func resourceFirewallRuleUpdate(ctx context.Context, d *schema.ResourceData, met input.BlockResponse = aws.String(v.(string)) } + if v, ok := d.GetOk("firewall_domain_redirection_action"); ok { + input.FirewallDomainRedirectionAction = aws.String(v.(string)) + } + + if v, ok := d.GetOk("q_type"); ok { + input.Qtype = aws.String(v.(string)) + } + _, err = conn.UpdateFirewallRuleWithContext(ctx, input) if err != nil { @@ -218,11 +243,17 @@ func resourceFirewallRuleDelete(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendFromErr(diags, err) } - log.Printf("[DEBUG] Deleting Route53 Resolver Firewall Rule: %s", d.Id()) - _, err = conn.DeleteFirewallRuleWithContext(ctx, &route53resolver.DeleteFirewallRuleInput{ + input := &route53resolver.DeleteFirewallRuleInput{ FirewallDomainListId: aws.String(firewallDomainListID), FirewallRuleGroupId: aws.String(firewallRuleGroupID), - }) + } + + if v, ok := d.GetOk("q_type"); ok { + input.Qtype = aws.String(v.(string)) + } + + log.Printf("[DEBUG] Deleting Route53 Resolver 
Firewall Rule: %s", d.Id()) + _, err = conn.DeleteFirewallRuleWithContext(ctx, input) if tfawserr.ErrCodeEquals(err, route53resolver.ErrCodeResourceNotFoundException) { return diags diff --git a/internal/service/route53resolver/firewall_rule_test.go b/internal/service/route53resolver/firewall_rule_test.go index d469247698f..4cabdc70777 100644 --- a/internal/service/route53resolver/firewall_rule_test.go +++ b/internal/service/route53resolver/firewall_rule_test.go @@ -39,6 +39,7 @@ func TestAccRoute53ResolverFirewallRule_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, names.AttrAction, "ALLOW"), resource.TestCheckResourceAttrPair(resourceName, "firewall_rule_group_id", "aws_route53_resolver_firewall_rule_group.test", names.AttrID), resource.TestCheckResourceAttrPair(resourceName, "firewall_domain_list_id", "aws_route53_resolver_firewall_domain_list.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, "firewall_domain_redirection_action", "INSPECT_REDIRECTION_DOMAIN"), resource.TestCheckResourceAttr(resourceName, names.AttrPriority, "100"), ), }, @@ -51,6 +52,43 @@ func TestAccRoute53ResolverFirewallRule_basic(t *testing.T) { }) } +func TestAccRoute53ResolverFirewallRule_update_firewallDomainRedirectionAction(t *testing.T) { + ctx := acctest.Context(t) + var v route53resolver.FirewallRule + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_route53_resolver_firewall_rule.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFirewallRuleDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallRuleConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallRuleExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, 
names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "firewall_domain_redirection_action", "INSPECT_REDIRECTION_DOMAIN"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccFirewallRuleConfig_firewallDomainRedirectionAction(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallRuleExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, "firewall_domain_redirection_action", "TRUST_REDIRECTION_DOMAIN"), + ), + }, + }, + }) +} + func TestAccRoute53ResolverFirewallRule_block(t *testing.T) { ctx := acctest.Context(t) var v route53resolver.FirewallRule @@ -114,6 +152,39 @@ func TestAccRoute53ResolverFirewallRule_blockOverride(t *testing.T) { }) } +func TestAccRoute53ResolverFirewallRule_qType(t *testing.T) { + ctx := acctest.Context(t) + var v route53resolver.FirewallRule + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_route53_resolver_firewall_rule.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.Route53ResolverServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFirewallRuleDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallRuleConfig_qType(rName, "A"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallRuleExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), + resource.TestCheckResourceAttr(resourceName, names.AttrAction, "ALLOW"), + resource.TestCheckResourceAttrPair(resourceName, "firewall_rule_group_id", "aws_route53_resolver_firewall_rule_group.test", names.AttrID), + resource.TestCheckResourceAttrPair(resourceName, "firewall_domain_list_id", 
"aws_route53_resolver_firewall_domain_list.test", names.AttrID), + resource.TestCheckResourceAttr(resourceName, names.AttrPriority, "100"), + resource.TestCheckResourceAttr(resourceName, "q_type", "A"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccRoute53ResolverFirewallRule_disappears(t *testing.T) { ctx := acctest.Context(t) var v route53resolver.FirewallRule @@ -221,6 +292,27 @@ resource "aws_route53_resolver_firewall_rule" "test" { `, rName) } +func testAccFirewallRuleConfig_firewallDomainRedirectionAction(rName string) string { + return fmt.Sprintf(` +resource "aws_route53_resolver_firewall_rule_group" "test" { + name = %[1]q +} + +resource "aws_route53_resolver_firewall_domain_list" "test" { + name = %[1]q +} + +resource "aws_route53_resolver_firewall_rule" "test" { + name = %[1]q + action = "ALLOW" + firewall_rule_group_id = aws_route53_resolver_firewall_rule_group.test.id + firewall_domain_list_id = aws_route53_resolver_firewall_domain_list.test.id + firewall_domain_redirection_action = "TRUST_REDIRECTION_DOMAIN" + priority = 100 +} +`, rName) +} + func testAccFirewallRuleConfig_block(rName, blockResponse string) string { return fmt.Sprintf(` resource "aws_route53_resolver_firewall_rule_group" "test" { @@ -265,3 +357,24 @@ resource "aws_route53_resolver_firewall_rule" "test" { } `, rName) } + +func testAccFirewallRuleConfig_qType(rName, qType string) string { + return fmt.Sprintf(` +resource "aws_route53_resolver_firewall_rule_group" "test" { + name = %[1]q +} + +resource "aws_route53_resolver_firewall_domain_list" "test" { + name = %[1]q +} + +resource "aws_route53_resolver_firewall_rule" "test" { + name = %[1]q + action = "ALLOW" + firewall_rule_group_id = aws_route53_resolver_firewall_rule_group.test.id + firewall_domain_list_id = aws_route53_resolver_firewall_domain_list.test.id + priority = 100 + q_type = %[2]q +} +`, rName, qType) +} diff --git 
a/internal/service/route53resolver/service_endpoint_resolver_gen.go b/internal/service/route53resolver/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..c6ac13e09fe --- /dev/null +++ b/internal/service/route53resolver/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package route53resolver + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/route53resolver/service_endpoints_gen_test.go b/internal/service/route53resolver/service_endpoints_gen_test.go index d774666c5d6..2f319bf1548 100644 --- a/internal/service/route53resolver/service_endpoints_gen_test.go +++ b/internal/service/route53resolver/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func 
TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(route53resolver_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(route53resolver_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + 
if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/route53resolver/service_package_gen.go b/internal/service/route53resolver/service_package_gen.go index d0c0fd7db3c..358ef8563a1 100644 --- a/internal/service/route53resolver/service_package_gen.go +++ b/internal/service/route53resolver/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package route53resolver @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" route53resolver_sdkv1 "github.com/aws/aws-sdk-go/service/route53resolver" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -158,11 +157,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*r "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return route53resolver_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/rum/app_monitor.go b/internal/service/rum/app_monitor.go index 6e08f781085..f634adb088c 100644 --- a/internal/service/rum/app_monitor.go +++ b/internal/service/rum/app_monitor.go @@ -5,18 +5,19 @@ package rum import ( "context" - "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - 
"github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/service/cloudwatchrum" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/rum" + awstypes "github.com/aws/aws-sdk-go-v2/service/rum/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" @@ -27,7 +28,7 @@ import ( // @SDKResource("aws_rum_app_monitor", name="App Monitor") // @Tags(identifierAttribute="arn") -func ResourceAppMonitor() *schema.Resource { +func resourceAppMonitor() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceAppMonitorCreate, ReadWithoutTimeout: resourceAppMonitorRead, @@ -91,8 +92,8 @@ func ResourceAppMonitor() *schema.Resource { Type: schema.TypeSet, Optional: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(cloudwatchrum.Telemetry_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.Telemetry](), }, }, }, @@ -114,10 +115,10 @@ func ResourceAppMonitor() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ names.AttrStatus: { - Type: schema.TypeString, - Optional: true, - Default: cloudwatchrum.CustomEventsStatusDisabled, - ValidateFunc: validation.StringInSlice(cloudwatchrum.CustomEventsStatus_Values(), false), + Type: schema.TypeString, + Optional: true, + 
Default: awstypes.CustomEventsStatusDisabled, + ValidateDiagFunc: enum.Validate[awstypes.CustomEventsStatus](), }, }, }, @@ -145,16 +146,17 @@ func ResourceAppMonitor() *schema.Resource { names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), }, + CustomizeDiff: verify.SetTagsDiff, } } func resourceAppMonitorCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RUMConn(ctx) + conn := meta.(*conns.AWSClient).RUMClient(ctx) name := d.Get(names.AttrName).(string) - input := &cloudwatchrum.CreateAppMonitorInput{ + input := &rum.CreateAppMonitorInput{ Name: aws.String(name), CwLogEnabled: aws.Bool(d.Get("cw_log_enabled").(bool)), Domain: aws.String(d.Get(names.AttrDomain).(string)), @@ -169,7 +171,7 @@ func resourceAppMonitorCreate(ctx context.Context, d *schema.ResourceData, meta input.CustomEvents = expandCustomEvents(v.([]interface{})[0].(map[string]interface{})) } - _, err := conn.CreateAppMonitorWithContext(ctx, input) + _, err := conn.CreateAppMonitor(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating CloudWatch RUM App Monitor (%s): %s", name, err) @@ -182,9 +184,9 @@ func resourceAppMonitorCreate(ctx context.Context, d *schema.ResourceData, meta func resourceAppMonitorRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RUMConn(ctx) + conn := meta.(*conns.AWSClient).RUMClient(ctx) - appMon, err := FindAppMonitorByName(ctx, conn, d.Id()) + appMon, err := findAppMonitorByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] CloudWatch RUM App Monitor %s not found, removing from state", d.Id()) @@ -205,18 +207,19 @@ func resourceAppMonitorRead(ctx context.Context, d *schema.ResourceData, meta in } d.Set("app_monitor_id", appMon.Id) + name := aws.ToString(appMon.Name) arn := arn.ARN{ - 
AccountID: meta.(*conns.AWSClient).AccountID, Partition: meta.(*conns.AWSClient).Partition, - Region: meta.(*conns.AWSClient).Region, - Resource: fmt.Sprintf("appmonitor/%s", aws.StringValue(appMon.Name)), Service: "rum", + Region: meta.(*conns.AWSClient).Region, + AccountID: meta.(*conns.AWSClient).AccountID, + Resource: "appmonitor/" + name, }.String() d.Set(names.AttrARN, arn) d.Set("cw_log_enabled", appMon.DataStorage.CwLog.CwLogEnabled) d.Set("cw_log_group", appMon.DataStorage.CwLog.CwLogGroup) d.Set(names.AttrDomain, appMon.Domain) - d.Set(names.AttrName, appMon.Name) + d.Set(names.AttrName, name) setTagsOut(ctx, appMon.Tags) @@ -225,10 +228,10 @@ func resourceAppMonitorRead(ctx context.Context, d *schema.ResourceData, meta in func resourceAppMonitorUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RUMConn(ctx) + conn := meta.(*conns.AWSClient).RUMClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { - input := &cloudwatchrum.UpdateAppMonitorInput{ + input := &rum.UpdateAppMonitorInput{ Name: aws.String(d.Id()), } @@ -248,7 +251,7 @@ func resourceAppMonitorUpdate(ctx context.Context, d *schema.ResourceData, meta input.Domain = aws.String(d.Get(names.AttrDomain).(string)) } - _, err := conn.UpdateAppMonitorWithContext(ctx, input) + _, err := conn.UpdateAppMonitor(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating CloudWatch RUM App Monitor (%s): %s", d.Id(), err) @@ -260,14 +263,14 @@ func resourceAppMonitorUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceAppMonitorDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RUMConn(ctx) + conn := meta.(*conns.AWSClient).RUMClient(ctx) log.Printf("[DEBUG] Deleting CloudWatch RUM App Monitor: %s", d.Id()) - _, err := conn.DeleteAppMonitorWithContext(ctx, 
&cloudwatchrum.DeleteAppMonitorInput{ + _, err := conn.DeleteAppMonitor(ctx, &rum.DeleteAppMonitorInput{ Name: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, cloudwatchrum.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -278,14 +281,14 @@ func resourceAppMonitorDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func FindAppMonitorByName(ctx context.Context, conn *cloudwatchrum.CloudWatchRUM, name string) (*cloudwatchrum.AppMonitor, error) { - input := &cloudwatchrum.GetAppMonitorInput{ +func findAppMonitorByName(ctx context.Context, conn *rum.Client, name string) (*awstypes.AppMonitor, error) { + input := &rum.GetAppMonitorInput{ Name: aws.String(name), } - output, err := conn.GetAppMonitorWithContext(ctx, input) + output, err := conn.GetAppMonitor(ctx, input) - if tfawserr.ErrCodeEquals(err, cloudwatchrum.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -303,12 +306,12 @@ func FindAppMonitorByName(ctx context.Context, conn *cloudwatchrum.CloudWatchRUM return output.AppMonitor, nil } -func expandAppMonitorConfiguration(tfMap map[string]interface{}) *cloudwatchrum.AppMonitorConfiguration { +func expandAppMonitorConfiguration(tfMap map[string]interface{}) *awstypes.AppMonitorConfiguration { if tfMap == nil { return nil } - config := &cloudwatchrum.AppMonitorConfiguration{} + config := &awstypes.AppMonitorConfiguration{} if v, ok := tfMap["guest_role_arn"].(string); ok && v != "" { config.GuestRoleArn = aws.String(v) @@ -319,7 +322,7 @@ func expandAppMonitorConfiguration(tfMap map[string]interface{}) *cloudwatchrum. 
} if v, ok := tfMap["session_sample_rate"].(float64); ok { - config.SessionSampleRate = aws.Float64(v) + config.SessionSampleRate = v } if v, ok := tfMap["allow_cookies"].(bool); ok { @@ -331,25 +334,25 @@ func expandAppMonitorConfiguration(tfMap map[string]interface{}) *cloudwatchrum. } if v, ok := tfMap["excluded_pages"].(*schema.Set); ok && v.Len() > 0 { - config.ExcludedPages = flex.ExpandStringSet(v) + config.ExcludedPages = flex.ExpandStringValueSet(v) } if v, ok := tfMap["favorite_pages"].(*schema.Set); ok && v.Len() > 0 { - config.FavoritePages = flex.ExpandStringSet(v) + config.FavoritePages = flex.ExpandStringValueSet(v) } if v, ok := tfMap["included_pages"].(*schema.Set); ok && v.Len() > 0 { - config.IncludedPages = flex.ExpandStringSet(v) + config.IncludedPages = flex.ExpandStringValueSet(v) } if v, ok := tfMap["telemetries"].(*schema.Set); ok && v.Len() > 0 { - config.Telemetries = flex.ExpandStringSet(v) + config.Telemetries = flex.ExpandStringyValueSet[awstypes.Telemetry](v) } return config } -func flattenAppMonitorConfiguration(apiObject *cloudwatchrum.AppMonitorConfiguration) map[string]interface{} { +func flattenAppMonitorConfiguration(apiObject *awstypes.AppMonitorConfiguration) map[string]interface{} { if apiObject == nil { return nil } @@ -357,67 +360,63 @@ func flattenAppMonitorConfiguration(apiObject *cloudwatchrum.AppMonitorConfigura tfMap := map[string]interface{}{} if v := apiObject.GuestRoleArn; v != nil { - tfMap["guest_role_arn"] = aws.StringValue(v) + tfMap["guest_role_arn"] = aws.ToString(v) } if v := apiObject.IdentityPoolId; v != nil { - tfMap["identity_pool_id"] = aws.StringValue(v) + tfMap["identity_pool_id"] = aws.ToString(v) } - if v := apiObject.SessionSampleRate; v != nil { - tfMap["session_sample_rate"] = aws.Float64Value(v) - } + tfMap["session_sample_rate"] = apiObject.SessionSampleRate if v := apiObject.AllowCookies; v != nil { - tfMap["allow_cookies"] = aws.BoolValue(v) + tfMap["allow_cookies"] = aws.ToBool(v) } if v := 
apiObject.EnableXRay; v != nil { - tfMap["enable_xray"] = aws.BoolValue(v) + tfMap["enable_xray"] = aws.ToBool(v) } if v := apiObject.Telemetries; v != nil { - tfMap["telemetries"] = flex.FlattenStringSet(v) + tfMap["telemetries"] = apiObject.Telemetries } if v := apiObject.IncludedPages; v != nil { - tfMap["included_pages"] = flex.FlattenStringSet(v) + tfMap["included_pages"] = apiObject.IncludedPages } if v := apiObject.FavoritePages; v != nil { - tfMap["favorite_pages"] = flex.FlattenStringSet(v) + tfMap["favorite_pages"] = apiObject.FavoritePages } if v := apiObject.ExcludedPages; v != nil { - tfMap["excluded_pages"] = flex.FlattenStringSet(v) + tfMap["excluded_pages"] = apiObject.ExcludedPages } return tfMap } -func expandCustomEvents(tfMap map[string]interface{}) *cloudwatchrum.CustomEvents { +func expandCustomEvents(tfMap map[string]interface{}) *awstypes.CustomEvents { if tfMap == nil { return nil } - config := &cloudwatchrum.CustomEvents{} + config := &awstypes.CustomEvents{} if v, ok := tfMap[names.AttrStatus].(string); ok && v != "" { - config.Status = aws.String(v) + config.Status = awstypes.CustomEventsStatus(v) } return config } -func flattenCustomEvents(apiObject *cloudwatchrum.CustomEvents) map[string]interface{} { +func flattenCustomEvents(apiObject *awstypes.CustomEvents) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Status; v != nil { - tfMap[names.AttrStatus] = aws.StringValue(v) + tfMap := map[string]interface{}{ + names.AttrStatus: apiObject.Status, } return tfMap diff --git a/internal/service/rum/app_monitor_test.go b/internal/service/rum/app_monitor_test.go index 6fa04922fa7..77ba8805023 100644 --- a/internal/service/rum/app_monitor_test.go +++ b/internal/service/rum/app_monitor_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/cloudwatchrum" + awstypes "github.com/aws/aws-sdk-go-v2/service/rum/types" sdkacctest 
"github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccRUMAppMonitor_basic(t *testing.T) { ctx := acctest.Context(t) - var appMon cloudwatchrum.AppMonitor + var appMon awstypes.AppMonitor rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rum_app_monitor.test" @@ -73,7 +73,7 @@ func TestAccRUMAppMonitor_basic(t *testing.T) { func TestAccRUMAppMonitor_customEvents(t *testing.T) { ctx := acctest.Context(t) - var appMon cloudwatchrum.AppMonitor + var appMon awstypes.AppMonitor rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rum_app_monitor.test" @@ -118,7 +118,7 @@ func TestAccRUMAppMonitor_customEvents(t *testing.T) { func TestAccRUMAppMonitor_tags(t *testing.T) { ctx := acctest.Context(t) - var appMon cloudwatchrum.AppMonitor + var appMon awstypes.AppMonitor rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rum_app_monitor.test" @@ -164,7 +164,7 @@ func TestAccRUMAppMonitor_tags(t *testing.T) { func TestAccRUMAppMonitor_disappears(t *testing.T) { ctx := acctest.Context(t) - var appMon cloudwatchrum.AppMonitor + var appMon awstypes.AppMonitor rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rum_app_monitor.test" @@ -189,7 +189,7 @@ func TestAccRUMAppMonitor_disappears(t *testing.T) { func testAccCheckAppMonitorDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RUMConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RUMClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_rum_app_monitor" { @@ -213,17 +213,14 @@ func testAccCheckAppMonitorDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccCheckAppMonitorExists(ctx 
context.Context, n string, v *cloudwatchrum.AppMonitor) resource.TestCheckFunc { +func testAccCheckAppMonitorExists(ctx context.Context, n string, v *awstypes.AppMonitor) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No CloudWatch RUM App Monitor ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).RUMConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RUMClient(ctx) output, err := tfcloudwatchrum.FindAppMonitorByName(ctx, conn, rs.Primary.ID) diff --git a/internal/service/rum/exports_test.go b/internal/service/rum/exports_test.go new file mode 100644 index 00000000000..bbb5989aa08 --- /dev/null +++ b/internal/service/rum/exports_test.go @@ -0,0 +1,13 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package rum + +// Exports for use in tests only. +var ( + ResourceAppMonitor = resourceAppMonitor + ResourceMetricsDestination = resourceMetricsDestination + + FindAppMonitorByName = findAppMonitorByName + FindMetricsDestinationByName = findMetricsDestinationByName +) diff --git a/internal/service/rum/generate.go b/internal/service/rum/generate.go index a6955d2a2d9..ef736f8cc82 100644 --- a/internal/service/rum/generate.go +++ b/internal/service/rum/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ServiceTagsMap -UpdateTags +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -KVTValues -ServiceTagsMap -SkipTypesImp -UpdateTags //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/rum/metrics_destination.go b/internal/service/rum/metrics_destination.go index 8104fe459de..3ffdf890754 100644 --- a/internal/service/rum/metrics_destination.go +++ b/internal/service/rum/metrics_destination.go @@ -7,22 +7,23 @@ import ( "context" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatchrum" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rum" + awstypes "github.com/aws/aws-sdk-go-v2/service/rum/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_rum_metrics_destination") -func ResourceMetricsDestination() *schema.Resource { +// @SDKResource("aws_rum_metrics_destination", name="Metrics Destination") +func resourceMetricsDestination() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceMetricsDestinationPut, ReadWithoutTimeout: resourceMetricsDestinationRead, @@ -39,9 +40,9 @@ func ResourceMetricsDestination() *schema.Resource { Required: true, }, names.AttrDestination: { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(cloudwatchrum.MetricDestination_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[awstypes.MetricDestination](), }, 
names.AttrDestinationARN: { Type: schema.TypeString, @@ -59,12 +60,12 @@ func ResourceMetricsDestination() *schema.Resource { func resourceMetricsDestinationPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RUMConn(ctx) + conn := meta.(*conns.AWSClient).RUMClient(ctx) name := d.Get("app_monitor_name").(string) - input := &cloudwatchrum.PutRumMetricsDestinationInput{ + input := &rum.PutRumMetricsDestinationInput{ AppMonitorName: aws.String(name), - Destination: aws.String(d.Get(names.AttrDestination).(string)), + Destination: awstypes.MetricDestination(d.Get(names.AttrDestination).(string)), } if v, ok := d.GetOk(names.AttrDestinationARN); ok { @@ -75,7 +76,7 @@ func resourceMetricsDestinationPut(ctx context.Context, d *schema.ResourceData, input.IamRoleArn = aws.String(v.(string)) } - _, err := conn.PutRumMetricsDestinationWithContext(ctx, input) + _, err := conn.PutRumMetricsDestination(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "putting CloudWatch RUM Metrics Destination (%s): %s", name, err) @@ -90,9 +91,9 @@ func resourceMetricsDestinationPut(ctx context.Context, d *schema.ResourceData, func resourceMetricsDestinationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).RUMConn(ctx) + conn := meta.(*conns.AWSClient).RUMClient(ctx) - dest, err := FindMetricsDestinationByName(ctx, conn, d.Id()) + dest, err := findMetricsDestinationByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] CloudWatch RUM Metrics Destination %s not found, removing from state", d.Id()) @@ -114,11 +115,11 @@ func resourceMetricsDestinationRead(ctx context.Context, d *schema.ResourceData, func resourceMetricsDestinationDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn 
:= meta.(*conns.AWSClient).RUMConn(ctx) + conn := meta.(*conns.AWSClient).RUMClient(ctx) - input := &cloudwatchrum.DeleteRumMetricsDestinationInput{ + input := &rum.DeleteRumMetricsDestinationInput{ AppMonitorName: aws.String(d.Id()), - Destination: aws.String(d.Get(names.AttrDestination).(string)), + Destination: awstypes.MetricDestination(d.Get(names.AttrDestination).(string)), } if v, ok := d.GetOk(names.AttrDestinationARN); ok { @@ -126,9 +127,9 @@ func resourceMetricsDestinationDelete(ctx context.Context, d *schema.ResourceDat } log.Printf("[DEBUG] Deleting CloudWatch RUM Metrics Destination: %s", d.Id()) - _, err := conn.DeleteRumMetricsDestinationWithContext(ctx, input) + _, err := conn.DeleteRumMetricsDestination(ctx, input) - if tfawserr.ErrCodeEquals(err, cloudwatchrum.ErrCodeResourceNotFoundException) { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diags } @@ -139,44 +140,44 @@ func resourceMetricsDestinationDelete(ctx context.Context, d *schema.ResourceDat return diags } -func FindMetricsDestinationByName(ctx context.Context, conn *cloudwatchrum.CloudWatchRUM, name string) (*cloudwatchrum.MetricDestinationSummary, error) { - input := &cloudwatchrum.ListRumMetricsDestinationsInput{ - AppMonitorName: aws.String(name), +func findMetricsDestination(ctx context.Context, conn *rum.Client, input *rum.ListRumMetricsDestinationsInput) (*awstypes.MetricDestinationSummary, error) { + output, err := findMetricsDestinations(ctx, conn, input) + + if err != nil { + return nil, err } - var output []*cloudwatchrum.MetricDestinationSummary - err := conn.ListRumMetricsDestinationsPagesWithContext(ctx, input, func(page *cloudwatchrum.ListRumMetricsDestinationsOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } + return tfresource.AssertSingleValueResult(output) +} + +func findMetricsDestinations(ctx context.Context, conn *rum.Client, input *rum.ListRumMetricsDestinationsInput) ([]awstypes.MetricDestinationSummary, error) { + var 
output []awstypes.MetricDestinationSummary - for _, v := range page.Destinations { - if v != nil { - output = append(output, v) + pages := rum.NewListRumMetricsDestinationsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, } } - return !lastPage - }) - - if tfawserr.ErrCodeEquals(err, cloudwatchrum.ErrCodeResourceNotFoundException) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + if err != nil { + return nil, err } - } - if err != nil { - return nil, err + output = append(output, page.Destinations...) } - if len(output) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } + return output, nil +} - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) +func findMetricsDestinationByName(ctx context.Context, conn *rum.Client, name string) (*awstypes.MetricDestinationSummary, error) { + input := &rum.ListRumMetricsDestinationsInput{ + AppMonitorName: aws.String(name), } - return output[0], nil + return findMetricsDestination(ctx, conn, input) } diff --git a/internal/service/rum/metrics_destination_test.go b/internal/service/rum/metrics_destination_test.go index 0545df40e64..19b057a4412 100644 --- a/internal/service/rum/metrics_destination_test.go +++ b/internal/service/rum/metrics_destination_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/cloudwatchrum" + awstypes "github.com/aws/aws-sdk-go-v2/service/rum/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -21,7 +21,7 @@ import ( func TestAccRUMMetricsDestination_basic(t *testing.T) { ctx := acctest.Context(t) - var dest cloudwatchrum.MetricDestinationSummary + var dest 
awstypes.MetricDestinationSummary rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rum_metrics_destination.test" @@ -50,7 +50,7 @@ func TestAccRUMMetricsDestination_basic(t *testing.T) { func TestAccRUMMetricsDestination_disappears(t *testing.T) { ctx := acctest.Context(t) - var dest cloudwatchrum.MetricDestinationSummary + var dest awstypes.MetricDestinationSummary rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rum_metrics_destination.test" @@ -75,7 +75,7 @@ func TestAccRUMMetricsDestination_disappears(t *testing.T) { func TestAccRUMMetricsDestination_disappears_appMonitor(t *testing.T) { ctx := acctest.Context(t) - var dest cloudwatchrum.MetricDestinationSummary + var dest awstypes.MetricDestinationSummary rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_rum_metrics_destination.test" @@ -100,7 +100,7 @@ func TestAccRUMMetricsDestination_disappears_appMonitor(t *testing.T) { func testAccCheckMetricsDestinationDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).RUMConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RUMClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_rum_metrics_destination" { @@ -124,17 +124,14 @@ func testAccCheckMetricsDestinationDestroy(ctx context.Context) resource.TestChe } } -func testAccCheckMetricsDestinationExists(ctx context.Context, n string, v *cloudwatchrum.MetricDestinationSummary) resource.TestCheckFunc { +func testAccCheckMetricsDestinationExists(ctx context.Context, n string, v *awstypes.MetricDestinationSummary) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No CloudWatch RUM Metrics Destination ID is set") - } - conn := 
acctest.Provider.Meta().(*conns.AWSClient).RUMConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).RUMClient(ctx) output, err := tfcloudwatchrum.FindMetricsDestinationByName(ctx, conn, rs.Primary.ID) diff --git a/internal/service/rum/service_endpoint_resolver_gen.go b/internal/service/rum/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..1c954121cf4 --- /dev/null +++ b/internal/service/rum/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package rum + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + rum_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rum" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ rum_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver rum_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: rum_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params rum_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + 
"tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up rum endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*rum_sdkv2.Options) { + return func(o *rum_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/rum/service_endpoints_gen_test.go b/internal/service/rum/service_endpoints_gen_test.go index a7eb6cee8ef..30590c71de8 100644 --- a/internal/service/rum/service_endpoints_gen_test.go +++ b/internal/service/rum/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package rum_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - cloudwatchrum_sdkv1 "github.com/aws/aws-sdk-go/service/cloudwatchrum" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + rum_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rum" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/go-cty/cty" @@ -88,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: 
expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -271,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -292,55 +297,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := rum_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(cloudwatchrum_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), rum_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := rum_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(cloudwatchrum_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), rum_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.RUMConn(ctx) - - 
req, _ := client.ListAppMonitorsRequest(&cloudwatchrum_sdkv1.ListAppMonitorsInput{}) + client := meta.RUMClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListAppMonitors(ctx, &rum_sdkv2.ListAppMonitorsInput{}, + func(opts *rum_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -396,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: 
endpoint.String(), region: expectedCallRegion, } } @@ -523,6 +559,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a 
Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/rum/service_package_gen.go b/internal/service/rum/service_package_gen.go index 7d4b9dfbe77..45c6405a230 100644 --- a/internal/service/rum/service_package_gen.go +++ b/internal/service/rum/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package rum import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - cloudwatchrum_sdkv1 "github.com/aws/aws-sdk-go/service/cloudwatchrum" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + rum_sdkv2 "github.com/aws/aws-sdk-go-v2/service/rum" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -32,7 +29,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceAppMonitor, + Factory: resourceAppMonitor, TypeName: "aws_rum_app_monitor", Name: "App Monitor", Tags: &types.ServicePackageResourceTags{ @@ -40,8 +37,9 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceMetricsDestination, + Factory: resourceMetricsDestination, TypeName: "aws_rum_metrics_destination", + Name: "Metrics Destination", }, } } @@ -50,25 +48,14 @@ func (p *servicePackage) ServicePackageName() string { return names.RUM } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*cloudwatchrum_sdkv1.CloudWatchRUM, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*rum_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return cloudwatchrum_sdkv1.New(sess.Copy(&cfg)), nil + return rum_sdkv2.NewFromConfig(cfg, + rum_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/rum/sweep.go b/internal/service/rum/sweep.go index 8c8d75be90b..8854334755f 100644 --- a/internal/service/rum/sweep.go +++ b/internal/service/rum/sweep.go @@ -7,12 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatchrum" - "github.com/hashicorp/go-multierror" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rum" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -25,45 +24,40 @@ func RegisterSweepers() { func sweepAppMonitors(region string) error { ctx := sweep.Context(region) client, err := sweep.SharedRegionalSweepClient(ctx, region) - if err != nil { return fmt.Errorf("error getting client: %s", err) } - - conn := client.RUMConn(ctx) + conn := client.RUMClient(ctx) + input := &rum.ListAppMonitorsInput{} 
sweepResources := make([]sweep.Sweepable, 0) - var errs *multierror.Error - err = conn.ListAppMonitorsPagesWithContext(ctx, &cloudwatchrum.ListAppMonitorsInput{}, func(resp *cloudwatchrum.ListAppMonitorsOutput, lastPage bool) bool { - if len(resp.AppMonitorSummaries) == 0 { - log.Print("[DEBUG] No RUM App Monitors to sweep") - return !lastPage + pages := rum.NewListAppMonitorsPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping CloudWatch RUM App Monitor sweep for %s: %s", region, err) + return nil } - for _, c := range resp.AppMonitorSummaries { - r := ResourceAppMonitor() + if err != nil { + return fmt.Errorf("error listing CloudWatch RUM App Monitors (%s): %w", region, err) + } + + for _, v := range page.AppMonitorSummaries { + r := resourceAppMonitor() d := r.Data(nil) - d.SetId(aws.StringValue(c.Name)) + d.SetId(aws.ToString(v.Name)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if err != nil { - errs = multierror.Append(errs, fmt.Errorf("error describing RUM App Monitors: %w", err)) - // in case work can be done, don't jump out yet } - if err = sweep.SweepOrchestrator(ctx, sweepResources); err != nil { - errs = multierror.Append(errs, fmt.Errorf("error sweeping RUM App Monitors for %s: %w", region, err)) - } + err = sweep.SweepOrchestrator(ctx, sweepResources) - if awsv1.SkipSweepError(errs.ErrorOrNil()) { - log.Printf("[WARN] Skipping RUM App Monitor sweep for %s: %s", region, err) - return nil + if err != nil { + return fmt.Errorf("error sweeping CloudWatch RUM App Monitors (%s): %w", region, err) } - return errs.ErrorOrNil() + return nil } diff --git a/internal/service/rum/tags_gen.go b/internal/service/rum/tags_gen.go index b6a4fef6f68..85e91c9825c 100644 --- a/internal/service/rum/tags_gen.go +++ b/internal/service/rum/tags_gen.go @@ -5,9 +5,8 @@ import ( "context" "fmt" - 
"github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/cloudwatchrum" - "github.com/aws/aws-sdk-go/service/cloudwatchrum/cloudwatchrumiface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/rum" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -16,21 +15,21 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// map[string]*string handling +// map[string]string handling // Tags returns rum service tags. -func Tags(tags tftags.KeyValueTags) map[string]*string { - return aws.StringMap(tags.Map()) +func Tags(tags tftags.KeyValueTags) map[string]string { + return tags.Map() } // KeyValueTags creates tftags.KeyValueTags from rum service tags. -func KeyValueTags(ctx context.Context, tags map[string]*string) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags map[string]string) tftags.KeyValueTags { return tftags.New(ctx, tags) } // getTagsIn returns rum service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) map[string]*string { +func getTagsIn(ctx context.Context) map[string]string { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -41,7 +40,7 @@ func getTagsIn(ctx context.Context) map[string]*string { } // setTagsOut sets rum service tags in Context. -func setTagsOut(ctx context.Context, tags map[string]*string) { +func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -50,7 +49,7 @@ func setTagsOut(ctx context.Context, tags map[string]*string) { // updateTags updates rum service tags. 
// The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func updateTags(ctx context.Context, conn cloudwatchrumiface.CloudWatchRUMAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *rum.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*rum.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -59,12 +58,12 @@ func updateTags(ctx context.Context, conn cloudwatchrumiface.CloudWatchRUMAPI, i removedTags := oldTags.Removed(newTags) removedTags = removedTags.IgnoreSystem(names.RUM) if len(removedTags) > 0 { - input := &cloudwatchrum.UntagResourceInput{ + input := &rum.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -74,12 +73,12 @@ func updateTags(ctx context.Context, conn cloudwatchrumiface.CloudWatchRUMAPI, i updatedTags := oldTags.Updated(newTags) updatedTags = updatedTags.IgnoreSystem(names.RUM) if len(updatedTags) > 0 { - input := &cloudwatchrum.TagResourceInput{ + input := &rum.TagResourceInput{ ResourceArn: aws.String(identifier), Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -92,5 +91,5 @@ func updateTags(ctx context.Context, conn cloudwatchrumiface.CloudWatchRUMAPI, i // UpdateTags updates rum service tags. // It is called from outside this package. 
func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).RUMConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).RUMClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/s3/service_endpoint_resolver_gen.go b/internal/service/s3/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..85140be3a69 --- /dev/null +++ b/internal/service/s3/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package s3 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ s3_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver s3_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: s3_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params s3_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return 
endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up s3 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*s3_sdkv2.Options) { + return func(o *s3_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/s3/service_endpoints_gen_test.go b/internal/service/s3/service_endpoints_gen_test.go index b4deec6125a..f86ff5a6435 100644 --- a/internal/service/s3/service_endpoints_gen_test.go +++ b/internal/service/s3/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -93,7 +95,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -398,7 +400,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -419,24 +421,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func 
defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := s3_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), s3_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := s3_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), s3_sdkv2.EndpointParameters{ @@ -444,14 +446,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -540,16 +542,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + 
} else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/s3/service_package.go b/internal/service/s3/service_package.go index b6a546256b7..0c9b6d78f72 100644 --- a/internal/service/s3/service_package.go +++ b/internal/service/s3/service_package.go @@ -19,29 +19,27 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*s3.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return s3.NewFromConfig(cfg, func(o *s3.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled + return s3.NewFromConfig(cfg, + s3.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *s3.Options) { + if o.Region == names.USEast1RegionID && config["s3_us_east_1_regional_endpoint"].(string) != "regional" { + // Maintain the AWS SDK for Go v1 default of using the global endpoint in us-east-1. + // See https://github.com/hashicorp/terraform-provider-aws/issues/33028. + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.GlobalRegionID, + }) + o.Region = names.GlobalRegionID } - } else if o.Region == names.USEast1RegionID && config["s3_us_east_1_regional_endpoint"].(string) != "regional" { - // Maintain the AWS SDK for Go v1 default of using the global endpoint in us-east-1. - // See https://github.com/hashicorp/terraform-provider-aws/issues/33028. 
- o.Region = names.GlobalRegionID - } - o.UsePathStyle = config["s3_use_path_style"].(bool) + o.UsePathStyle = config["s3_use_path_style"].(bool) - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if tfawserr.ErrMessageContains(err, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) - }), nil + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if tfawserr.ErrMessageContains(err, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. + })) + }, + ), nil } diff --git a/internal/service/s3/service_package_gen.go b/internal/service/s3/service_package_gen.go index 4a8a6584983..ce97c095153 100644 --- a/internal/service/s3/service_package_gen.go +++ b/internal/service/s3/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package s3 diff --git a/internal/service/s3/sweep.go b/internal/service/s3/sweep.go index a7ab6064d1d..a4b191db20b 100644 --- a/internal/service/s3/sweep.go +++ b/internal/service/s3/sweep.go @@ -29,6 +29,9 @@ func RegisterSweepers() { resource.AddTestSweepers("aws_s3_object", &resource.Sweeper{ Name: "aws_s3_object", F: sweepObjects, + Dependencies: []string{ + "aws_m2_application", + }, }) resource.AddTestSweepers("aws_s3_bucket", &resource.Sweeper{ diff --git a/internal/service/s3control/service_endpoint_resolver_gen.go b/internal/service/s3control/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..2446353f251 --- /dev/null +++ b/internal/service/s3control/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package s3control + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + s3control_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3control" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ s3control_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver s3control_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: s3control_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params s3control_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up s3control endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*s3control_sdkv2.Options) { + return func(o *s3control_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/s3control/service_package_gen.go b/internal/service/s3control/service_package_gen.go index 54976b944a0..f0eb92a7942 100644 --- a/internal/service/s3control/service_package_gen.go +++ b/internal/service/s3control/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package s3control @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" s3control_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3control" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -119,19 +118,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*s3control_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return s3control_sdkv2.NewFromConfig(cfg, func(o *s3control_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return s3control_sdkv2.NewFromConfig(cfg, + s3control_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/s3outposts/service_endpoint_resolver_gen.go b/internal/service/s3outposts/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..a000d80130b --- /dev/null +++ b/internal/service/s3outposts/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package s3outposts + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) 
+} diff --git a/internal/service/s3outposts/service_endpoints_gen_test.go b/internal/service/s3outposts/service_endpoints_gen_test.go index ae3e984c6da..081d3144981 100644 --- a/internal/service/s3outposts/service_endpoints_gen_test.go +++ b/internal/service/s3outposts/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(s3outposts_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(s3outposts_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ 
func defaultFIPSEndpoint(region string) string {
 		url.Path = "/"
 	}
 
-	return url.String()
+	return *url, nil
 }
 
 func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams {
@@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) {
 	setup.config["use_fips_endpoint"] = true
 }
 
-func expectDefaultEndpoint(region string) caseExpectations {
+func expectDefaultEndpoint(t *testing.T, region string) caseExpectations {
+	t.Helper()
+
+	endpoint, err := defaultEndpoint(region)
+	if err != nil {
+		t.Fatalf("resolving s3outposts default endpoint: %s", err)
+	}
+
 	return caseExpectations{
-		endpoint: defaultEndpoint(region),
+		endpoint: endpoint.String(),
 		region:   expectedCallRegion,
 	}
 }
 
-func expectDefaultFIPSEndpoint(region string) caseExpectations {
+func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations {
+	t.Helper()
+
+	endpoint, err := defaultFIPSEndpoint(region)
+	if err != nil {
+		t.Fatalf("resolving s3outposts FIPS endpoint: %s", err)
+	}
+
+	hostname := endpoint.Hostname()
+	_, err = net.LookupHost(hostname)
+	if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound {
+		return expectDefaultEndpoint(t, region)
+	} else if err != nil {
+		t.Fatalf("looking up s3outposts endpoint %q: %s", hostname, err)
+	}
+
 	return caseExpectations{
-		endpoint: defaultFIPSEndpoint(region),
+		endpoint: endpoint.String(),
 		region:   expectedCallRegion,
 	}
 }
diff --git a/internal/service/s3outposts/service_package_gen.go b/internal/service/s3outposts/service_package_gen.go
index 0071c7a290e..50b41b2ebde 100644
--- a/internal/service/s3outposts/service_package_gen.go
+++ b/internal/service/s3outposts/service_package_gen.go
@@ -1,4 +1,4 @@
-// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT.
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT.
package s3outposts @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" s3outposts_sdkv1 "github.com/aws/aws-sdk-go/service/s3outposts" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -53,11 +52,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*s "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return s3outposts_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/sagemaker/domain.go b/internal/service/sagemaker/domain.go index ba7cbbb8753..aa6b85e64de 100644 --- a/internal/service/sagemaker/domain.go +++ b/internal/service/sagemaker/domain.go @@ -242,6 +242,20 @@ func ResourceDomain() *schema.Resource { }, }, }, + "generative_ai_settings": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "amazon_bedrock_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, "identity_provider_oauth_settings": { Type: schema.TypeList, Optional: true, @@ -389,6 +403,27 @@ func ResourceDomain() *schema.Resource { ValidateFunc: verify.ValidARN, }, }, + "custom_image": { + Type: schema.TypeList, + Optional: true, + MaxItems: 200, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "app_image_config_name": { + Type: schema.TypeString, + Required: true, + }, + "image_name": { + Type: schema.TypeString, + Required: true, + }, + "image_version_number": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, }, }, }, @@ -1373,6 +1408,10 @@ func 
expandDomainCodeEditorAppSettings(l []interface{}) *sagemaker.CodeEditorApp config := &sagemaker.CodeEditorAppSettings{} + if v, ok := m["custom_image"].([]interface{}); ok && len(v) > 0 { + config.CustomImages = expandDomainCustomImages(v) + } + if v, ok := m["default_resource_spec"].([]interface{}); ok && len(v) > 0 { config.DefaultResourceSpec = expandResourceSpec(v) } @@ -1598,6 +1637,9 @@ func expandCanvasAppSettings(l []interface{}) *sagemaker.CanvasAppSettings { if v, ok := m["direct_deploy_settings"].([]interface{}); ok { config.DirectDeploySettings = expandDirectDeploySettings(v) } + if v, ok := m["generative_ai_settings"].([]interface{}); ok { + config.GenerativeAiSettings = expandGenerativeAiSettings(v) + } if v, ok := m["identity_provider_oauth_settings"].([]interface{}); ok { config.IdentityProviderOAuthSettings = expandIdentityProviderOAuthSettings(v) } @@ -1649,6 +1691,22 @@ func expandDirectDeploySettings(l []interface{}) *sagemaker.DirectDeploySettings return config } +func expandGenerativeAiSettings(l []interface{}) *sagemaker.GenerativeAiSettings { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.GenerativeAiSettings{} + + if v, ok := m["amazon_bedrock_role_arn"].(string); ok && v != "" { + config.AmazonBedrockRoleArn = aws.String(v) + } + + return config +} + func expandIdentityProviderOAuthSettings(l []interface{}) []*sagemaker.IdentityProviderOAuthSetting { providers := make([]*sagemaker.IdentityProviderOAuthSetting, 0, len(l)) @@ -1949,6 +2007,10 @@ func flattenDomainCodeEditorAppSettings(config *sagemaker.CodeEditorAppSettings) m := map[string]interface{}{} + if config.CustomImages != nil { + m["custom_image"] = flattenDomainCustomImages(config.CustomImages) + } + if config.DefaultResourceSpec != nil { m["default_resource_spec"] = flattenResourceSpec(config.DefaultResourceSpec) } @@ -2075,6 +2137,7 @@ func flattenCanvasAppSettings(config *sagemaker.CanvasAppSettings) 
[]map[string] m := map[string]interface{}{ "direct_deploy_settings": flattenDirectDeploySettings(config.DirectDeploySettings), + "generative_ai_settings": flattenGenerativeAiSettings(config.GenerativeAiSettings), "identity_provider_oauth_settings": flattenIdentityProviderOAuthSettings(config.IdentityProviderOAuthSettings), "kendra_settings": flattenKendraSettings(config.KendraSettings), "time_series_forecasting_settings": flattenTimeSeriesForecastingSettings(config.TimeSeriesForecastingSettings), @@ -2097,6 +2160,18 @@ func flattenDirectDeploySettings(config *sagemaker.DirectDeploySettings) []map[s return []map[string]interface{}{m} } +func flattenGenerativeAiSettings(config *sagemaker.GenerativeAiSettings) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "amazon_bedrock_role_arn": aws.StringValue(config.AmazonBedrockRoleArn), + } + + return []map[string]interface{}{m} +} + func flattenKendraSettings(config *sagemaker.KendraSettings) []map[string]interface{} { if config == nil { return []map[string]interface{}{} diff --git a/internal/service/sagemaker/domain_test.go b/internal/service/sagemaker/domain_test.go index 05ac4eafef5..2dfe9b1041b 100644 --- a/internal/service/sagemaker/domain_test.go +++ b/internal/service/sagemaker/domain_test.go @@ -312,6 +312,38 @@ func testAccDomain_modelRegisterSettings(t *testing.T) { }) } +func testAccDomain_generativeAiSettings(t *testing.T) { + ctx := acctest.Context(t) + var domain sagemaker.DescribeDomainOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_domain.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDomainDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: 
testAccDomainConfig_generativeAiSettings(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.canvas_app_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.canvas_app_settings.0.generative_ai_settings.#", acctest.Ct1), + resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.canvas_app_settings.0.generative_ai_settings.0.amazon_bedrock_role_arn", "aws_iam_role.test", names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"retention_policy"}, + }, + }, + }) +} + func testAccDomain_kendraSettings(t *testing.T) { ctx := acctest.Context(t) var domain sagemaker.DescribeDomainOutput @@ -654,6 +686,85 @@ func testAccDomain_codeEditorAppSettings(t *testing.T) { }) } +func testAccDomain_codeEditorAppSettings_customImage(t *testing.T) { + ctx := acctest.Context(t) + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BASE_IMAGE is not set") + } + + var domain sagemaker.DescribeDomainOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_domain.test" + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDomainDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDomainConfig_codeEditorAppSettingsCustomImage(rName, baseImage), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName, &domain), + 
resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.code_editor_app_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.code_editor_app_settings.0.default_resource_spec.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.code_editor_app_settings.0.custom_image.#", acctest.Ct1), + resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.code_editor_app_settings.0.custom_image.0.app_image_config_name", "aws_sagemaker_app_image_config.test", "app_image_config_name"), + resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.code_editor_app_settings.0.custom_image.0.image_name", "aws_sagemaker_image.test", "image_name"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"retention_policy"}, + }, + }, + }) +} + +func testAccDomain_codeEditorAppSettings_defaultResourceSpecAndCustomImage(t *testing.T) { + ctx := acctest.Context(t) + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BASE_IMAGE is not set") + } + + var domain sagemaker.DescribeDomainOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_domain.test" + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckDomainDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccDomainConfig_codeEditorAppSettingsDefaultResourceSpecAndCustomImage(rName, baseImage), + Check: resource.ComposeTestCheckFunc( + testAccCheckDomainExists(ctx, resourceName, &domain), + 
resource.TestCheckResourceAttr(resourceName, "default_user_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.code_editor_app_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.code_editor_app_settings.0.default_resource_spec.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "default_user_settings.0.code_editor_app_settings.0.custom_image.#", acctest.Ct1), + resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.code_editor_app_settings.0.default_resource_spec.0.sagemaker_image_version_arn", "aws_sagemaker_image_version.test", names.AttrARN), + resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.code_editor_app_settings.0.custom_image.0.app_image_config_name", "aws_sagemaker_app_image_config.test", "app_image_config_name"), + resource.TestCheckResourceAttrPair(resourceName, "default_user_settings.0.code_editor_app_settings.0.custom_image.0.image_name", "aws_sagemaker_image.test", "image_name"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"retention_policy"}, + }, + }, + }) +} + func testAccDomain_jupyterLabAppSettings(t *testing.T) { ctx := acctest.Context(t) var domain sagemaker.DescribeDomainOutput @@ -1464,6 +1575,31 @@ resource "aws_sagemaker_domain" "test" { `, rName)) } +func testAccDomainConfig_generativeAiSettings(rName string) string { + return acctest.ConfigCompose(testAccDomainConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = aws_subnet.test[*].id + + default_user_settings { + execution_role = aws_iam_role.test.arn + + canvas_app_settings { + generative_ai_settings { + amazon_bedrock_role_arn = aws_iam_role.test.arn + } + } + } + + retention_policy { + home_efs_file_system = "Delete" + } +} +`, rName)) +} + func 
testAccDomainConfig_kendraSettings(rName string) string { return acctest.ConfigCompose(testAccDomainConfig_base(rName), fmt.Sprintf(` resource "aws_sagemaker_domain" "test" { @@ -1765,6 +1901,95 @@ resource "aws_sagemaker_domain" "test" { `, rName)) } +func testAccDomainConfig_codeEditorAppSettingsCustomImage(rName, baseImage string) string { + return acctest.ConfigCompose(testAccDomainConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn + + depends_on = [aws_iam_role_policy_attachment.test] +} + +resource "aws_sagemaker_app_image_config" "test" { + app_image_config_name = %[1]q +} + +resource "aws_sagemaker_image_version" "test" { + image_name = aws_sagemaker_image.test.id + base_image = %[2]q +} + +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = aws_subnet.test[*].id + + default_user_settings { + execution_role = aws_iam_role.test.arn + + code_editor_app_settings { + custom_image { + app_image_config_name = aws_sagemaker_app_image_config.test.app_image_config_name + image_name = aws_sagemaker_image_version.test.image_name + } + } + } + + retention_policy { + home_efs_file_system = "Delete" + } +} +`, rName, baseImage)) +} + +func testAccDomainConfig_codeEditorAppSettingsDefaultResourceSpecAndCustomImage(rName, baseImage string) string { + return acctest.ConfigCompose(testAccDomainConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn + + depends_on = [aws_iam_role_policy_attachment.test] +} + +resource "aws_sagemaker_app_image_config" "test" { + app_image_config_name = %[1]q +} + +resource "aws_sagemaker_image_version" "test" { + image_name = aws_sagemaker_image.test.id + base_image = %[2]q +} + +resource "aws_sagemaker_domain" "test" { + domain_name = %[1]q + auth_mode = "IAM" + vpc_id = aws_vpc.test.id + subnet_ids = 
aws_subnet.test[*].id + + default_user_settings { + execution_role = aws_iam_role.test.arn + + code_editor_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + sagemaker_image_version_arn = aws_sagemaker_image_version.test.arn + } + + custom_image { + app_image_config_name = aws_sagemaker_app_image_config.test.app_image_config_name + image_name = aws_sagemaker_image_version.test.image_name + } + } + } + + retention_policy { + home_efs_file_system = "Delete" + } +} +`, rName, baseImage)) +} + func testAccDomainConfig_kernelGatewayAppSettings(rName string) string { return acctest.ConfigCompose(testAccDomainConfig_base(rName), fmt.Sprintf(` resource "aws_sagemaker_domain" "test" { diff --git a/internal/service/sagemaker/endpoint_configuration.go b/internal/service/sagemaker/endpoint_configuration.go index a040c56c9a0..5c226f55959 100644 --- a/internal/service/sagemaker/endpoint_configuration.go +++ b/internal/service/sagemaker/endpoint_configuration.go @@ -295,6 +295,12 @@ func ResourceEndpointConfiguration() *schema.Resource { Optional: true, ForceNew: true, }, + "inference_ami_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(sagemaker.ProductionVariantInferenceAmiVersion_Values(), false), + }, "initial_instance_count": { Type: schema.TypeInt, Optional: true, @@ -433,6 +439,12 @@ func ResourceEndpointConfiguration() *schema.Resource { Optional: true, ForceNew: true, }, + "inference_ami_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice(sagemaker.ProductionVariantInferenceAmiVersion_Values(), false), + }, "initial_instance_count": { Type: schema.TypeInt, Optional: true, @@ -697,6 +709,10 @@ func expandProductionVariants(configured []interface{}) []*sagemaker.ProductionV l.EnableSSMAccess = aws.Bool(v) } + if v, ok := data["inference_ami_version"].(string); ok && v != "" { + l.InferenceAmiVersion = aws.String(v) + } + 
containers = append(containers, l) } @@ -750,6 +766,10 @@ func flattenProductionVariants(list []*sagemaker.ProductionVariant) []map[string l["enable_ssm_access"] = aws.BoolValue(i.EnableSSMAccess) } + if i.InferenceAmiVersion != nil { + l["inference_ami_version"] = aws.StringValue(i.InferenceAmiVersion) + } + result = append(result, l) } return result diff --git a/internal/service/sagemaker/endpoint_configuration_test.go b/internal/service/sagemaker/endpoint_configuration_test.go index 65891f987f6..2257498343a 100644 --- a/internal/service/sagemaker/endpoint_configuration_test.go +++ b/internal/service/sagemaker/endpoint_configuration_test.go @@ -214,6 +214,34 @@ func TestAccSageMakerEndpointConfiguration_ProductionVariants_serverless(t *test }) } +func TestAccSageMakerEndpointConfiguration_ProductionVariants_ami(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_endpoint_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfigurationConfig_ami(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEndpointConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "production_variants.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "production_variants.0.inference_ami_version", "al2-ami-sagemaker-inference-gpu-2"), //lintignore:AWSAT002 + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerEndpointConfiguration_ProductionVariants_serverlessProvisionedConcurrency(t *testing.T) { ctx := acctest.Context(t) rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -433,6 +461,39 @@ func TestAccSageMakerEndpointConfiguration_dataCapture(t *testing.T) { }) } +func TestAccSageMakerEndpointConfiguration_dataCapture_inputAndOutput(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_endpoint_configuration.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointConfigurationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfigurationConfig_dataCapture_inputAndOutput(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckEndpointConfigurationExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "data_capture_config.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "data_capture_config.0.enable_capture", acctest.CtTrue), + resource.TestCheckResourceAttr(resourceName, "data_capture_config.0.initial_sampling_percentage", "50"), + resource.TestCheckResourceAttr(resourceName, "data_capture_config.0.destination_s3_uri", fmt.Sprintf("s3://%s/", rName)), + resource.TestCheckResourceAttr(resourceName, "data_capture_config.0.capture_options.0.capture_mode", "InputAndOutput"), + resource.TestCheckResourceAttr(resourceName, "data_capture_config.0.capture_content_type_header.0.json_content_types.#", acctest.Ct1), + resource.TestCheckTypeSetElemAttr(resourceName, "data_capture_config.0.capture_content_type_header.0.json_content_types.*", "application/json"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSageMakerEndpointConfiguration_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1006,6 
+1067,41 @@ resource "aws_sagemaker_endpoint_configuration" "test" { `, rName)) } +func testAccEndpointConfigurationConfig_dataCapture_inputAndOutput(rName string) string { + return acctest.ConfigCompose(testAccEndpointConfigurationConfig_base(rName), fmt.Sprintf(` +resource "aws_s3_bucket" "test" { + bucket = %[1]q + force_destroy = true +} + +resource "aws_sagemaker_endpoint_configuration" "test" { + name = %[1]q + + production_variants { + variant_name = "variant-1" + model_name = aws_sagemaker_model.test.name + initial_instance_count = 2 + instance_type = "ml.t2.medium" + initial_variant_weight = 1 + } + + data_capture_config { + enable_capture = true + initial_sampling_percentage = 50 + destination_s3_uri = "s3://${aws_s3_bucket.test.bucket}/" + + capture_options { + capture_mode = "InputAndOutput" + } + + capture_content_type_header { + json_content_types = ["application/json"] + } + } +} +`, rName)) +} + func testAccEndpointConfigurationConfig_asyncKMS(rName string) string { return acctest.ConfigCompose(testAccEndpointConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_s3_bucket" "test" { @@ -1264,6 +1360,24 @@ resource "aws_sagemaker_endpoint_configuration" "test" { `, rName)) } +func testAccEndpointConfigurationConfig_ami(rName string) string { + //lintignore:AWSAT002 + return acctest.ConfigCompose(testAccEndpointConfigurationConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_endpoint_configuration" "test" { + name = %[1]q + + production_variants { + variant_name = "variant-1" + model_name = aws_sagemaker_model.test.name + inference_ami_version = "al2-ami-sagemaker-inference-gpu-2" + instance_type = "ml.t2.medium" + initial_instance_count = 2 + initial_variant_weight = 1 + } +} +`, rName)) +} + func testAccEndpointConfigurationConfig_serverlessProvisionedConcurrency(rName string) string { return acctest.ConfigCompose(testAccEndpointConfigurationConfig_base(rName), fmt.Sprintf(` resource "aws_sagemaker_endpoint_configuration" "test" { diff 
--git a/internal/service/sagemaker/sagemaker_test.go b/internal/service/sagemaker/sagemaker_test.go index aef052b01f0..b04b117ee75 100644 --- a/internal/service/sagemaker/sagemaker_test.go +++ b/internal/service/sagemaker/sagemaker_test.go @@ -46,28 +46,31 @@ func TestAccSageMaker_serial(t *testing.T) { "kernelGatewayAppSettings": testAccDomain_kernelGatewayAppSettings, "kernelGatewayAppSettings_customImage": testAccDomain_kernelGatewayAppSettings_customImage, "kernelGatewayAppSettings_lifecycleConfig": testAccDomain_kernelGatewayAppSettings_lifecycleConfig, - "kernelGatewayAppSettings_defaultResourceAndCustomImage": testAccDomain_kernelGatewayAppSettings_defaultResourceSpecAndCustomImage, - "jupyterServerAppSettings": testAccDomain_jupyterServerAppSettings, - "codeEditorAppSettings": testAccDomain_codeEditorAppSettings, - "jupyterLabAppSettings": testAccDomain_jupyterLabAppSettings, - "kms": testAccDomain_kms, - "securityGroup": testAccDomain_securityGroup, - "sharingSettings": testAccDomain_sharingSettings, - "defaultUserSettingsUpdated": testAccDomain_defaultUserSettingsUpdated, - "canvas": testAccDomain_canvasAppSettings, - "modelRegisterSettings": testAccDomain_modelRegisterSettings, - "identityProviderOauthSettings": testAccDomain_identityProviderOAuthSettings, - "directDeploySettings": testAccDomain_directDeploySettings, - "kendraSettings": testAccDomain_kendraSettings, - "workspaceSettings": testAccDomain_workspaceSettings, - "domainSettings": testAccDomain_domainSettings, - "rSessionAppSettings": testAccDomain_rSessionAppSettings, - "rStudioServerProAppSettings": testAccDomain_rStudioServerProAppSettings, - "spaceSettingsKernelGatewayAppSettings": testAccDomain_spaceSettingsKernelGatewayAppSettings, - "code": testAccDomain_jupyterServerAppSettings_code, - "efs": testAccDomain_efs, - "posix": testAccDomain_posix, - "spaceStorageSettings": testAccDomain_spaceStorageSettings, + "kernelGatewayAppSettings_defaultResourceAndCustomImage": 
testAccDomain_kernelGatewayAppSettings_defaultResourceSpecAndCustomImage, + "jupyterServerAppSettings": testAccDomain_jupyterServerAppSettings, + "codeEditorAppSettings": testAccDomain_codeEditorAppSettings, + "codeEditorAppSettings_customImage": testAccDomain_codeEditorAppSettings_customImage, + "codeEditorAppSettings_defaultResourceSpecAndCustomImage": testAccDomain_codeEditorAppSettings_defaultResourceSpecAndCustomImage, + "jupyterLabAppSettings": testAccDomain_jupyterLabAppSettings, + "kms": testAccDomain_kms, + "securityGroup": testAccDomain_securityGroup, + "sharingSettings": testAccDomain_sharingSettings, + "defaultUserSettingsUpdated": testAccDomain_defaultUserSettingsUpdated, + "canvas": testAccDomain_canvasAppSettings, + "modelRegisterSettings": testAccDomain_modelRegisterSettings, + "generativeAi": testAccDomain_generativeAiSettings, + "identityProviderOauthSettings": testAccDomain_identityProviderOAuthSettings, + "directDeploySettings": testAccDomain_directDeploySettings, + "kendraSettings": testAccDomain_kendraSettings, + "workspaceSettings": testAccDomain_workspaceSettings, + "domainSettings": testAccDomain_domainSettings, + "rSessionAppSettings": testAccDomain_rSessionAppSettings, + "rStudioServerProAppSettings": testAccDomain_rStudioServerProAppSettings, + "spaceSettingsKernelGatewayAppSettings": testAccDomain_spaceSettingsKernelGatewayAppSettings, + "code": testAccDomain_jupyterServerAppSettings_code, + "efs": testAccDomain_efs, + "posix": testAccDomain_posix, + "spaceStorageSettings": testAccDomain_spaceStorageSettings, }, "FlowDefinition": { acctest.CtBasic: testAccFlowDefinition_basic, @@ -98,21 +101,24 @@ func TestAccSageMaker_serial(t *testing.T) { "kernelGatewayAppSettings": testAccUserProfile_kernelGatewayAppSettings, "kernelGatewayAppSettings_lifecycleConfig": testAccUserProfile_kernelGatewayAppSettings_lifecycleconfig, "kernelGatewayAppSettings_imageConfig": testAccUserProfile_kernelGatewayAppSettings_imageconfig, + 
"codeEditorAppSettings_customImage": testAccUserProfile_codeEditorAppSettings_customImage, "jupyterServerAppSettings": testAccUserProfile_jupyterServerAppSettings, }, "Workforce": { acctest.CtDisappears: testAccWorkforce_disappears, "CognitoConfig": testAccWorkforce_cognitoConfig, "OidcConfig": testAccWorkforce_oidcConfig, + "OidcConfig_full": testAccWorkforce_oidcConfig_full, "SourceIpConfig": testAccWorkforce_sourceIPConfig, "VPC": testAccWorkforce_vpc, }, "Workteam": { - acctest.CtDisappears: testAccWorkteam_disappears, - "tags": testAccWorkteam_tags, - "CognitoConfig": testAccWorkteam_cognitoConfig, - "NotificationConfig": testAccWorkteam_notificationConfig, - "OidcConfig": testAccWorkteam_oidcConfig, + acctest.CtDisappears: testAccWorkteam_disappears, + "tags": testAccWorkteam_tags, + "CognitoConfig": testAccWorkteam_cognitoConfig, + "NotificationConfig": testAccWorkteam_notificationConfig, + "WorkerAccessConfiguration": testAccWorkteam_workerAccessConfiguration, + "OidcConfig": testAccWorkteam_oidcConfig, }, "Servicecatalog": { acctest.CtBasic: testAccServicecatalogPortfolioStatus_basic, diff --git a/internal/service/sagemaker/service_endpoint_resolver_gen.go b/internal/service/sagemaker/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f6fe9d90139 --- /dev/null +++ b/internal/service/sagemaker/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+
+package sagemaker
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/url"
+
+	endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	"github.com/hashicorp/terraform-provider-aws/internal/errs"
+)
+
+var _ endpoints_sdkv1.Resolver = resolverSDKv1{}
+
+type resolverSDKv1 struct {
+	ctx context.Context
+}
+
+func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 {
+	return resolverSDKv1{
+		ctx: ctx,
+	}
+}
+
+func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) {
+	ctx := r.ctx
+
+	var opt endpoints_sdkv1.Options
+	opt.Set(opts...)
+
+	useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled
+
+	defaultResolver := endpoints_sdkv1.DefaultResolver()
+
+	if useFIPS {
+		ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS)
+
+		endpoint, err = defaultResolver.EndpointFor(service, region, opts...)
+		if err != nil {
+			return endpoint, err
+		}
+
+		tflog.Debug(ctx, "endpoint resolved", map[string]any{
+			"tf_aws.endpoint": endpoint.URL,
+		})
+
+		var endpointURL *url.URL
+		endpointURL, err = url.Parse(endpoint.URL)
+		if err != nil {
+			return endpoint, err
+		}
+
+		hostname := endpointURL.Hostname()
+		_, err = net.LookupHost(hostname)
+		if err != nil {
+			if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound {
+				tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{
+					"tf_aws.hostname": hostname,
+				})
+				opts = append(opts, func(o *endpoints_sdkv1.Options) {
+					o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled
+				})
+			} else {
+				err = fmt.Errorf("looking up sagemaker endpoint %q: %s", hostname, err)
+				return
+			}
+		} else {
+			return endpoint, err
+		}
+	}
+
+	return defaultResolver.EndpointFor(service, region, opts...)
+} diff --git a/internal/service/sagemaker/service_endpoints_gen_test.go b/internal/service/sagemaker/service_endpoints_gen_test.go index 225b319dfd1..cf13517d23f 100644 --- a/internal/service/sagemaker/service_endpoints_gen_test.go +++ b/internal/service/sagemaker/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(sagemaker_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(sagemaker_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func 
defaultFIPSEndpoint(region string) string {
 		url.Path = "/"
 	}
 
-	return url.String()
+	return *url, nil
 }
 
 func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams {
@@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) {
 	setup.config["use_fips_endpoint"] = true
 }
 
-func expectDefaultEndpoint(region string) caseExpectations {
+func expectDefaultEndpoint(t *testing.T, region string) caseExpectations {
+	t.Helper()
+
+	endpoint, err := defaultEndpoint(region)
+	if err != nil {
+		t.Fatalf("resolving sagemaker default endpoint: %s", err)
+	}
+
 	return caseExpectations{
-		endpoint: defaultEndpoint(region),
+		endpoint: endpoint.String(),
 		region:   expectedCallRegion,
 	}
 }
 
-func expectDefaultFIPSEndpoint(region string) caseExpectations {
+func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations {
+	t.Helper()
+
+	endpoint, err := defaultFIPSEndpoint(region)
+	if err != nil {
+		t.Fatalf("resolving sagemaker FIPS endpoint: %s", err)
+	}
+
+	hostname := endpoint.Hostname()
+	_, err = net.LookupHost(hostname)
+	if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound {
+		return expectDefaultEndpoint(t, region)
+	} else if err != nil {
+		t.Fatalf("looking up sagemaker endpoint %q: %s", hostname, err)
+	}
+
 	return caseExpectations{
-		endpoint: defaultFIPSEndpoint(region),
+		endpoint: endpoint.String(),
 		region:   expectedCallRegion,
 	}
 }
diff --git a/internal/service/sagemaker/service_package_gen.go b/internal/service/sagemaker/service_package_gen.go
index a8533b5b26a..fa620ff3602 100644
--- a/internal/service/sagemaker/service_package_gen.go
+++ b/internal/service/sagemaker/service_package_gen.go
@@ -1,4 +1,4 @@
-// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT.
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT.
package sagemaker @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" sagemaker_sdkv1 "github.com/aws/aws-sdk-go/service/sagemaker" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -253,11 +252,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*s "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return sagemaker_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/sagemaker/user_profile.go b/internal/service/sagemaker/user_profile.go index 5ff0ec8811a..c4a31838ba7 100644 --- a/internal/service/sagemaker/user_profile.go +++ b/internal/service/sagemaker/user_profile.go @@ -92,6 +92,20 @@ func ResourceUserProfile() *schema.Resource { }, }, }, + "generative_ai_settings": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "amazon_bedrock_role_arn": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidARN, + }, + }, + }, + }, "identity_provider_oauth_settings": { Type: schema.TypeList, Optional: true, @@ -239,6 +253,27 @@ func ResourceUserProfile() *schema.Resource { ValidateFunc: verify.ValidARN, }, }, + "custom_image": { + Type: schema.TypeList, + Optional: true, + MaxItems: 200, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "app_image_config_name": { + Type: schema.TypeString, + Required: true, + }, + "image_name": { + Type: schema.TypeString, + Required: true, + }, + "image_version_number": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, }, }, }, diff 
--git a/internal/service/sagemaker/user_profile_test.go b/internal/service/sagemaker/user_profile_test.go index 940a133a2e8..2ece32de952 100644 --- a/internal/service/sagemaker/user_profile_test.go +++ b/internal/service/sagemaker/user_profile_test.go @@ -277,6 +277,44 @@ func testAccUserProfile_kernelGatewayAppSettings_imageconfig(t *testing.T) { }) } +func testAccUserProfile_codeEditorAppSettings_customImage(t *testing.T) { + ctx := acctest.Context(t) + if os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") == "" { + t.Skip("Environment variable SAGEMAKER_IMAGE_VERSION_BASE_IMAGE is not set") + } + + var domain sagemaker.DescribeUserProfileOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_user_profile.test" + baseImage := os.Getenv("SAGEMAKER_IMAGE_VERSION_BASE_IMAGE") + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckUserProfileDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccUserProfileConfig_codeEditorAppSettingsImage(rName, baseImage), + Check: resource.ComposeTestCheckFunc( + testAccCheckUserProfileExists(ctx, resourceName, &domain), + resource.TestCheckResourceAttr(resourceName, "user_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.code_editor_app_settings.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.code_editor_app_settings.0.lifecycle_config_arns.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.code_editor_app_settings.0.default_resource_spec.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "user_settings.0.code_editor_app_settings.0.default_resource_spec.0.instance_type", "ml.t3.micro"), + resource.TestCheckResourceAttrPair(resourceName, 
"user_settings.0.code_editor_app_settings.0.default_resource_spec.0.sagemaker_image_version_arn", "aws_sagemaker_image_version.test", names.AttrARN), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccUserProfile_jupyterServerAppSettings(t *testing.T) { ctx := acctest.Context(t) var domain sagemaker.DescribeUserProfileOutput @@ -615,3 +653,46 @@ resource "aws_sagemaker_user_profile" "test" { } `, rName, baseImage)) } + +func testAccUserProfileConfig_codeEditorAppSettingsImage(rName, baseImage string) string { + return acctest.ConfigCompose(testAccUserProfileConfig_base(rName), fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_iam_role_policy_attachment" "test" { + role = aws_iam_role.test.name + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonSageMakerFullAccess" +} + +resource "aws_sagemaker_image" "test" { + image_name = %[1]q + role_arn = aws_iam_role.test.arn + + depends_on = [aws_iam_role_policy_attachment.test] +} + +resource "aws_sagemaker_image_version" "test" { + image_name = aws_sagemaker_image.test.id + base_image = %[2]q + + depends_on = [aws_iam_role_policy_attachment.test] +} + +resource "aws_sagemaker_user_profile" "test" { + domain_id = aws_sagemaker_domain.test.id + user_profile_name = %[1]q + + user_settings { + execution_role = aws_iam_role.test.arn + + code_editor_app_settings { + default_resource_spec { + instance_type = "ml.t3.micro" + sagemaker_image_version_arn = aws_sagemaker_image_version.test.arn + } + } + } + + depends_on = [aws_iam_role_policy_attachment.test] +} +`, rName, baseImage)) +} diff --git a/internal/service/sagemaker/workforce.go b/internal/service/sagemaker/workforce.go index bac80fdc80c..95349f64660 100644 --- a/internal/service/sagemaker/workforce.go +++ b/internal/service/sagemaker/workforce.go @@ -63,6 +63,11 @@ func ResourceWorkforce() *schema.Resource { ExactlyOneOf: []string{"oidc_config", 
"cognito_config"}, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ + "authentication_request_extra_params": { + Type: schema.TypeMap, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, "authorization_endpoint": { Type: schema.TypeString, Required: true, @@ -103,6 +108,10 @@ func ResourceWorkforce() *schema.Resource { validation.StringLenBetween(1, 500), validation.IsURLWithHTTPS, )}, + names.AttrScope: { + Type: schema.TypeString, + Optional: true, + }, "token_endpoint": { Type: schema.TypeString, Required: true, @@ -396,6 +405,14 @@ func expandWorkforceOIDCConfig(l []interface{}) *sagemaker.OidcConfig { UserInfoEndpoint: aws.String(m["user_info_endpoint"].(string)), } + if v, ok := m["authentication_request_extra_params"].(map[string]interface{}); ok && v != nil { + config.AuthenticationRequestExtraParams = flex.ExpandStringMap(v) + } + + if v, ok := m[names.AttrScope].(string); ok && v != "" { + config.Scope = aws.String(v) + } + return config } @@ -405,14 +422,16 @@ func flattenWorkforceOIDCConfig(config *sagemaker.OidcConfigForResponse, clientS } m := map[string]interface{}{ - "authorization_endpoint": aws.StringValue(config.AuthorizationEndpoint), - names.AttrClientID: aws.StringValue(config.ClientId), - names.AttrClientSecret: clientSecret, - names.AttrIssuer: aws.StringValue(config.Issuer), - "jwks_uri": aws.StringValue(config.JwksUri), - "logout_endpoint": aws.StringValue(config.LogoutEndpoint), - "token_endpoint": aws.StringValue(config.TokenEndpoint), - "user_info_endpoint": aws.StringValue(config.UserInfoEndpoint), + "authentication_request_extra_params": aws.StringValueMap(config.AuthenticationRequestExtraParams), + "authorization_endpoint": aws.StringValue(config.AuthorizationEndpoint), + names.AttrClientID: aws.StringValue(config.ClientId), + names.AttrClientSecret: clientSecret, + names.AttrIssuer: aws.StringValue(config.Issuer), + "jwks_uri": aws.StringValue(config.JwksUri), + "logout_endpoint": 
aws.StringValue(config.LogoutEndpoint), + names.AttrScope: aws.StringValue(config.Scope), + "token_endpoint": aws.StringValue(config.TokenEndpoint), + "user_info_endpoint": aws.StringValue(config.UserInfoEndpoint), } return []map[string]interface{}{m} diff --git a/internal/service/sagemaker/workforce_test.go b/internal/service/sagemaker/workforce_test.go index 62a6483a9b2..5cc20046d36 100644 --- a/internal/service/sagemaker/workforce_test.go +++ b/internal/service/sagemaker/workforce_test.go @@ -123,6 +123,78 @@ func testAccWorkforce_oidcConfig(t *testing.T) { }) } +func testAccWorkforce_oidcConfig_full(t *testing.T) { + ctx := acctest.Context(t) + var workforce sagemaker.Workforce + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_workforce.test" + endpoint1 := "https://example.com" + endpoint2 := "https://test.example.com" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWorkforceDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWorkforceConfig_oidc_full(rName, endpoint1), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkforceExists(ctx, resourceName, &workforce), + resource.TestCheckResourceAttr(resourceName, "workforce_name", rName), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "sagemaker", regexache.MustCompile(`workforce/.+`)), + resource.TestCheckResourceAttr(resourceName, "cognito_config.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "oidc_config.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.authorization_endpoint", endpoint1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.client_id", rName), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.client_secret", rName), + 
resource.TestCheckResourceAttr(resourceName, "oidc_config.0.issuer", endpoint1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.jwks_uri", endpoint1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.logout_endpoint", endpoint1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.token_endpoint", endpoint1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.user_info_endpoint", endpoint1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.scope", endpoint1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.authentication_request_extra_params.%", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.authentication_request_extra_params.test", endpoint1), + resource.TestCheckResourceAttr(resourceName, "source_ip_config.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "source_ip_config.0.cidrs.#", acctest.Ct0), + resource.TestCheckResourceAttrSet(resourceName, "subdomain"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"oidc_config.0.client_secret"}, + }, + { + Config: testAccWorkforceConfig_oidc_full(rName, endpoint2), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkforceExists(ctx, resourceName, &workforce), + resource.TestCheckResourceAttr(resourceName, "workforce_name", rName), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "sagemaker", regexache.MustCompile(`workforce/.+`)), + resource.TestCheckResourceAttr(resourceName, "cognito_config.#", acctest.Ct0), + resource.TestCheckResourceAttr(resourceName, "oidc_config.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.authorization_endpoint", endpoint2), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.client_id", rName), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.client_secret", rName), + resource.TestCheckResourceAttr(resourceName, 
"oidc_config.0.issuer", endpoint2), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.jwks_uri", endpoint2), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.logout_endpoint", endpoint2), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.token_endpoint", endpoint2), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.user_info_endpoint", endpoint2), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.scope", endpoint2), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.authentication_request_extra_params.%", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "oidc_config.0.authentication_request_extra_params.test", endpoint2), + resource.TestCheckResourceAttr(resourceName, "source_ip_config.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "source_ip_config.0.cidrs.#", acctest.Ct0), + resource.TestCheckResourceAttrSet(resourceName, "subdomain"), + ), + }, + }, + }) +} + func testAccWorkforce_sourceIPConfig(t *testing.T) { ctx := acctest.Context(t) var workforce sagemaker.Workforce @@ -368,6 +440,30 @@ resource "aws_sagemaker_workforce" "test" { `, rName, endpoint)) } +func testAccWorkforceConfig_oidc_full(rName, endpoint string) string { + return acctest.ConfigCompose(testAccWorkforceConfig_base(rName), fmt.Sprintf(` +resource "aws_sagemaker_workforce" "test" { + workforce_name = %[1]q + + oidc_config { + authorization_endpoint = %[2]q + client_id = %[1]q + client_secret = %[1]q + issuer = %[2]q + jwks_uri = %[2]q + logout_endpoint = %[2]q + token_endpoint = %[2]q + user_info_endpoint = %[2]q + scope = %[2]q + + authentication_request_extra_params = { + test = %[2]q + } + } +} +`, rName, endpoint)) +} + func testAccWorkforceConfig_vpcBase(rName string) string { return acctest.ConfigCompose(testAccWorkforceConfig_base(rName), acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` resource "aws_vpc" "test" { diff --git a/internal/service/sagemaker/workteam.go 
b/internal/service/sagemaker/workteam.go index 0cc33951e8b..e34d4f11361 100644 --- a/internal/service/sagemaker/workteam.go +++ b/internal/service/sagemaker/workteam.go @@ -111,6 +111,50 @@ func ResourceWorkteam() *schema.Resource { }, DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, }, + "worker_access_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "s3_presign": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iam_policy_constraints": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(sagemaker.EnabledOrDisabled_Values(), false), + ExactlyOneOf: []string{"worker_access_configuration.0.s3_presign.0.iam_policy_constraints.0.source_ip", "worker_access_configuration.0.s3_presign.0.iam_policy_constraints.0.vpc_source_ip"}, + }, + "vpc_source_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(sagemaker.EnabledOrDisabled_Values(), false), + ExactlyOneOf: []string{"worker_access_configuration.0.s3_presign.0.iam_policy_constraints.0.source_ip", "worker_access_configuration.0.s3_presign.0.iam_policy_constraints.0.vpc_source_ip"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "subdomain": { Type: schema.TypeString, Computed: true, @@ -154,6 +198,10 @@ func resourceWorkteamCreate(ctx context.Context, d *schema.ResourceData, meta in input.NotificationConfiguration = expandWorkteamNotificationConfiguration(v.([]interface{})) } + if v, ok := d.GetOk("worker_access_configuration"); ok { + input.WorkerAccessConfiguration = expandWorkerAccessConfiguration(v.([]interface{})) + } + 
log.Printf("[DEBUG] Updating SageMaker Workteam: %s", input) _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 2*time.Minute, func() (interface{}, error) { return conn.CreateWorkteamWithContext(ctx, input) @@ -198,6 +246,10 @@ func resourceWorkteamRead(ctx context.Context, d *schema.ResourceData, meta inte return sdkdiag.AppendErrorf(diags, "setting notification_configuration: %s", err) } + if err := d.Set("worker_access_configuration", flattenWorkerAccessConfiguration(workteam.WorkerAccessConfiguration)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting worker_access_configuration: %s", err) + } + return diags } @@ -219,6 +271,10 @@ func resourceWorkteamUpdate(ctx context.Context, d *schema.ResourceData, meta in input.NotificationConfiguration = expandWorkteamNotificationConfiguration(d.Get("notification_configuration").([]interface{})) } + if d.HasChange("worker_access_configuration") { + input.WorkerAccessConfiguration = expandWorkerAccessConfiguration(d.Get("worker_access_configuration").([]interface{})) + } + log.Printf("[DEBUG] Updating SageMaker Workteam: %s", input) _, err := conn.UpdateWorkteamWithContext(ctx, input) @@ -380,3 +436,96 @@ func flattenWorkteamNotificationConfiguration(config *sagemaker.NotificationConf return []map[string]interface{}{m} } + +func expandWorkerAccessConfiguration(l []interface{}) *sagemaker.WorkerAccessConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.WorkerAccessConfiguration{} + + if v, ok := m["s3_presign"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + config.S3Presign = expandS3Presign(v) + } else { + return nil + } + + return config +} + +func flattenWorkerAccessConfiguration(config *sagemaker.WorkerAccessConfiguration) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "s3_presign": flattenS3Presign(config.S3Presign), + } + + return 
[]map[string]interface{}{m} +} + +func expandS3Presign(l []interface{}) *sagemaker.S3Presign { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.S3Presign{} + + if v, ok := m["iam_policy_constraints"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + config.IamPolicyConstraints = expandIAMPolicyConstraints(v) + } else { + return nil + } + + return config +} + +func flattenS3Presign(config *sagemaker.S3Presign) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "iam_policy_constraints": flattenIAMPolicyConstraints(config.IamPolicyConstraints), + } + + return []map[string]interface{}{m} +} + +func expandIAMPolicyConstraints(l []interface{}) *sagemaker.IamPolicyConstraints { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + config := &sagemaker.IamPolicyConstraints{} + + if v, ok := m["source_ip"].(string); ok && v != "" { + config.SourceIp = aws.String(v) + } + + if v, ok := m["vpc_source_ip"].(string); ok && v != "" { + config.VpcSourceIp = aws.String(v) + } + + return config +} + +func flattenIAMPolicyConstraints(config *sagemaker.IamPolicyConstraints) []map[string]interface{} { + if config == nil { + return []map[string]interface{}{} + } + + m := map[string]interface{}{ + "source_ip": aws.StringValue(config.SourceIp), + "vpc_source_ip": aws.StringValue(config.VpcSourceIp), + } + + return []map[string]interface{}{m} +} diff --git a/internal/service/sagemaker/workteam_test.go b/internal/service/sagemaker/workteam_test.go index 4aa509745b3..d1b41b6b541 100644 --- a/internal/service/sagemaker/workteam_test.go +++ b/internal/service/sagemaker/workteam_test.go @@ -251,6 +251,54 @@ func testAccWorkteam_notificationConfig(t *testing.T) { }) } +func testAccWorkteam_workerAccessConfiguration(t *testing.T) { + ctx := acctest.Context(t) + var workteam sagemaker.Workteam + rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_sagemaker_workteam.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SageMakerServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckWorkteamDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccWorkteamConfig_workerAccessConfiguration(rName, "Enabled"), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkteamExists(ctx, resourceName, &workteam), + resource.TestCheckResourceAttr(resourceName, "workteam_name", rName), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "sagemaker", regexache.MustCompile(`workteam/.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, rName), + resource.TestCheckResourceAttr(resourceName, "worker_access_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "worker_access_configuration.0.s3_presign.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "worker_access_configuration.0.s3_presign.0.iam_policy_constraints.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "worker_access_configuration.0.s3_presign.0.iam_policy_constraints.0.source_ip", "Enabled"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"workforce_name"}, + }, + { + Config: testAccWorkteamConfig_workerAccessConfiguration(rName, "Disabled"), + Check: resource.ComposeTestCheckFunc( + testAccCheckWorkteamExists(ctx, resourceName, &workteam), + resource.TestCheckResourceAttr(resourceName, "workteam_name", rName), + acctest.MatchResourceAttrRegionalARN(resourceName, names.AttrARN, "sagemaker", regexache.MustCompile(`workteam/.+`)), + resource.TestCheckResourceAttr(resourceName, names.AttrDescription, rName), + resource.TestCheckResourceAttr(resourceName, 
"worker_access_configuration.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "worker_access_configuration.0.s3_presign.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "worker_access_configuration.0.s3_presign.0.iam_policy_constraints.#", acctest.Ct1), + resource.TestCheckResourceAttr(resourceName, "worker_access_configuration.0.s3_presign.0.iam_policy_constraints.0.source_ip", "Disabled"), + ), + }, + }, + }) +} + func testAccWorkteam_disappears(t *testing.T) { ctx := acctest.Context(t) var workteam sagemaker.Workteam @@ -505,6 +553,30 @@ resource "aws_sagemaker_workteam" "test" { `, rName)) } +func testAccWorkteamConfig_workerAccessConfiguration(rName, status string) string { + return acctest.ConfigCompose(testAccWorkteamOIDCBaseConfig(rName), fmt.Sprintf(` +resource "aws_sagemaker_workteam" "test" { + workteam_name = %[1]q + workforce_name = aws_sagemaker_workforce.test.id + description = %[1]q + + member_definition { + oidc_member_definition { + groups = [%[1]q] + } + } + + worker_access_configuration { + s3_presign { + iam_policy_constraints { + source_ip = %[2]q + } + } + } +} +`, rName, status)) +} + func testAccWorkteamConfig_tags1(rName, tagKey1, tagValue1 string) string { return acctest.ConfigCompose(testAccWorkteamOIDCBaseConfig(rName), fmt.Sprintf(` resource "aws_sagemaker_workteam" "test" { diff --git a/internal/service/scheduler/service_endpoint_resolver_gen.go b/internal/service/scheduler/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..44b5b63e288 --- /dev/null +++ b/internal/service/scheduler/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package scheduler + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + scheduler_sdkv2 "github.com/aws/aws-sdk-go-v2/service/scheduler" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ scheduler_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver scheduler_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: scheduler_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params scheduler_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up scheduler endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + 
return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*scheduler_sdkv2.Options) { + return func(o *scheduler_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/scheduler/service_endpoints_gen_test.go b/internal/service/scheduler/service_endpoints_gen_test.go index 1215f07b26b..a4a51a32daf 100644 --- a/internal/service/scheduler/service_endpoints_gen_test.go +++ b/internal/service/scheduler/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := scheduler_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), scheduler_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
scheduler_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), scheduler_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/scheduler/service_package_gen.go b/internal/service/scheduler/service_package_gen.go index 5b9841bd3b8..ec194e82f3d 100644 --- a/internal/service/scheduler/service_package_gen.go +++ b/internal/service/scheduler/service_package_gen.go @@ -1,4 +1,4 @@ -// 
Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package scheduler @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" scheduler_sdkv2 "github.com/aws/aws-sdk-go-v2/service/scheduler" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -52,19 +51,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*scheduler_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return scheduler_sdkv2.NewFromConfig(cfg, func(o *scheduler_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return scheduler_sdkv2.NewFromConfig(cfg, + scheduler_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/schemas/service_endpoint_resolver_gen.go b/internal/service/schemas/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..7cc298f1c7d --- /dev/null +++ b/internal/service/schemas/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package schemas + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + schemas_sdkv2 "github.com/aws/aws-sdk-go-v2/service/schemas" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ schemas_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver schemas_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: schemas_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params schemas_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up schemas endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*schemas_sdkv2.Options) { + return func(o *schemas_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/schemas/service_endpoints_gen_test.go b/internal/service/schemas/service_endpoints_gen_test.go index 91dd282436a..81388037caa 100644 --- a/internal/service/schemas/service_endpoints_gen_test.go +++ b/internal/service/schemas/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := schemas_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), schemas_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := 
schemas_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), schemas_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/schemas/service_package.go b/internal/service/schemas/service_package.go index 835a6b8c35f..7b949c08f55 100644 --- a/internal/service/schemas/service_package.go +++ b/internal/service/schemas/service_package.go @@ -10,7 +10,6 @@ import ( 
"github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/schemas" awstypes "github.com/aws/aws-sdk-go-v2/service/schemas/types" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/names" @@ -20,24 +19,16 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*schemas.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return schemas.NewFromConfig(cfg, func(o *schemas.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled - } - } - - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { - if errs.IsAErrorMessageContains[*awstypes.TooManyRequestsException](err, "Too Many Requests") { - return aws.TrueTernary - } - return aws.UnknownTernary // Delegate to configured Retryer. - })) - }), nil + return schemas.NewFromConfig(cfg, + schemas.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *schemas.Options) { + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if errs.IsAErrorMessageContains[*awstypes.TooManyRequestsException](err, "Too Many Requests") { + return aws.TrueTernary + } + return aws.UnknownTernary // Delegate to configured Retryer. 
+ })) + }, + ), nil } diff --git a/internal/service/schemas/service_package_gen.go b/internal/service/schemas/service_package_gen.go index f3c5f3d266e..16633fbe0a4 100644 --- a/internal/service/schemas/service_package_gen.go +++ b/internal/service/schemas/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package schemas diff --git a/internal/service/secretsmanager/service_endpoint_resolver_gen.go b/internal/service/secretsmanager/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..1bbe5694033 --- /dev/null +++ b/internal/service/secretsmanager/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package secretsmanager + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + secretsmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/secretsmanager" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ secretsmanager_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver secretsmanager_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: secretsmanager_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params secretsmanager_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + 
params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up secretsmanager endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*secretsmanager_sdkv2.Options) { + return func(o *secretsmanager_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/secretsmanager/service_endpoints_gen_test.go b/internal/service/secretsmanager/service_endpoints_gen_test.go index bec95418b18..c16b66c3db2 100644 --- a/internal/service/secretsmanager/service_endpoints_gen_test.go +++ b/internal/service/secretsmanager/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses 
t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := secretsmanager_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), secretsmanager_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := secretsmanager_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), secretsmanager_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func 
expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/secretsmanager/service_package_gen.go b/internal/service/secretsmanager/service_package_gen.go index d2bd84724c2..9606e9732fb 100644 --- a/internal/service/secretsmanager/service_package_gen.go +++ b/internal/service/secretsmanager/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package secretsmanager @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" secretsmanager_sdkv2 "github.com/aws/aws-sdk-go-v2/service/secretsmanager" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -92,19 +91,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*secretsmanager_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return secretsmanager_sdkv2.NewFromConfig(cfg, func(o *secretsmanager_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return secretsmanager_sdkv2.NewFromConfig(cfg, + secretsmanager_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/securityhub/service_endpoint_resolver_gen.go b/internal/service/securityhub/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..260276a3d68 --- /dev/null +++ b/internal/service/securityhub/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package securityhub + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + securityhub_sdkv2 "github.com/aws/aws-sdk-go-v2/service/securityhub" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ securityhub_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver securityhub_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: securityhub_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params securityhub_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up securityhub endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err 
+ } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*securityhub_sdkv2.Options) { + return func(o *securityhub_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/securityhub/service_endpoints_gen_test.go b/internal/service/securityhub/service_endpoints_gen_test.go index 1a00849e38e..944ac88fa5b 100644 --- a/internal/service/securityhub/service_endpoints_gen_test.go +++ b/internal/service/securityhub/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := securityhub_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), securityhub_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) 
{ r := securityhub_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), securityhub_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/securityhub/service_package_gen.go b/internal/service/securityhub/service_package_gen.go index 74a3f26e8f2..60cac89f0bb 100644 --- a/internal/service/securityhub/service_package_gen.go +++ b/internal/service/securityhub/service_package_gen.go 
@@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package securityhub @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" securityhub_sdkv2 "github.com/aws/aws-sdk-go-v2/service/securityhub" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -113,19 +112,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*securityhub_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return securityhub_sdkv2.NewFromConfig(cfg, func(o *securityhub_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return securityhub_sdkv2.NewFromConfig(cfg, + securityhub_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/securitylake/service_endpoint_resolver_gen.go b/internal/service/securitylake/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..3b3ebacd62b --- /dev/null +++ b/internal/service/securitylake/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package securitylake + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + securitylake_sdkv2 "github.com/aws/aws-sdk-go-v2/service/securitylake" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ securitylake_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver securitylake_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: securitylake_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params securitylake_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up securitylake endpoint %q: %s", hostname, err) + return + } + } else { + return 
endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*securitylake_sdkv2.Options) { + return func(o *securitylake_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/securitylake/service_endpoints_gen_test.go b/internal/service/securitylake/service_endpoints_gen_test.go index c1c150dfff9..5ae32c53f8d 100644 --- a/internal/service/securitylake/service_endpoints_gen_test.go +++ b/internal/service/securitylake/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := securitylake_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), securitylake_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region 
string) (url.URL, error) { r := securitylake_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), securitylake_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/securitylake/service_package_gen.go b/internal/service/securitylake/service_package_gen.go index af67ea2acda..1e3b5cb5d1f 100644 --- a/internal/service/securitylake/service_package_gen.go +++ 
b/internal/service/securitylake/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package securitylake @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" securitylake_sdkv2 "github.com/aws/aws-sdk-go-v2/service/securitylake" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -66,19 +65,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*securitylake_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return securitylake_sdkv2.NewFromConfig(cfg, func(o *securitylake_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return securitylake_sdkv2.NewFromConfig(cfg, + securitylake_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/serverlessrepo/application_data_source.go b/internal/service/serverlessrepo/application_data_source.go index 9f256155c4b..96988a8c8b8 100644 --- a/internal/service/serverlessrepo/application_data_source.go +++ b/internal/service/serverlessrepo/application_data_source.go @@ -56,7 +56,7 @@ func DataSourceApplication() 
*schema.Resource { func dataSourceApplicationRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ServerlessRepoConn(ctx) + conn := meta.(*conns.AWSClient).ServerlessRepoClient(ctx) applicationID := d.Get(names.AttrApplicationID).(string) semanticVersion := d.Get("semantic_version").(string) @@ -75,7 +75,7 @@ func dataSourceApplicationRead(ctx context.Context, d *schema.ResourceData, meta d.Set("semantic_version", output.Version.SemanticVersion) d.Set("source_code_url", output.Version.SourceCodeUrl) d.Set("template_url", output.Version.TemplateUrl) - if err = d.Set("required_capabilities", flex.FlattenStringSet(output.Version.RequiredCapabilities)); err != nil { + if err = d.Set("required_capabilities", flex.FlattenStringyValueSet(output.Version.RequiredCapabilities)); err != nil { return sdkdiag.AppendErrorf(diags, "to set required_capabilities: %s", err) } diff --git a/internal/service/serverlessrepo/cloudformation_stack.go b/internal/service/serverlessrepo/cloudformation_stack.go index 660036c8da9..2378984f583 100644 --- a/internal/service/serverlessrepo/cloudformation_stack.go +++ b/internal/service/serverlessrepo/cloudformation_stack.go @@ -9,17 +9,18 @@ import ( // nosemgrep:ci.semgrep.aws.multiple-service-imports "log" "strings" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/cloudformation" cloudformationtypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - serverlessrepo "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + serverlessrepo "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository" + awstypes "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository/types" + 
"github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tfcloudformation "github.com/hashicorp/terraform-provider-aws/internal/service/cloudformation" @@ -66,8 +67,8 @@ func ResourceCloudFormationStack() *schema.Resource { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(serverlessrepo.Capability_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[awstypes.Capability](), }, }, names.AttrName: { @@ -110,7 +111,7 @@ func resourceCloudFormationStackCreate(ctx context.Context, d *schema.ResourceDa log.Printf("[INFO] Serverless Application Repository CloudFormation Stack (%s) change set created", d.Id()) - d.SetId(aws.StringValue(changeSet.StackId)) + d.SetId(aws.ToString(changeSet.StackId)) requestToken := id.UniqueId() executeRequest := cloudformation.ExecuteChangeSetInput{ @@ -135,7 +136,7 @@ func resourceCloudFormationStackCreate(ctx context.Context, d *schema.ResourceDa func resourceCloudFormationStackRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - serverlessConn := meta.(*conns.AWSClient).ServerlessRepoConn(ctx) + serverlessConn := meta.(*conns.AWSClient).ServerlessRepoClient(ctx) cfConn := meta.(*conns.AWSClient).CloudFormationClient(ctx) stack, err := tfcloudformation.FindStackByName(ctx, cfConn, d.Id()) @@ -151,19 +152,19 @@ func resourceCloudFormationStackRead(ctx context.Context, d *schema.ResourceData } // 
Serverless Application Repo prefixes the stack name with "serverlessrepo-", so remove it from the saved string - stackName := strings.TrimPrefix(aws.StringValue(stack.StackName), CloudFormationStackNamePrefix) + stackName := strings.TrimPrefix(aws.ToString(stack.StackName), CloudFormationStackNamePrefix) d.Set(names.AttrName, &stackName) tags := tfcloudformation.KeyValueTags(ctx, stack.Tags) var applicationID, semanticVersion string if v, ok := tags[cloudFormationStackTagApplicationID]; ok { - applicationID = aws.StringValue(v.Value) + applicationID = aws.ToString(v.Value) d.Set(names.AttrApplicationID, applicationID) } else { return sdkdiag.AppendErrorf(diags, "describing Serverless Application Repository CloudFormation Stack (%s): missing required tag \"%s\"", d.Id(), cloudFormationStackTagApplicationID) } if v, ok := tags[cloudFormationStackTagSemanticVersion]; ok { - semanticVersion = aws.StringValue(v.Value) + semanticVersion = aws.ToString(v.Value) d.Set("semantic_version", semanticVersion) } else { return sdkdiag.AppendErrorf(diags, "describing Serverless Application Repository CloudFormation Stack (%s): missing required tag \"%s\"", d.Id(), cloudFormationStackTagSemanticVersion) @@ -197,23 +198,23 @@ func resourceCloudFormationStackRead(ctx context.Context, d *schema.ResourceData return diags } -func flattenNonDefaultCloudFormationParameters(cfParams []cloudformationtypes.Parameter, rawParameterDefinitions []*serverlessrepo.ParameterDefinition) map[string]interface{} { +func flattenNonDefaultCloudFormationParameters(cfParams []cloudformationtypes.Parameter, rawParameterDefinitions []awstypes.ParameterDefinition) map[string]interface{} { parameterDefinitions := flattenParameterDefinitions(rawParameterDefinitions) params := make(map[string]interface{}, len(cfParams)) for _, p := range cfParams { - key := aws.StringValue(p.ParameterKey) - value := aws.StringValue(p.ParameterValue) - if value != aws.StringValue(parameterDefinitions[key].DefaultValue) { + key := 
aws.ToString(p.ParameterKey) + value := aws.ToString(p.ParameterValue) + if value != aws.ToString(parameterDefinitions[key].DefaultValue) { params[key] = value } } return params } -func flattenParameterDefinitions(parameterDefinitions []*serverlessrepo.ParameterDefinition) map[string]*serverlessrepo.ParameterDefinition { - result := make(map[string]*serverlessrepo.ParameterDefinition, len(parameterDefinitions)) +func flattenParameterDefinitions(parameterDefinitions []awstypes.ParameterDefinition) map[string]awstypes.ParameterDefinition { + result := make(map[string]awstypes.ParameterDefinition, len(parameterDefinitions)) for _, p := range parameterDefinitions { - result[aws.StringValue(p.Name)] = p + result[aws.ToString(p.Name)] = p } return result } @@ -291,20 +292,20 @@ func resourceCloudFormationStackImport(ctx context.Context, d *schema.ResourceDa return nil, fmt.Errorf("describing Serverless Application Repository CloudFormation Stack (%s): %w", stackID, err) } - d.SetId(aws.StringValue(stack.StackId)) + d.SetId(aws.ToString(stack.StackId)) return []*schema.ResourceData{d}, nil } func createCloudFormationChangeSet(ctx context.Context, d *schema.ResourceData, client *conns.AWSClient) (*cloudformation.DescribeChangeSetOutput, error) { - serverlessConn := client.ServerlessRepoConn(ctx) + serverlessConn := client.ServerlessRepoClient(ctx) cfConn := client.CloudFormationClient(ctx) stackName := d.Get(names.AttrName).(string) - changeSetRequest := serverlessrepo.CreateCloudFormationChangeSetRequest{ + changeSetRequest := serverlessrepo.CreateCloudFormationChangeSetInput{ StackName: aws.String(stackName), ApplicationId: aws.String(d.Get(names.AttrApplicationID).(string)), - Capabilities: flex.ExpandStringSet(d.Get("capabilities").(*schema.Set)), + Capabilities: flex.ExpandStringValueSet(d.Get("capabilities").(*schema.Set)), Tags: getTagsIn(ctx), } if v, ok := d.GetOk("semantic_version"); ok { @@ -314,19 +315,18 @@ func createCloudFormationChangeSet(ctx 
context.Context, d *schema.ResourceData, changeSetRequest.ParameterOverrides = expandCloudFormationChangeSetParameters(v.(map[string]interface{})) } - log.Printf("[DEBUG] Creating Serverless Application Repository CloudFormation change set: %s", changeSetRequest) - changeSetResponse, err := serverlessConn.CreateCloudFormationChangeSetWithContext(ctx, &changeSetRequest) + changeSetResponse, err := serverlessConn.CreateCloudFormationChangeSet(ctx, &changeSetRequest) if err != nil { return nil, err } - return tfcloudformation.WaitChangeSetCreated(ctx, cfConn, aws.StringValue(changeSetResponse.StackId), aws.StringValue(changeSetResponse.ChangeSetId)) + return tfcloudformation.WaitChangeSetCreated(ctx, cfConn, aws.ToString(changeSetResponse.StackId), aws.ToString(changeSetResponse.ChangeSetId)) } -func expandCloudFormationChangeSetParameters(params map[string]interface{}) []*serverlessrepo.ParameterValue { - var appParams []*serverlessrepo.ParameterValue +func expandCloudFormationChangeSetParameters(params map[string]interface{}) []awstypes.ParameterValue { + var appParams []awstypes.ParameterValue for k, v := range params { - appParams = append(appParams, &serverlessrepo.ParameterValue{ + appParams = append(appParams, awstypes.ParameterValue{ Name: aws.String(k), Value: aws.String(v.(string)), }) @@ -334,13 +334,13 @@ func expandCloudFormationChangeSetParameters(params map[string]interface{}) []*s return appParams } -func flattenStackCapabilities(stackCapabilities []cloudformationtypes.Capability, applicationRequiredCapabilities []*string) *schema.Set { +func flattenStackCapabilities(stackCapabilities []cloudformationtypes.Capability, applicationRequiredCapabilities []awstypes.Capability) *schema.Set { // We need to preserve "CAPABILITY_RESOURCE_POLICY" if it has been set. It is not // returned by the CloudFormation APIs. 
capabilities := flex.FlattenStringyValueSet(stackCapabilities) for _, capability := range applicationRequiredCapabilities { - if aws.StringValue(capability) == serverlessrepo.CapabilityCapabilityResourcePolicy { - capabilities.Add(serverlessrepo.CapabilityCapabilityResourcePolicy) + if capability == awstypes.CapabilityCapabilityResourcePolicy { + capabilities.Add(string(awstypes.CapabilityCapabilityResourcePolicy)) break } } @@ -349,7 +349,7 @@ func flattenStackCapabilities(stackCapabilities []cloudformationtypes.Capability func flattenCloudFormationOutputs(cfOutputs []cloudformationtypes.Output) map[string]string { outputs := make(map[string]string, len(cfOutputs)) for _, o := range cfOutputs { - outputs[aws.StringValue(o.OutputKey)] = aws.StringValue(o.OutputValue) + outputs[aws.ToString(o.OutputKey)] = aws.ToString(o.OutputValue) } return outputs } diff --git a/internal/service/serverlessrepo/cloudformation_stack_test.go b/internal/service/serverlessrepo/cloudformation_stack_test.go index f983cebd44f..1bdbccbb104 100644 --- a/internal/service/serverlessrepo/cloudformation_stack_test.go +++ b/internal/service/serverlessrepo/cloudformation_stack_test.go @@ -9,14 +9,12 @@ import ( "log" "testing" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/cloudformation" cloudformationtypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/service/ec2" - serverlessrepo "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" 
"github.com/hashicorp/terraform-plugin-testing/terraform" @@ -114,7 +112,7 @@ func TestAccServerlessRepoCloudFormationStack_versioned(t *testing.T) { resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" const ( - version1 = "1.1.36" + version1 = "1.1.465" version2 = "1.1.88" ) @@ -173,7 +171,7 @@ func TestAccServerlessRepoCloudFormationStack_paired(t *testing.T) { appARN := testAccCloudFormationApplicationID() resourceName := "aws_serverlessapplicationrepository_cloudformation_stack.postgres-rotator" - const version = "1.1.36" + const version = "1.1.465" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -327,16 +325,16 @@ func testAccCloudFormationStackNameNoPrefixImportStateIdFunc(resourceName string } func testAccCloudFormationApplicationID() string { - arnRegion := endpoints.UsEast1RegionID + arnRegion := names.USEast1RegionID arnAccountID := "297356227824" - if acctest.Partition() == endpoints.AwsUsGovPartitionID { - arnRegion = endpoints.UsGovWest1RegionID + if acctest.Partition() == names.USGovCloudPartitionID { + arnRegion = names.USGovWest1RegionID arnAccountID = "023102451235" } return arn.ARN{ Partition: acctest.Partition(), - Service: serverlessrepo.ServiceName, + Service: names.ServerlessRepo, Region: arnRegion, AccountID: arnAccountID, Resource: "applications/SecretsManagerRDSPostgreSQLRotationSingleUser", @@ -551,7 +549,7 @@ resource "aws_serverlessapplicationrepository_cloudformation_stack" "postgres-ro func testAccCheckAMIDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_ami" { @@ -561,9 +559,9 @@ func testAccCheckAMIDestroy(ctx context.Context) resource.TestCheckFunc { // Try to find the AMI log.Printf("AMI-ID: %s", 
rs.Primary.ID) DescribeAmiOpts := &ec2.DescribeImagesInput{ - ImageIds: []*string{aws.String(rs.Primary.ID)}, + ImageIds: []string{rs.Primary.ID}, } - resp, err := conn.DescribeImagesWithContext(ctx, DescribeAmiOpts) + resp, err := conn.DescribeImages(ctx, DescribeAmiOpts) if err != nil { if tfawserr.ErrMessageContains(err, "InvalidAMIID", "NotFound") { log.Printf("[DEBUG] AMI not found, passing") @@ -574,8 +572,8 @@ func testAccCheckAMIDestroy(ctx context.Context) resource.TestCheckFunc { if len(resp.Images) > 0 { state := resp.Images[0].State - return fmt.Errorf("AMI %s still exists in the state: %s.", aws.StringValue(resp.Images[0].ImageId), - aws.StringValue(state)) + return fmt.Errorf("AMI %s still exists in the state: %s.", aws.ToString(resp.Images[0].ImageId), + string(state)) } } return nil @@ -602,7 +600,7 @@ func testAccCheckCloudFormationDestroy(ctx context.Context) resource.TestCheckFu } for _, s := range resp.Stacks { - if aws.StringValue(s.StackId) == rs.Primary.ID && s.StackStatus != cloudformationtypes.StackStatusDeleteComplete { + if aws.ToString(s.StackId) == rs.Primary.ID && s.StackStatus != cloudformationtypes.StackStatusDeleteComplete { return fmt.Errorf("CloudFormation stack still exists: %q", rs.Primary.ID) } } @@ -614,7 +612,7 @@ func testAccCheckCloudFormationDestroy(ctx context.Context) resource.TestCheckFu func testAccCheckCloudFormationStackNotRecreated(i, j *cloudformationtypes.Stack) resource.TestCheckFunc { return func(s *terraform.State) error { - if aws.StringValue(i.StackId) != aws.StringValue(j.StackId) { + if aws.ToString(i.StackId) != aws.ToString(j.StackId) { return fmt.Errorf("CloudFormation stack recreated") } diff --git a/internal/service/serverlessrepo/find.go b/internal/service/serverlessrepo/find.go index dda60b0c33c..fff55eb65d2 100644 --- a/internal/service/serverlessrepo/find.go +++ b/internal/service/serverlessrepo/find.go @@ -5,15 +5,15 @@ package serverlessrepo import ( "context" - "log" - 
"github.com/aws/aws-sdk-go/aws" - serverlessrepo "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + serverlessrepo "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository" + awstypes "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/errs" ) -func findApplication(ctx context.Context, conn *serverlessrepo.ServerlessApplicationRepository, applicationID, version string) (*serverlessrepo.GetApplicationOutput, error) { +func findApplication(ctx context.Context, conn *serverlessrepo.Client, applicationID, version string) (*serverlessrepo.GetApplicationOutput, error) { input := &serverlessrepo.GetApplicationInput{ ApplicationId: aws.String(applicationID), } @@ -21,9 +21,8 @@ func findApplication(ctx context.Context, conn *serverlessrepo.ServerlessApplica input.SemanticVersion = aws.String(version) } - log.Printf("[DEBUG] Getting Serverless findApplication Repository Application: %s", input) - resp, err := conn.GetApplicationWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, serverlessrepo.ErrCodeNotFoundException) { + resp, err := conn.GetApplication(ctx, input) + if errs.IsA[*awstypes.NotFoundException](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/serverlessrepo/generate.go b/internal/service/serverlessrepo/generate.go index 71aeaf48552..82bb54c5536 100644 --- a/internal/service/serverlessrepo/generate.go +++ b/internal/service/serverlessrepo/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ServiceTagsSlice +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -SkipAWSServiceImp -ServiceTagsSlice //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. diff --git a/internal/service/serverlessrepo/service_endpoint_resolver_gen.go b/internal/service/serverlessrepo/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..fb2d0158e0d --- /dev/null +++ b/internal/service/serverlessrepo/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package serverlessrepo + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + serverlessapplicationrepository_sdkv2 "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ serverlessapplicationrepository_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver serverlessapplicationrepository_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: serverlessapplicationrepository_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params serverlessapplicationrepository_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + 
params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up serverlessapplicationrepository endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*serverlessapplicationrepository_sdkv2.Options) { + return func(o *serverlessapplicationrepository_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/serverlessrepo/service_endpoints_gen_test.go b/internal/service/serverlessrepo/service_endpoints_gen_test.go index 7f6f71b8f95..49dddd1f5a5 100644 --- a/internal/service/serverlessrepo/service_endpoints_gen_test.go +++ b/internal/service/serverlessrepo/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package serverlessrepo_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - serverlessapplicationrepository_sdkv1 "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + 
serverlessapplicationrepository_sdkv2 "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/go-cty/cty" @@ -90,7 +95,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -330,7 +335,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -351,55 +356,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := serverlessapplicationrepository_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(serverlessapplicationrepository_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), serverlessapplicationrepository_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := 
serverlessapplicationrepository_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(serverlessapplicationrepository_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), serverlessapplicationrepository_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.ServerlessRepoConn(ctx) - - req, _ := client.ListApplicationsRequest(&serverlessapplicationrepository_sdkv1.ListApplicationsInput{}) + client := meta.ServerlessRepoClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListApplications(ctx, &serverlessapplicationrepository_sdkv2.ListApplicationsInput{}, + func(opts *serverlessapplicationrepository_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil { + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -466,16 +480,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := 
defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -600,6 +636,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region 
*string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr { + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/serverlessrepo/service_package_gen.go b/internal/service/serverlessrepo/service_package_gen.go index 261c14f2597..9745aa007e1 100644 --- a/internal/service/serverlessrepo/service_package_gen.go +++ 
b/internal/service/serverlessrepo/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package serverlessrepo import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - serverlessapplicationrepository_sdkv1 "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + serverlessapplicationrepository_sdkv2 "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -49,25 +46,14 @@ func (p *servicePackage) ServicePackageName() string { return names.ServerlessRepo } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*serverlessapplicationrepository_sdkv1.ServerlessApplicationRepository, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*serverlessapplicationrepository_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } - - return serverlessapplicationrepository_sdkv1.New(sess.Copy(&cfg)), nil + return serverlessapplicationrepository_sdkv2.NewFromConfig(cfg, + serverlessapplicationrepository_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/serverlessrepo/tags_gen.go b/internal/service/serverlessrepo/tags_gen.go index 57f84930475..133656c9583 100644 --- a/internal/service/serverlessrepo/tags_gen.go +++ b/internal/service/serverlessrepo/tags_gen.go @@ -4,8 +4,8 @@ package serverlessrepo import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) @@ -13,11 +13,11 @@ import ( // []*SERVICE.Tag handling // Tags returns serverlessrepo service tags. 
-func Tags(tags tftags.KeyValueTags) []*serverlessapplicationrepository.Tag { - result := make([]*serverlessapplicationrepository.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &serverlessapplicationrepository.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -29,11 +29,11 @@ func Tags(tags tftags.KeyValueTags) []*serverlessapplicationrepository.Tag { } // KeyValueTags creates tftags.KeyValueTags from serverlessapplicationrepository service tags. -func KeyValueTags(ctx context.Context, tags []*serverlessapplicationrepository.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -41,7 +41,7 @@ func KeyValueTags(ctx context.Context, tags []*serverlessapplicationrepository.T // getTagsIn returns serverlessrepo service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*serverlessapplicationrepository.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -52,7 +52,7 @@ func getTagsIn(ctx context.Context) []*serverlessapplicationrepository.Tag { } // setTagsOut sets serverlessrepo service tags in Context. 
-func setTagsOut(ctx context.Context, tags []*serverlessapplicationrepository.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } diff --git a/internal/service/servicecatalog/service_endpoint_resolver_gen.go b/internal/service/servicecatalog/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..755ba5228f8 --- /dev/null +++ b/internal/service/servicecatalog/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package servicecatalog + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/servicecatalog/service_endpoints_gen_test.go b/internal/service/servicecatalog/service_endpoints_gen_test.go index 0bdec1330e5..8f361b45fc2 100644 --- a/internal/service/servicecatalog/service_endpoints_gen_test.go +++ b/internal/service/servicecatalog/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func 
TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(servicecatalog_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(servicecatalog_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + 
if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/servicecatalog/service_package_gen.go b/internal/service/servicecatalog/service_package_gen.go index 7b45ebd3169..3f91af76c0f 100644 --- a/internal/service/servicecatalog/service_package_gen.go +++ b/internal/service/servicecatalog/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package servicecatalog @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" servicecatalog_sdkv1 "github.com/aws/aws-sdk-go/service/servicecatalog" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -134,11 +133,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*s "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return servicecatalog_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/servicecatalogappregistry/service_endpoint_resolver_gen.go b/internal/service/servicecatalogappregistry/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f1bb6b26621 --- /dev/null +++ b/internal/service/servicecatalogappregistry/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code 
generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package servicecatalogappregistry + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + servicecatalogappregistry_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ servicecatalogappregistry_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver servicecatalogappregistry_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: servicecatalogappregistry_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params servicecatalogappregistry_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + 
params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up servicecatalogappregistry endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*servicecatalogappregistry_sdkv2.Options) { + return func(o *servicecatalogappregistry_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/servicecatalogappregistry/service_endpoints_gen_test.go b/internal/service/servicecatalogappregistry/service_endpoints_gen_test.go index 07a988a5a4a..66220fd8c54 100644 --- a/internal/service/servicecatalogappregistry/service_endpoints_gen_test.go +++ b/internal/service/servicecatalogappregistry/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -91,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -274,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -295,24 +297,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := servicecatalogappregistry_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), servicecatalogappregistry_sdkv2.EndpointParameters{ 
Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := servicecatalogappregistry_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), servicecatalogappregistry_sdkv2.EndpointParameters{ @@ -320,14 +322,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -408,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + 
endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/servicecatalogappregistry/service_package_gen.go b/internal/service/servicecatalogappregistry/service_package_gen.go index 9e75da24839..affcfd818e4 100644 --- a/internal/service/servicecatalogappregistry/service_package_gen.go +++ b/internal/service/servicecatalogappregistry/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package servicecatalogappregistry @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" servicecatalogappregistry_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -49,19 +48,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*servicecatalogappregistry_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return servicecatalogappregistry_sdkv2.NewFromConfig(cfg, func(o *servicecatalogappregistry_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return servicecatalogappregistry_sdkv2.NewFromConfig(cfg, + servicecatalogappregistry_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + 
withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/servicediscovery/service_endpoint_resolver_gen.go b/internal/service/servicediscovery/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..55fb327fef9 --- /dev/null +++ b/internal/service/servicediscovery/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package servicediscovery + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + servicediscovery_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicediscovery" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ servicediscovery_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver servicediscovery_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: servicediscovery_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params servicediscovery_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, 
"endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up servicediscovery endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*servicediscovery_sdkv2.Options) { + return func(o *servicediscovery_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/servicediscovery/service_endpoints_gen_test.go b/internal/service/servicediscovery/service_endpoints_gen_test.go index b8d5b97bdf9..13c0161c27f 100644 --- a/internal/service/servicediscovery/service_endpoints_gen_test.go +++ b/internal/service/servicediscovery/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t 
*testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := servicediscovery_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), servicediscovery_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := servicediscovery_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), servicediscovery_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if 
dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/servicediscovery/service_package_gen.go b/internal/service/servicediscovery/service_package_gen.go index 00be99ea9d5..a6a8c608190 100644 --- a/internal/service/servicediscovery/service_package_gen.go +++ b/internal/service/servicediscovery/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package servicediscovery @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" servicediscovery_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicediscovery" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -93,19 +92,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*servicediscovery_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return servicediscovery_sdkv2.NewFromConfig(cfg, func(o *servicediscovery_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), 
nil + return servicediscovery_sdkv2.NewFromConfig(cfg, + servicediscovery_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/servicequotas/service_endpoint_resolver_gen.go b/internal/service/servicequotas/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..f461cb7eb56 --- /dev/null +++ b/internal/service/servicequotas/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package servicequotas + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + servicequotas_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicequotas" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ servicequotas_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver servicequotas_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: servicequotas_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params servicequotas_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = 
r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up servicequotas endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*servicequotas_sdkv2.Options) { + return func(o *servicequotas_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/servicequotas/service_endpoints_gen_test.go b/internal/service/servicequotas/service_endpoints_gen_test.go index 8dce10beac3..172d188825f 100644 --- a/internal/service/servicequotas/service_endpoints_gen_test.go +++ b/internal/service/servicequotas/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with 
package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := servicequotas_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), servicequotas_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := servicequotas_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), servicequotas_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } 
+ + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/servicequotas/service_package_gen.go b/internal/service/servicequotas/service_package_gen.go index bed9668566e..7f1ccd410fb 100644 --- a/internal/service/servicequotas/service_package_gen.go +++ b/internal/service/servicequotas/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package servicequotas @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" servicequotas_sdkv2 "github.com/aws/aws-sdk-go-v2/service/servicequotas" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -67,19 +66,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*servicequotas_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return servicequotas_sdkv2.NewFromConfig(cfg, func(o *servicequotas_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = 
aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return servicequotas_sdkv2.NewFromConfig(cfg, + servicequotas_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/ses/service_endpoint_resolver_gen.go b/internal/service/ses/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..69e9facc483 --- /dev/null +++ b/internal/service/ses/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package ses + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/ses/service_endpoints_gen_test.go b/internal/service/ses/service_endpoints_gen_test.go index 1bd03cc4090..622ec129fef 100644 --- a/internal/service/ses/service_endpoints_gen_test.go +++ b/internal/service/ses/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -83,7 +84,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -217,7 +218,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -238,12 +239,12 @@ func TestEndpointConfiguration(t *testing.T) { 
//nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(ses_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -252,17 +253,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(ses_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -271,7 +272,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -323,16 +324,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && 
dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ses/service_package_gen.go b/internal/service/ses/service_package_gen.go index ad16942a1f7..b062a6b2a30 100644 --- a/internal/service/ses/service_package_gen.go +++ b/internal/service/ses/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ses @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" ses_sdkv1 "github.com/aws/aws-sdk-go/service/ses" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -118,11 +117,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*s "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return ses_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/sesv2/service_endpoint_resolver_gen.go b/internal/service/sesv2/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..9ca7f81bec2 --- /dev/null +++ b/internal/service/sesv2/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package sesv2 + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + sesv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sesv2" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ sesv2_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver sesv2_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: sesv2_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params sesv2_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up sesv2 endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*sesv2_sdkv2.Options) { + return func(o *sesv2_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/sesv2/service_endpoints_gen_test.go b/internal/service/sesv2/service_endpoints_gen_test.go index bebd1c41fcc..f95a22b454b 100644 --- a/internal/service/sesv2/service_endpoints_gen_test.go +++ b/internal/service/sesv2/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := sesv2_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), sesv2_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := sesv2_sdkv2.NewDefaultEndpointResolverV2() ep, err := 
r.ResolveEndpoint(context.Background(), sesv2_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/sesv2/service_package_gen.go b/internal/service/sesv2/service_package_gen.go index fa74819686b..4fb43b1fecb 100644 --- a/internal/service/sesv2/service_package_gen.go +++ b/internal/service/sesv2/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package sesv2 @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" sesv2_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sesv2" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -118,19 +117,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*sesv2_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return sesv2_sdkv2.NewFromConfig(cfg, func(o *sesv2_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return sesv2_sdkv2.NewFromConfig(cfg, + sesv2_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/sfn/activity.go b/internal/service/sfn/activity.go index c41fc029f54..928271cb376 100644 --- a/internal/service/sfn/activity.go +++ b/internal/service/sfn/activity.go @@ -8,14 +8,15 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" + awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" 
"github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -25,7 +26,7 @@ import ( // @SDKResource("aws_sfn_activity", name="Activity") // @Tags(identifierAttribute="id") -func ResourceActivity() *schema.Resource { +func resourceActivity() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceActivityCreate, ReadWithoutTimeout: resourceActivityRead, @@ -57,7 +58,7 @@ func ResourceActivity() *schema.Resource { func resourceActivityCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) name := d.Get(names.AttrName).(string) input := &sfn.CreateActivityInput{ @@ -65,22 +66,22 @@ func resourceActivityCreate(ctx context.Context, d *schema.ResourceData, meta in Tags: getTagsIn(ctx), } - output, err := conn.CreateActivityWithContext(ctx, input) + output, err := conn.CreateActivity(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "creating Step Functions Activity (%s): %s", name, err) } - d.SetId(aws.StringValue(output.ActivityArn)) + d.SetId(aws.ToString(output.ActivityArn)) return append(diags, resourceActivityRead(ctx, d, meta)...) 
} func resourceActivityRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) - output, err := FindActivityByARN(ctx, conn, d.Id()) + output, err := findActivityByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Step Functions Activity (%s) not found, removing from state", d.Id()) @@ -105,10 +106,10 @@ func resourceActivityUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceActivityDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) log.Printf("[DEBUG] Deleting Step Functions Activity: %s", d.Id()) - _, err := conn.DeleteActivityWithContext(ctx, &sfn.DeleteActivityInput{ + _, err := conn.DeleteActivity(ctx, &sfn.DeleteActivityInput{ ActivityArn: aws.String(d.Id()), }) @@ -119,14 +120,14 @@ func resourceActivityDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } -func FindActivityByARN(ctx context.Context, conn *sfn.SFN, arn string) (*sfn.DescribeActivityOutput, error) { +func findActivityByARN(ctx context.Context, conn *sfn.Client, arn string) (*sfn.DescribeActivityOutput, error) { input := &sfn.DescribeActivityInput{ ActivityArn: aws.String(arn), } - output, err := conn.DescribeActivityWithContext(ctx, input) + output, err := conn.DescribeActivity(ctx, input) - if tfawserr.ErrCodeEquals(err, sfn.ErrCodeActivityDoesNotExist) { + if errs.IsA[*awstypes.ActivityDoesNotExist](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, diff --git a/internal/service/sfn/activity_data_source.go b/internal/service/sfn/activity_data_source.go index 16bf21e0aa7..1b95fc90cc7 100644 --- a/internal/service/sfn/activity_data_source.go +++ 
b/internal/service/sfn/activity_data_source.go @@ -7,17 +7,20 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" + awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_sfn_activity") -func DataSourceActivity() *schema.Resource { +// @SDKDataSource("aws_sfn_activity", name="Activity") +func dataSourceActivity() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceActivityRead, @@ -50,52 +53,38 @@ func DataSourceActivity() *schema.Resource { func dataSourceActivityRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) if v, ok := d.GetOk(names.AttrName); ok { name := v.(string) - var activities []*sfn.ActivityListItem - err := conn.ListActivitiesPagesWithContext(ctx, &sfn.ListActivitiesInput{}, func(page *sfn.ListActivitiesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.Activities { - if name == aws.StringValue(v.Name) { - activities = append(activities, v) - } - } - - return !lastPage - }) + output, err := findActivityByName(ctx, conn, name) if err != nil { return sdkdiag.AppendErrorf(diags, "listing Step Functions Activities: %s", err) } - - if n := len(activities); n == 0 { + if n := len(output); n == 0 { return sdkdiag.AppendErrorf(diags, "no Step Functions 
Activities matched") } else if n > 1 { return sdkdiag.AppendErrorf(diags, "%d Step Functions Activities matched; use additional constraints to reduce matches to a single Activity", n) } - activity := activities[0] + activity := output[0] - arn := aws.StringValue(activity.ActivityArn) + arn := aws.ToString(activity.ActivityArn) d.SetId(arn) d.Set(names.AttrARN, arn) d.Set(names.AttrCreationDate, activity.CreationDate.Format(time.RFC3339)) d.Set(names.AttrName, activity.Name) } else if v, ok := d.GetOk(names.AttrARN); ok { arn := v.(string) - activity, err := FindActivityByARN(ctx, conn, arn) + activity, err := findActivityByARN(ctx, conn, arn) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Step Functions Activity (%s): %s", arn, err) } - arn = aws.StringValue(activity.ActivityArn) + arn = aws.ToString(activity.ActivityArn) d.SetId(arn) d.Set(names.AttrARN, arn) d.Set(names.AttrCreationDate, activity.CreationDate.Format(time.RFC3339)) @@ -104,3 +93,31 @@ func dataSourceActivityRead(ctx context.Context, d *schema.ResourceData, meta in return diags } + +func findActivityByName(ctx context.Context, conn *sfn.Client, name string) ([]awstypes.ActivityListItem, error) { + var output []awstypes.ActivityListItem + + pages := sfn.NewListActivitiesPaginator(conn, &sfn.ListActivitiesInput{}) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if errs.IsA[*awstypes.ActivityDoesNotExist](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: name, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.Activities { + if name == aws.ToString(v.Name) { + output = append(output, v) + } + } + } + + return output, nil +} diff --git a/internal/service/sfn/activity_test.go b/internal/service/sfn/activity_test.go index 8ef00bbd07a..b0d3125cc0c 100644 --- a/internal/service/sfn/activity_test.go +++ b/internal/service/sfn/activity_test.go @@ -124,11 +124,7 @@ func testAccCheckActivityExists(ctx context.Context, n 
string) resource.TestChec return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Step Functions Activity ID set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).SFNConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).SFNClient(ctx) _, err := tfsfn.FindActivityByARN(ctx, conn, rs.Primary.ID) @@ -138,7 +134,7 @@ func testAccCheckActivityExists(ctx context.Context, n string) resource.TestChec func testAccCheckActivityDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).SFNConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).SFNClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_sfn_activity" { diff --git a/internal/service/sfn/alias.go b/internal/service/sfn/alias.go index b0230646a02..c53a319c68a 100644 --- a/internal/service/sfn/alias.go +++ b/internal/service/sfn/alias.go @@ -9,20 +9,21 @@ import ( "log" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" + awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_sfn_alias") -func ResourceAlias() *schema.Resource { +// @SDKResource("aws_sfn_alias", name="Alias") +func resourceAlias() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: 
resourceAliasCreate, ReadWithoutTimeout: resourceAliasRead, @@ -83,7 +84,7 @@ const ( func resourceAliasCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) in := &sfn.CreateStateMachineAliasInput{ Name: aws.String(d.Get(names.AttrName).(string)), @@ -94,7 +95,7 @@ func resourceAliasCreate(ctx context.Context, d *schema.ResourceData, meta inter in.RoutingConfiguration = expandAliasRoutingConfiguration(v.([]interface{})) } - out, err := conn.CreateStateMachineAliasWithContext(ctx, in) + out, err := conn.CreateStateMachineAlias(ctx, in) if err != nil { return create.AppendDiagError(diags, names.SFN, create.ErrActionCreating, ResNameAlias, d.Get(names.AttrName).(string), err) } @@ -103,16 +104,16 @@ func resourceAliasCreate(ctx context.Context, d *schema.ResourceData, meta inter return create.AppendDiagError(diags, names.SFN, create.ErrActionCreating, ResNameAlias, d.Get(names.AttrName).(string), errors.New("empty output")) } - d.SetId(aws.StringValue(out.StateMachineAliasArn)) + d.SetId(aws.ToString(out.StateMachineAliasArn)) return append(diags, resourceAliasRead(ctx, d, meta)...) 
} func resourceAliasRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) - out, err := FindAliasByARN(ctx, conn, d.Id()) + out, err := findAliasByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] SFN Alias (%s) not found, removing from state", d.Id()) @@ -127,8 +128,8 @@ func resourceAliasRead(ctx context.Context, d *schema.ResourceData, meta interfa d.Set(names.AttrARN, out.StateMachineAliasArn) d.Set(names.AttrName, out.Name) d.Set(names.AttrDescription, out.Description) - d.Set(names.AttrCreationDate, aws.TimeValue(out.CreationDate).Format(time.RFC3339)) - d.SetId(aws.StringValue(out.StateMachineAliasArn)) + d.Set(names.AttrCreationDate, aws.ToTime(out.CreationDate).Format(time.RFC3339)) + d.SetId(aws.ToString(out.StateMachineAliasArn)) if err := d.Set("routing_configuration", flattenAliasRoutingConfiguration(out.RoutingConfiguration)); err != nil { return create.AppendDiagError(diags, names.SFN, create.ErrActionSetting, ResNameAlias, d.Id(), err) @@ -138,7 +139,7 @@ func resourceAliasRead(ctx context.Context, d *schema.ResourceData, meta interfa func resourceAliasUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) update := false @@ -161,7 +162,7 @@ func resourceAliasUpdate(ctx context.Context, d *schema.ResourceData, meta inter } log.Printf("[DEBUG] Updating SFN Alias (%s): %#v", d.Id(), in) - _, err := conn.UpdateStateMachineAliasWithContext(ctx, in) + _, err := conn.UpdateStateMachineAlias(ctx, in) if err != nil { return create.AppendDiagError(diags, names.SFN, create.ErrActionUpdating, ResNameAlias, d.Id(), err) } @@ -171,10 +172,10 @@ func resourceAliasUpdate(ctx context.Context, d 
*schema.ResourceData, meta inter func resourceAliasDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) log.Printf("[INFO] Deleting SFN Alias %s", d.Id()) - _, err := conn.DeleteStateMachineAliasWithContext(ctx, &sfn.DeleteStateMachineAliasInput{ + _, err := conn.DeleteStateMachineAlias(ctx, &sfn.DeleteStateMachineAliasInput{ StateMachineAliasArn: aws.String(d.Id()), }) @@ -185,12 +186,12 @@ func resourceAliasDelete(ctx context.Context, d *schema.ResourceData, meta inter return diags } -func FindAliasByARN(ctx context.Context, conn *sfn.SFN, arn string) (*sfn.DescribeStateMachineAliasOutput, error) { +func findAliasByARN(ctx context.Context, conn *sfn.Client, arn string) (*sfn.DescribeStateMachineAliasOutput, error) { in := &sfn.DescribeStateMachineAliasInput{ StateMachineAliasArn: aws.String(arn), } - out, err := conn.DescribeStateMachineAliasWithContext(ctx, in) - if tfawserr.ErrCodeEquals(err, sfn.ErrCodeResourceNotFound) { + out, err := conn.DescribeStateMachineAlias(ctx, in) + if errs.IsA[*awstypes.ResourceNotFound](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: in, @@ -208,25 +209,19 @@ func FindAliasByARN(ctx context.Context, conn *sfn.SFN, arn string) (*sfn.Descri return out, nil } -func flattenAliasRoutingConfigurationItem(apiObject *sfn.RoutingConfigurationListItem) map[string]interface{} { - if apiObject == nil { - return nil +func flattenAliasRoutingConfigurationItem(apiObject awstypes.RoutingConfigurationListItem) map[string]interface{} { + tfMap := map[string]interface{}{ + names.AttrWeight: apiObject.Weight, } - tfMap := map[string]interface{}{} - if v := apiObject.StateMachineVersionArn; v != nil { - tfMap["state_machine_version_arn"] = aws.StringValue(v) - } - - if v := apiObject.Weight; v != nil { - tfMap[names.AttrWeight] = aws.Int64Value(v) + 
tfMap["state_machine_version_arn"] = aws.ToString(v) } return tfMap } -func flattenAliasRoutingConfiguration(apiObjects []*sfn.RoutingConfigurationListItem) []interface{} { +func flattenAliasRoutingConfiguration(apiObjects []awstypes.RoutingConfigurationListItem) []interface{} { if len(apiObjects) == 0 { return nil } @@ -234,21 +229,17 @@ func flattenAliasRoutingConfiguration(apiObjects []*sfn.RoutingConfigurationList var tfList []interface{} for _, apiObject := range apiObjects { - if apiObject == nil { - continue - } - tfList = append(tfList, flattenAliasRoutingConfigurationItem(apiObject)) } return tfList } -func expandAliasRoutingConfiguration(tfList []interface{}) []*sfn.RoutingConfigurationListItem { +func expandAliasRoutingConfiguration(tfList []interface{}) []awstypes.RoutingConfigurationListItem { if len(tfList) == 0 { return nil } - var configurationListItems []*sfn.RoutingConfigurationListItem + var configurationListItems []awstypes.RoutingConfigurationListItem for _, tfMapRaw := range tfList { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -259,7 +250,7 @@ func expandAliasRoutingConfiguration(tfList []interface{}) []*sfn.RoutingConfigu configurationListItem := expandAliasRoutingConfigurationItem(tfMap) - if configurationListItem == nil { + if configurationListItem.StateMachineVersionArn == nil { continue } @@ -269,18 +260,15 @@ func expandAliasRoutingConfiguration(tfList []interface{}) []*sfn.RoutingConfigu return configurationListItems } -func expandAliasRoutingConfigurationItem(tfMap map[string]interface{}) *sfn.RoutingConfigurationListItem { - if tfMap == nil { - return nil - } +func expandAliasRoutingConfigurationItem(tfMap map[string]interface{}) awstypes.RoutingConfigurationListItem { + apiObject := awstypes.RoutingConfigurationListItem{} - apiObject := &sfn.RoutingConfigurationListItem{} if v, ok := tfMap["state_machine_version_arn"].(string); ok && v != "" { apiObject.StateMachineVersionArn = aws.String(v) } if v, ok := 
tfMap[names.AttrWeight].(int); ok && v != 0 { - apiObject.Weight = aws.Int64(int64(v)) + apiObject.Weight = int32(v) } return apiObject diff --git a/internal/service/sfn/alias_data_source.go b/internal/service/sfn/alias_data_source.go index 476ecfa0107..aa5f2246e7a 100644 --- a/internal/service/sfn/alias_data_source.go +++ b/internal/service/sfn/alias_data_source.go @@ -8,8 +8,8 @@ import ( "strings" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -18,8 +18,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_sfn_alias") -func DataSourceAlias() *schema.Resource { +// @SDKDataSource("aws_sfn_alias", name="Alias") +func dataSourceAlias() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceAliasRead, @@ -70,14 +70,14 @@ const ( func dataSourceAliasRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) aliasArn := "" in := &sfn.ListStateMachineAliasesInput{ StateMachineArn: aws.String(d.Get("statemachine_arn").(string)), } - out, err := conn.ListStateMachineAliasesWithContext(ctx, in) + out, err := conn.ListStateMachineAliases(ctx, in) if err != nil { return sdkdiag.AppendErrorf(diags, "listing Step Functions State Machines: %s", err) @@ -88,7 +88,7 @@ func dataSourceAliasRead(ctx context.Context, d *schema.ResourceData, meta inter } for _, in := range out.StateMachineAliases { - if v := aws.StringValue(in.StateMachineAliasArn); strings.HasSuffix(v, d.Get(names.AttrName).(string)) { + if v := aws.ToString(in.StateMachineAliasArn); strings.HasSuffix(v, 
d.Get(names.AttrName).(string)) { aliasArn = v } } @@ -97,7 +97,7 @@ func dataSourceAliasRead(ctx context.Context, d *schema.ResourceData, meta inter return sdkdiag.AppendErrorf(diags, "no Step Functions State Machine Aliases matched") } - output, err := FindAliasByARN(ctx, conn, aliasArn) + output, err := findAliasByARN(ctx, conn, aliasArn) if err != nil { return sdkdiag.AppendErrorf(diags, "reading Step Functions State Machine Alias (%s): %s", aliasArn, err) @@ -107,7 +107,7 @@ func dataSourceAliasRead(ctx context.Context, d *schema.ResourceData, meta inter d.Set(names.AttrARN, output.StateMachineAliasArn) d.Set(names.AttrName, output.Name) d.Set(names.AttrDescription, output.Description) - d.Set(names.AttrCreationDate, aws.TimeValue(output.CreationDate).Format(time.RFC3339)) + d.Set(names.AttrCreationDate, aws.ToTime(output.CreationDate).Format(time.RFC3339)) if err := d.Set("routing_configuration", flattenAliasRoutingConfiguration(output.RoutingConfiguration)); err != nil { return create.AppendDiagError(diags, names.SFN, create.ErrActionSetting, ResNameAlias, d.Id(), err) diff --git a/internal/service/sfn/alias_test.go b/internal/service/sfn/alias_test.go index b86b721a1ff..03702a7117c 100644 --- a/internal/service/sfn/alias_test.go +++ b/internal/service/sfn/alias_test.go @@ -8,7 +8,7 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go-v2/service/sfn" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -103,7 +103,7 @@ func testAccCheckAliasAttributes(mapping *sfn.DescribeStateMachineAliasOutput) r func testAccCheckAliasDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).SFNConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).SFNClient(ctx) for _, rs := range 
s.RootModule().Resources { if rs.Type != "aws_sfn_alias" { @@ -134,11 +134,7 @@ func testAccCheckAliasExists(ctx context.Context, name string, v *sfn.DescribeSt return fmt.Errorf("Not found: %s", name) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Step Functions State Machine Alias ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).SFNConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).SFNClient(ctx) output, err := tfsfn.FindAliasByARN(ctx, conn, rs.Primary.ID) diff --git a/internal/service/sfn/exports_test.go b/internal/service/sfn/exports_test.go new file mode 100644 index 00000000000..9f312716599 --- /dev/null +++ b/internal/service/sfn/exports_test.go @@ -0,0 +1,15 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package sfn + +// Exports for use in tests only. +var ( + ResourceActivity = resourceActivity + ResourceAlias = resourceAlias + ResourceStateMachine = resourceStateMachine + + FindActivityByARN = findActivityByARN + FindAliasByARN = findAliasByARN + FindStateMachineByARN = findStateMachineByARN +) diff --git a/internal/service/sfn/generate.go b/internal/service/sfn/generate.go index 5b3d8d3fe08..a171f3f40c1 100644 --- a/internal/service/sfn/generate.go +++ b/internal/service/sfn/generate.go @@ -1,8 +1,8 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/listpages/main.go -ListOps=ListStateMachineVersions -//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags +//go:generate go run ../../generate/listpages/main.go -ListOps=ListStateMachineVersions -AWSSDKVersion=2 +//go:generate go run ../../generate/tags/main.go -ListTags -ServiceTagsSlice -UpdateTags -AWSSDKVersion=2 //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/sfn/list_pages_gen.go b/internal/service/sfn/list_pages_gen.go index cd37e2773e3..e9013ce2c4d 100644 --- a/internal/service/sfn/list_pages_gen.go +++ b/internal/service/sfn/list_pages_gen.go @@ -1,23 +1,22 @@ -// Code generated by "internal/generate/listpages/main.go -ListOps=ListStateMachineVersions"; DO NOT EDIT. +// Code generated by "internal/generate/listpages/main.go -ListOps=ListStateMachineVersions -AWSSDKVersion=2"; DO NOT EDIT. package sfn import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" - "github.com/aws/aws-sdk-go/service/sfn/sfniface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" ) -func listStateMachineVersionsPages(ctx context.Context, conn sfniface.SFNAPI, input *sfn.ListStateMachineVersionsInput, fn func(*sfn.ListStateMachineVersionsOutput, bool) bool) error { +func listStateMachineVersionsPages(ctx context.Context, conn *sfn.Client, input *sfn.ListStateMachineVersionsInput, fn func(*sfn.ListStateMachineVersionsOutput, bool) bool) error { for { - output, err := conn.ListStateMachineVersionsWithContext(ctx, input) + output, err := conn.ListStateMachineVersions(ctx, input) if err != nil { return err } - lastPage := aws.StringValue(output.NextToken) == "" + lastPage := aws.ToString(output.NextToken) == "" if !fn(output, lastPage) || lastPage { break } diff --git a/internal/service/sfn/service_endpoint_resolver_gen.go b/internal/service/sfn/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..2abda1cfad8 --- /dev/null +++ b/internal/service/sfn/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package sfn + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + sfn_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sfn" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ sfn_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver sfn_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: sfn_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params sfn_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up sfn endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*sfn_sdkv2.Options) { + return func(o *sfn_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/sfn/service_endpoints_gen_test.go b/internal/service/sfn/service_endpoints_gen_test.go index 146be236e96..63aae5da534 100644 --- a/internal/service/sfn/service_endpoints_gen_test.go +++ b/internal/service/sfn/service_endpoints_gen_test.go @@ -4,17 +4,22 @@ package sfn_test import ( "context" + "errors" "fmt" "maps" + "net" "net/url" "os" "path/filepath" + "reflect" "strings" "testing" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/endpoints" - sfn_sdkv1 "github.com/aws/aws-sdk-go/service/sfn" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + sfn_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sfn" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" "github.com/google/go-cmp/cmp" "github.com/hashicorp/aws-sdk-go-base/v2/servicemocks" "github.com/hashicorp/go-cty/cty" @@ -88,7 +93,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -271,7 +276,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -292,55 +297,64 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { - r := 
endpoints.DefaultResolver() +func defaultEndpoint(region string) (url.URL, error) { + r := sfn_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(sfn_sdkv1.EndpointsID, region) + ep, err := r.ResolveEndpoint(context.Background(), sfn_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { - r := endpoints.DefaultResolver() +func defaultFIPSEndpoint(region string) (url.URL, error) { + r := sfn_sdkv2.NewDefaultEndpointResolverV2() - ep, err := r.EndpointFor(sfn_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { - opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled + ep, err := r.ResolveEndpoint(context.Background(), sfn_sdkv2.EndpointParameters{ + Region: aws_sdkv2.String(region), + UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } - url, _ := url.Parse(ep.URL) - - if url.Path == "" { - url.Path = "/" + if ep.URI.Path == "" { + ep.URI.Path = "/" } - return url.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { t.Helper() - client := meta.SFNConn(ctx) - - req, _ := client.ListActivitiesRequest(&sfn_sdkv1.ListActivitiesInput{}) + client := meta.SFNClient(ctx) - req.HTTPRequest.URL.Path = "/" + var result apiCallParams - return apiCallParams{ - endpoint: req.HTTPRequest.URL.String(), - region: aws_sdkv1.StringValue(client.Config.Region), + _, err := client.ListActivities(ctx, &sfn_sdkv2.ListActivitiesInput{}, + func(opts *sfn_sdkv2.Options) { + opts.APIOptions = append(opts.APIOptions, + addRetrieveEndpointURLMiddleware(t, &result.endpoint), + addRetrieveRegionMiddleware(&result.region), + addCancelRequestMiddleware(), + ) + }, + ) + if err == nil 
{ + t.Fatal("Expected an error, got none") + } else if !errors.Is(err, errCancelOperation) { + t.Fatalf("Unexpected error: %s", err) } + + return result } func withNoConfig(_ *caseSetup) { @@ -396,16 +410,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } @@ -523,6 +559,89 @@ func testEndpointCase(t *testing.T, region string, testcase endpointTestCase, ca } } +func addRetrieveEndpointURLMiddleware(t *testing.T, endpoint *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + retrieveEndpointURLMiddleware(t, endpoint), + middleware.After, + ) + } +} + +func retrieveEndpointURLMiddleware(t *testing.T, endpoint *string) middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Retrieve Endpoint", + func(ctx context.Context, in 
middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + t.Helper() + + request, ok := in.Request.(*smithyhttp.Request) + if !ok { + t.Fatalf("Expected *github.com/aws/smithy-go/transport/http.Request, got %s", fullTypeName(in.Request)) + } + + url := request.URL + url.RawQuery = "" + url.Path = "/" + + *endpoint = url.String() + + return next.HandleFinalize(ctx, in) + }) +} + +func addRetrieveRegionMiddleware(region *string) func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Serialize.Add( + retrieveRegionMiddleware(region), + middleware.After, + ) + } +} + +func retrieveRegionMiddleware(region *string) middleware.SerializeMiddleware { + return middleware.SerializeMiddlewareFunc( + "Test: Retrieve Region", + func(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (middleware.SerializeOutput, middleware.Metadata, error) { + *region = awsmiddleware.GetRegion(ctx) + + return next.HandleSerialize(ctx, in) + }, + ) +} + +var errCancelOperation = fmt.Errorf("Test: Canceling request") + +func addCancelRequestMiddleware() func(*middleware.Stack) error { + return func(stack *middleware.Stack) error { + return stack.Finalize.Add( + cancelRequestMiddleware(), + middleware.After, + ) + } +} + +// cancelRequestMiddleware creates a Smithy middleware that intercepts the request before sending and cancels it +func cancelRequestMiddleware() middleware.FinalizeMiddleware { + return middleware.FinalizeMiddlewareFunc( + "Test: Cancel Requests", + func(_ context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (middleware.FinalizeOutput, middleware.Metadata, error) { + return middleware.FinalizeOutput{}, middleware.Metadata{}, errCancelOperation + }) +} + +func fullTypeName(i interface{}) string { + return fullValueTypeName(reflect.ValueOf(i)) +} + +func fullValueTypeName(v reflect.Value) string { + if v.Kind() == reflect.Ptr 
{ + return "*" + fullValueTypeName(reflect.Indirect(v)) + } + + requestType := v.Type() + return fmt.Sprintf("%s.%s", requestType.PkgPath(), requestType.Name()) +} + func generateSharedConfigFile(config configFile) string { var buf strings.Builder diff --git a/internal/service/sfn/service_package_gen.go b/internal/service/sfn/service_package_gen.go index 894c7ea6ac7..1234eb55fc1 100644 --- a/internal/service/sfn/service_package_gen.go +++ b/internal/service/sfn/service_package_gen.go @@ -1,15 +1,12 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package sfn import ( "context" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - sfn_sdkv1 "github.com/aws/aws-sdk-go/service/sfn" - "github.com/hashicorp/terraform-plugin-log/tflog" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + sfn_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sfn" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -28,20 +25,24 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePackageSDKDataSource { return []*types.ServicePackageSDKDataSource{ { - Factory: DataSourceActivity, + Factory: dataSourceActivity, TypeName: "aws_sfn_activity", + Name: "Activity", }, { - Factory: DataSourceAlias, + Factory: dataSourceAlias, TypeName: "aws_sfn_alias", + Name: "Alias", }, { - Factory: DataSourceStateMachine, + Factory: dataSourceStateMachine, TypeName: "aws_sfn_state_machine", + Name: "State Machine", }, { - Factory: DataSourceStateMachineVersions, + Factory: dataSourceStateMachineVersions, TypeName: "aws_sfn_state_machine_versions", + Name: "State Machine 
Versions", }, } } @@ -49,7 +50,7 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePackageSDKResource { return []*types.ServicePackageSDKResource{ { - Factory: ResourceActivity, + Factory: resourceActivity, TypeName: "aws_sfn_activity", Name: "Activity", Tags: &types.ServicePackageResourceTags{ @@ -57,11 +58,12 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka }, }, { - Factory: ResourceAlias, + Factory: resourceAlias, TypeName: "aws_sfn_alias", + Name: "Alias", }, { - Factory: ResourceStateMachine, + Factory: resourceStateMachine, TypeName: "aws_sfn_state_machine", Name: "State Machine", Tags: &types.ServicePackageResourceTags{ @@ -75,25 +77,14 @@ func (p *servicePackage) ServicePackageName() string { return names.SFN } -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*sfn_sdkv1.SFN, error) { - sess := config[names.AttrSession].(*session_sdkv1.Session) - - cfg := aws_sdkv1.Config{} - - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } - } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*sfn_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return sfn_sdkv1.New(sess.Copy(&cfg)), nil + return sfn_sdkv2.NewFromConfig(cfg, + sfn_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/sfn/state_machine.go b/internal/service/sfn/state_machine.go index f7c5e874f03..f7ce31dae14 100644 --- a/internal/service/sfn/state_machine.go +++ b/internal/service/sfn/state_machine.go @@ -10,9 +10,9 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" + awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -20,6 +20,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" @@ -29,7 +31,7 @@ import ( // @SDKResource("aws_sfn_state_machine", name="State Machine") // @Tags(identifierAttribute="id") -func ResourceStateMachine() *schema.Resource { +func resourceStateMachine() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: 
resourceStateMachineCreate, ReadWithoutTimeout: resourceStateMachineRead, @@ -76,9 +78,9 @@ func ResourceStateMachine() *schema.Resource { Optional: true, }, "level": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(sfn.LogLevel_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[awstypes.LogLevel](), }, "log_destination": { Type: schema.TypeString, @@ -150,11 +152,11 @@ func ResourceStateMachine() *schema.Resource { DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, }, names.AttrType: { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - Default: sfn.StateMachineTypeStandard, - ValidateFunc: validation.StringInSlice(sfn.StateMachineType_Values(), false), + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: awstypes.StateMachineTypeStandard, + ValidateDiagFunc: enum.Validate[awstypes.StateMachineType](), }, "version_description": { Type: schema.TypeString, @@ -168,16 +170,16 @@ func ResourceStateMachine() *schema.Resource { func resourceStateMachineCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) name := create.Name(d.Get(names.AttrName).(string), d.Get(names.AttrNamePrefix).(string)) input := &sfn.CreateStateMachineInput{ Definition: aws.String(d.Get("definition").(string)), Name: aws.String(name), - Publish: aws.Bool(d.Get("publish").(bool)), + Publish: d.Get("publish").(bool), RoleArn: aws.String(d.Get(names.AttrRoleARN).(string)), Tags: getTagsIn(ctx), - Type: aws.String(d.Get(names.AttrType).(string)), + Type: awstypes.StateMachineType(d.Get(names.AttrType).(string)), } if v, ok := d.GetOk(names.AttrLoggingConfiguration); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -193,14 +195,14 @@ func resourceStateMachineCreate(ctx context.Context, d 
*schema.ResourceData, met // when creating the step function. This can happen when we are // updating the resource (since there is no update API call). outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.CreateStateMachineWithContext(ctx, input) - }, sfn.ErrCodeStateMachineDeleting, "AccessDeniedException") + return conn.CreateStateMachine(ctx, input) + }, "StateMachineDeleting", "AccessDeniedException") if err != nil { return sdkdiag.AppendErrorf(diags, "creating Step Functions State Machine (%s): %s", name, err) } - arn := aws.StringValue(outputRaw.(*sfn.CreateStateMachineOutput).StateMachineArn) + arn := aws.ToString(outputRaw.(*sfn.CreateStateMachineOutput).StateMachineArn) d.SetId(arn) return append(diags, resourceStateMachineRead(ctx, d, meta)...) @@ -208,9 +210,9 @@ func resourceStateMachineCreate(ctx context.Context, d *schema.ResourceData, met func resourceStateMachineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) - output, err := FindStateMachineByARN(ctx, conn, d.Id()) + output, err := findStateMachineByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Step Functions State Machine (%s) not found, removing from state", d.Id()) @@ -224,7 +226,7 @@ func resourceStateMachineRead(ctx context.Context, d *schema.ResourceData, meta d.Set(names.AttrARN, output.StateMachineArn) if output.CreationDate != nil { - d.Set(names.AttrCreationDate, aws.TimeValue(output.CreationDate).Format(time.RFC3339)) + d.Set(names.AttrCreationDate, aws.ToTime(output.CreationDate).Format(time.RFC3339)) } else { d.Set(names.AttrCreationDate, nil) } @@ -238,7 +240,7 @@ func resourceStateMachineRead(ctx context.Context, d *schema.ResourceData, meta d.Set(names.AttrLoggingConfiguration, nil) } 
d.Set(names.AttrName, output.Name) - d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.StringValue(output.Name))) + d.Set(names.AttrNamePrefix, create.NamePrefixFromName(aws.ToString(output.Name))) d.Set("publish", d.Get("publish").(bool)) d.Set(names.AttrRoleARN, output.RoleArn) d.Set("revision_id", output.RevisionId) @@ -255,7 +257,7 @@ func resourceStateMachineRead(ctx context.Context, d *schema.ResourceData, meta input := &sfn.ListStateMachineVersionsInput{ StateMachineArn: aws.String(d.Id()), } - listVersionsOutput, err := conn.ListStateMachineVersionsWithContext(ctx, input) + listVersionsOutput, err := conn.ListStateMachineVersions(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "listing Step Functions State Machine (%s) Versions: %s", d.Id(), err) @@ -274,7 +276,7 @@ func resourceStateMachineRead(ctx context.Context, d *schema.ResourceData, meta func resourceStateMachineUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { // "You must include at least one of definition or roleArn or you will receive a MissingRequiredParameter error" @@ -282,7 +284,7 @@ func resourceStateMachineUpdate(ctx context.Context, d *schema.ResourceData, met Definition: aws.String(d.Get("definition").(string)), RoleArn: aws.String(d.Get(names.AttrRoleARN).(string)), StateMachineArn: aws.String(d.Id()), - Publish: aws.Bool(d.Get("publish").(bool)), + Publish: d.Get("publish").(bool), } if v, ok := d.GetOk("publish"); ok && v == true { @@ -301,7 +303,7 @@ func resourceStateMachineUpdate(ctx context.Context, d *schema.ResourceData, met } } - _, err := conn.UpdateStateMachineWithContext(ctx, input) + _, err := conn.UpdateStateMachine(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating Step Functions State Machine (%s): %s", d.Id(), 
err) @@ -309,18 +311,18 @@ func resourceStateMachineUpdate(ctx context.Context, d *schema.ResourceData, met // Handle eventual consistency after update. err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError { // nosemgrep:ci.helper-schema-retry-RetryContext-without-TimeoutError-check - output, err := FindStateMachineByARN(ctx, conn, d.Id()) + output, err := findStateMachineByARN(ctx, conn, d.Id()) if err != nil { return retry.NonRetryableError(err) } - if d.HasChange("definition") && !verify.JSONBytesEqual([]byte(aws.StringValue(output.Definition)), []byte(d.Get("definition").(string))) || - d.HasChange(names.AttrRoleARN) && aws.StringValue(output.RoleArn) != d.Get(names.AttrRoleARN).(string) || + if d.HasChange("definition") && !verify.JSONBytesEqual([]byte(aws.ToString(output.Definition)), []byte(d.Get("definition").(string))) || + d.HasChange(names.AttrRoleARN) && aws.ToString(output.RoleArn) != d.Get(names.AttrRoleARN).(string) || //d.HasChange("publish") && aws.Bool(output.Publish) != d.Get("publish").(bool) || - d.HasChange("tracing_configuration.0.enabled") && output.TracingConfiguration != nil && aws.BoolValue(output.TracingConfiguration.Enabled) != d.Get("tracing_configuration.0.enabled").(bool) || - d.HasChange("logging_configuration.0.include_execution_data") && output.LoggingConfiguration != nil && aws.BoolValue(output.LoggingConfiguration.IncludeExecutionData) != d.Get("logging_configuration.0.include_execution_data").(bool) || - d.HasChange("logging_configuration.0.level") && output.LoggingConfiguration != nil && aws.StringValue(output.LoggingConfiguration.Level) != d.Get("logging_configuration.0.level").(string) { + d.HasChange("tracing_configuration.0.enabled") && output.TracingConfiguration != nil && output.TracingConfiguration.Enabled != d.Get("tracing_configuration.0.enabled").(bool) || + d.HasChange("logging_configuration.0.include_execution_data") && output.LoggingConfiguration != nil && 
output.LoggingConfiguration.IncludeExecutionData != d.Get("logging_configuration.0.include_execution_data").(bool) || + d.HasChange("logging_configuration.0.level") && output.LoggingConfiguration != nil && string(output.LoggingConfiguration.Level) != d.Get("logging_configuration.0.level").(string) { return retry.RetryableError(fmt.Errorf("Step Functions State Machine (%s) eventual consistency", d.Id())) } @@ -337,10 +339,10 @@ func resourceStateMachineUpdate(ctx context.Context, d *schema.ResourceData, met func resourceStateMachineDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) log.Printf("[DEBUG] Deleting Step Functions State Machine: %s", d.Id()) - _, err := conn.DeleteStateMachineWithContext(ctx, &sfn.DeleteStateMachineInput{ + _, err := conn.DeleteStateMachine(ctx, &sfn.DeleteStateMachineInput{ StateMachineArn: aws.String(d.Id()), }) @@ -355,14 +357,14 @@ func resourceStateMachineDelete(ctx context.Context, d *schema.ResourceData, met return diags } -func FindStateMachineByARN(ctx context.Context, conn *sfn.SFN, arn string) (*sfn.DescribeStateMachineOutput, error) { +func findStateMachineByARN(ctx context.Context, conn *sfn.Client, arn string) (*sfn.DescribeStateMachineOutput, error) { input := &sfn.DescribeStateMachineInput{ StateMachineArn: aws.String(arn), } - output, err := conn.DescribeStateMachineWithContext(ctx, input) + output, err := conn.DescribeStateMachine(ctx, input) - if tfawserr.ErrCodeEquals(err, sfn.ErrCodeStateMachineDoesNotExist) { + if errs.IsA[*awstypes.StateMachineDoesNotExist](err) { return nil, &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -380,9 +382,9 @@ func FindStateMachineByARN(ctx context.Context, conn *sfn.SFN, arn string) (*sfn return output, nil } -func statusStateMachine(ctx context.Context, conn *sfn.SFN, stateMachineArn string) retry.StateRefreshFunc 
{ +func statusStateMachine(ctx context.Context, conn *sfn.Client, stateMachineArn string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - output, err := FindStateMachineByARN(ctx, conn, stateMachineArn) + output, err := findStateMachineByARN(ctx, conn, stateMachineArn) if tfresource.NotFound(err) { return nil, "", nil @@ -392,13 +394,13 @@ func statusStateMachine(ctx context.Context, conn *sfn.SFN, stateMachineArn stri return nil, "", err } - return output, aws.StringValue(output.Status), nil + return output, string(output.Status), nil } } -func waitStateMachineDeleted(ctx context.Context, conn *sfn.SFN, stateMachineArn string, timeout time.Duration) (*sfn.DescribeStateMachineOutput, error) { +func waitStateMachineDeleted(ctx context.Context, conn *sfn.Client, stateMachineArn string, timeout time.Duration) (*sfn.DescribeStateMachineOutput, error) { stateConf := &retry.StateChangeConf{ - Pending: []string{sfn.StateMachineStatusActive, sfn.StateMachineStatusDeleting}, + Pending: enum.Slice(awstypes.StateMachineStatusActive, awstypes.StateMachineStatusDeleting), Target: []string{}, Refresh: statusStateMachine(ctx, conn, stateMachineArn), Timeout: timeout, @@ -413,24 +415,24 @@ func waitStateMachineDeleted(ctx context.Context, conn *sfn.SFN, stateMachineArn return nil, err } -func expandLoggingConfiguration(tfMap map[string]interface{}) *sfn.LoggingConfiguration { +func expandLoggingConfiguration(tfMap map[string]interface{}) *awstypes.LoggingConfiguration { if tfMap == nil { return nil } - apiObject := &sfn.LoggingConfiguration{} + apiObject := &awstypes.LoggingConfiguration{} if v, ok := tfMap["include_execution_data"].(bool); ok { - apiObject.IncludeExecutionData = aws.Bool(v) + apiObject.IncludeExecutionData = v } if v, ok := tfMap["level"].(string); ok && v != "" { - apiObject.Level = aws.String(v) + apiObject.Level = awstypes.LogLevel(v) } if v, ok := tfMap["log_destination"].(string); ok && v != "" { - apiObject.Destinations = 
[]*sfn.LogDestination{{ - CloudWatchLogsLogGroup: &sfn.CloudWatchLogsLogGroup{ + apiObject.Destinations = []awstypes.LogDestination{{ + CloudWatchLogsLogGroup: &awstypes.CloudWatchLogsLogGroup{ LogGroupArn: aws.String(v), }, }} @@ -439,51 +441,44 @@ func expandLoggingConfiguration(tfMap map[string]interface{}) *sfn.LoggingConfig return apiObject } -func flattenLoggingConfiguration(apiObject *sfn.LoggingConfiguration) map[string]interface{} { +func flattenLoggingConfiguration(apiObject *awstypes.LoggingConfiguration) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.IncludeExecutionData; v != nil { - tfMap["include_execution_data"] = aws.BoolValue(v) - } - - if v := apiObject.Level; v != nil { - tfMap["level"] = aws.StringValue(v) + tfMap := map[string]interface{}{ + "include_execution_data": apiObject.IncludeExecutionData, + "level": apiObject.Level, } if v := apiObject.Destinations; len(v) > 0 { - tfMap["log_destination"] = aws.StringValue(v[0].CloudWatchLogsLogGroup.LogGroupArn) + tfMap["log_destination"] = aws.ToString(v[0].CloudWatchLogsLogGroup.LogGroupArn) } return tfMap } -func expandTracingConfiguration(tfMap map[string]interface{}) *sfn.TracingConfiguration { +func expandTracingConfiguration(tfMap map[string]interface{}) *awstypes.TracingConfiguration { if tfMap == nil { return nil } - apiObject := &sfn.TracingConfiguration{} + apiObject := &awstypes.TracingConfiguration{} if v, ok := tfMap[names.AttrEnabled].(bool); ok { - apiObject.Enabled = aws.Bool(v) + apiObject.Enabled = v } return apiObject } -func flattenTracingConfiguration(apiObject *sfn.TracingConfiguration) map[string]interface{} { +func flattenTracingConfiguration(apiObject *awstypes.TracingConfiguration) map[string]interface{} { if apiObject == nil { return nil } - tfMap := map[string]interface{}{} - - if v := apiObject.Enabled; v != nil { - tfMap[names.AttrEnabled] = aws.BoolValue(v) + tfMap := map[string]interface{}{ + 
names.AttrEnabled: apiObject.Enabled, } return tfMap diff --git a/internal/service/sfn/state_machine_data_source.go b/internal/service/sfn/state_machine_data_source.go index 62fa4ef3498..b59eb93f62a 100644 --- a/internal/service/sfn/state_machine_data_source.go +++ b/internal/service/sfn/state_machine_data_source.go @@ -7,17 +7,20 @@ import ( "context" "time" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" + awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKDataSource("aws_sfn_state_machine") -func DataSourceStateMachine() *schema.Resource { +// @SDKDataSource("aws_sfn_state_machine", name="State Machine") +func dataSourceStateMachine() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceStateMachineRead, @@ -60,51 +63,64 @@ func DataSourceStateMachine() *schema.Resource { func dataSourceStateMachineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) name := d.Get(names.AttrName).(string) - var arns []string - - err := conn.ListStateMachinesPagesWithContext(ctx, &sfn.ListStateMachinesInput{}, func(page *sfn.ListStateMachinesOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, v := range page.StateMachines { - if aws.StringValue(v.Name) == name { - arns = append(arns, aws.StringValue(v.StateMachineArn)) - } - } - - return 
!lastPage - }) + output, err := findStateByARN(ctx, conn, name) if err != nil { return sdkdiag.AppendErrorf(diags, "listing Step Functions State Machines: %s", err) } - if n := len(arns); n == 0 { + if n := len(output); n == 0 { return sdkdiag.AppendErrorf(diags, "no Step Functions State Machines matched") } else if n > 1 { return sdkdiag.AppendErrorf(diags, "%d Step Functions State Machines matched; use additional constraints to reduce matches to a single State Machine", n) } - arn := arns[0] - output, err := FindStateMachineByARN(ctx, conn, arn) + out, err := findStateMachineByARN(ctx, conn, aws.ToString(output[0].StateMachineArn)) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading Step Functions State Machine (%s): %s", arn, err) + return sdkdiag.AppendErrorf(diags, "reading Step Functions State Machine (%s): %s", aws.ToString(output[0].StateMachineArn), err) } - d.SetId(arn) - d.Set(names.AttrARN, output.StateMachineArn) - d.Set(names.AttrCreationDate, output.CreationDate.Format(time.RFC3339)) - d.Set(names.AttrDescription, output.Description) - d.Set("definition", output.Definition) - d.Set(names.AttrName, output.Name) - d.Set(names.AttrRoleARN, output.RoleArn) - d.Set("revision_id", output.RevisionId) - d.Set(names.AttrStatus, output.Status) + d.SetId(aws.ToString(out.StateMachineArn)) + d.Set(names.AttrARN, out.StateMachineArn) + d.Set(names.AttrCreationDate, out.CreationDate.Format(time.RFC3339)) + d.Set(names.AttrDescription, out.Description) + d.Set("definition", out.Definition) + d.Set(names.AttrName, out.Name) + d.Set(names.AttrRoleARN, out.RoleArn) + d.Set("revision_id", out.RevisionId) + d.Set(names.AttrStatus, out.Status) return diags } + +func findStateByARN(ctx context.Context, conn *sfn.Client, name string) ([]awstypes.StateMachineListItem, error) { + var output []awstypes.StateMachineListItem + + pages := sfn.NewListStateMachinesPaginator(conn, &sfn.ListStateMachinesInput{}) + for pages.HasMorePages() { + page, err := 
pages.NextPage(ctx) + + if errs.IsA[*awstypes.StateMachineDoesNotExist](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: name, + } + } + + if err != nil { + return nil, err + } + + for _, v := range page.StateMachines { + if name == aws.ToString(v.Name) { + output = append(output, v) + } + } + } + + return output, nil +} diff --git a/internal/service/sfn/state_machine_test.go b/internal/service/sfn/state_machine_test.go index bad437fc728..44ea5527392 100644 --- a/internal/service/sfn/state_machine_test.go +++ b/internal/service/sfn/state_machine_test.go @@ -10,7 +10,8 @@ import ( "testing" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go-v2/service/sfn" + awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -40,7 +41,7 @@ func TestAccSFNStateMachine_createUpdate(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckExists(ctx, resourceName, &sm), acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "states", fmt.Sprintf("stateMachine:%s", rName)), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, sfn.StateMachineStatusActive), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttr(resourceName, names.AttrNamePrefix, ""), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), @@ -70,7 +71,7 @@ func TestAccSFNStateMachine_createUpdate(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( testAccCheckExists(ctx, resourceName, &sm), acctest.CheckResourceAttrRegionalARN(resourceName, names.AttrARN, "states", fmt.Sprintf("stateMachine:%s", rName)), - 
resource.TestCheckResourceAttr(resourceName, names.AttrStatus, sfn.StateMachineStatusActive), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), resource.TestMatchResourceAttr(resourceName, "definition", regexache.MustCompile(`.*\"MaxAttempts\": 10.*`)), @@ -107,7 +108,7 @@ func TestAccSFNStateMachine_expressUpdate(t *testing.T) { Config: testAccStateMachineConfig_typed(rName, "EXPRESS", 5), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckExists(ctx, resourceName, &sm), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, sfn.StateMachineStatusActive), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), resource.TestCheckResourceAttrSet(resourceName, "definition"), @@ -128,7 +129,7 @@ func TestAccSFNStateMachine_expressUpdate(t *testing.T) { Config: testAccStateMachineConfig_typed(rName, "EXPRESS", 10), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckExists(ctx, resourceName, &sm), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, sfn.StateMachineStatusActive), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), resource.TestMatchResourceAttr(resourceName, "definition", regexache.MustCompile(`.*\"MaxAttempts\": 10.*`)), @@ -163,7 +164,7 @@ func TestAccSFNStateMachine_standardUpdate(t *testing.T) { Config: testAccStateMachineConfig_typed(rName, "STANDARD", 5), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckExists(ctx, 
resourceName, &sm), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, sfn.StateMachineStatusActive), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), resource.TestCheckResourceAttrSet(resourceName, "definition"), @@ -185,7 +186,7 @@ func TestAccSFNStateMachine_standardUpdate(t *testing.T) { Config: testAccStateMachineConfig_typed(rName, "STANDARD", 10), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckExists(ctx, resourceName, &sm), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, sfn.StateMachineStatusActive), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), resource.TestMatchResourceAttr(resourceName, "definition", regexache.MustCompile(`.*\"MaxAttempts\": 10.*`)), @@ -413,30 +414,30 @@ func TestAccSFNStateMachine_expressLogging(t *testing.T) { CheckDestroy: testAccCheckStateMachineDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccStateMachineConfig_expressLogConfiguration(rName, sfn.LogLevelError), + Config: testAccStateMachineConfig_expressLogConfiguration(rName, string(awstypes.LogLevelError)), Check: resource.ComposeTestCheckFunc( testAccCheckExists(ctx, resourceName, &sm), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, sfn.StateMachineStatusActive), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), resource.TestCheckResourceAttrSet(resourceName, "definition"), 
resource.TestMatchResourceAttr(resourceName, "definition", regexache.MustCompile(`.*\"MaxAttempts\": 5.*`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrRoleARN), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.level", sfn.LogLevelError), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.level", string(awstypes.LogLevelError)), ), }, { - Config: testAccStateMachineConfig_expressLogConfiguration(rName, sfn.LogLevelAll), + Config: testAccStateMachineConfig_expressLogConfiguration(rName, string(awstypes.LogLevelAll)), Check: resource.ComposeTestCheckFunc( testAccCheckExists(ctx, resourceName, &sm), - resource.TestCheckResourceAttr(resourceName, names.AttrStatus, sfn.StateMachineStatusActive), + resource.TestCheckResourceAttr(resourceName, names.AttrStatus, string(awstypes.StateMachineStatusActive)), resource.TestCheckResourceAttr(resourceName, names.AttrName, rName), resource.TestCheckResourceAttrSet(resourceName, names.AttrCreationDate), resource.TestMatchResourceAttr(resourceName, "definition", regexache.MustCompile(`.*\"MaxAttempts\": 5.*`)), resource.TestCheckResourceAttrSet(resourceName, names.AttrRoleARN), resource.TestCheckResourceAttr(resourceName, "logging_configuration.#", acctest.Ct1), - resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.level", sfn.LogLevelAll), + resource.TestCheckResourceAttr(resourceName, "logging_configuration.0.level", string(awstypes.LogLevelAll)), ), }, }, @@ -450,11 +451,7 @@ func testAccCheckExists(ctx context.Context, n string, v *sfn.DescribeStateMachi return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No Step Functions State Machine ID is set") - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).SFNConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).SFNClient(ctx) output, err := tfsfn.FindStateMachineByARN(ctx, 
conn, rs.Primary.ID) @@ -470,7 +467,7 @@ func testAccCheckExists(ctx context.Context, n string, v *sfn.DescribeStateMachi func testAccCheckStateMachineDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).SFNConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).SFNClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_sfn_state_machine" { diff --git a/internal/service/sfn/state_machine_versions_data_source.go b/internal/service/sfn/state_machine_versions_data_source.go index babbb6b089a..4386c951ebe 100644 --- a/internal/service/sfn/state_machine_versions_data_source.go +++ b/internal/service/sfn/state_machine_versions_data_source.go @@ -6,8 +6,8 @@ package sfn import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -15,8 +15,8 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/verify" ) -// @SDKDataSource("aws_sfn_state_machine_versions") -func DataSourceStateMachineVersions() *schema.Resource { +// @SDKDataSource("aws_sfn_state_machine_versions", name="State Machine Versions") +func dataSourceStateMachineVersions() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceStateMachineVersionsRead, @@ -37,7 +37,7 @@ func DataSourceStateMachineVersions() *schema.Resource { func dataSourceStateMachineVersionsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SFNConn(ctx) + conn := meta.(*conns.AWSClient).SFNClient(ctx) smARN := d.Get("statemachine_arn").(string) input := &sfn.ListStateMachineVersionsInput{ @@ -51,9 +51,7 @@ 
func dataSourceStateMachineVersionsRead(ctx context.Context, d *schema.ResourceD } for _, v := range page.StateMachineVersions { - if v != nil { - smvARNs = append(smvARNs, aws.StringValue(v.StateMachineVersionArn)) - } + smvARNs = append(smvARNs, aws.ToString(v.StateMachineVersionArn)) } return !lastPage diff --git a/internal/service/sfn/sweep.go b/internal/service/sfn/sweep.go index 01c846d6b3f..5428d551027 100644 --- a/internal/service/sfn/sweep.go +++ b/internal/service/sfn/sweep.go @@ -7,11 +7,11 @@ import ( "fmt" "log" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/sweep" - "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv1" + "github.com/hashicorp/terraform-provider-aws/internal/sweep/awsv2" ) func RegisterSweepers() { @@ -32,33 +32,30 @@ func sweepActivities(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.SFNConn(ctx) + conn := client.SFNClient(ctx) input := &sfn.ListActivitiesInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListActivitiesPagesWithContext(ctx, input, func(page *sfn.ListActivitiesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := sfn.NewListActivitiesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Step Functions Activity sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Step Functions Activities (%s): %w", region, err) } for _, v := range page.Activities { - r := ResourceActivity() + r := resourceActivity() d := r.Data(nil) - d.SetId(aws.StringValue(v.ActivityArn)) + d.SetId(aws.ToString(v.ActivityArn)) sweepResources = append(sweepResources, 
sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Step Functions Activity sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing Step Functions Activities (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) @@ -76,33 +73,30 @@ func sweepStateMachines(region string) error { if err != nil { return fmt.Errorf("error getting client: %s", err) } - conn := client.SFNConn(ctx) + conn := client.SFNClient(ctx) input := &sfn.ListStateMachinesInput{} sweepResources := make([]sweep.Sweepable, 0) - err = conn.ListStateMachinesPagesWithContext(ctx, input, func(page *sfn.ListStateMachinesOutput, lastPage bool) bool { - if page == nil { - return !lastPage + pages := sfn.NewListStateMachinesPaginator(conn, input) + for pages.HasMorePages() { + page, err := pages.NextPage(ctx) + + if awsv2.SkipSweepError(err) { + log.Printf("[WARN] Skipping Step Functions State Machine sweep for %s: %s", region, err) + return nil + } + + if err != nil { + return fmt.Errorf("error listing Step Functions State Machines (%s): %w", region, err) } for _, v := range page.StateMachines { - r := ResourceStateMachine() + r := resourceStateMachine() d := r.Data(nil) - d.SetId(aws.StringValue(v.StateMachineArn)) + d.SetId(aws.ToString(v.StateMachineArn)) sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) } - - return !lastPage - }) - - if awsv1.SkipSweepError(err) { - log.Printf("[WARN] Skipping Step Functions State Machine sweep for %s: %s", region, err) - return nil - } - - if err != nil { - return fmt.Errorf("error listing Step Functions State Machines (%s): %w", region, err) } err = sweep.SweepOrchestrator(ctx, sweepResources) diff --git a/internal/service/sfn/tags_gen.go b/internal/service/sfn/tags_gen.go index e05a11c1277..715c9f68cd5 100644 --- a/internal/service/sfn/tags_gen.go +++ b/internal/service/sfn/tags_gen.go @@ -5,9 
+5,9 @@ import ( "context" "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sfn" - "github.com/aws/aws-sdk-go/service/sfn/sfniface" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/sfn" + awstypes "github.com/aws/aws-sdk-go-v2/service/sfn/types" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" @@ -19,12 +19,12 @@ import ( // listTags lists sfn service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. -func listTags(ctx context.Context, conn sfniface.SFNAPI, identifier string) (tftags.KeyValueTags, error) { +func listTags(ctx context.Context, conn *sfn.Client, identifier string, optFns ...func(*sfn.Options)) (tftags.KeyValueTags, error) { input := &sfn.ListTagsForResourceInput{ ResourceArn: aws.String(identifier), } - output, err := conn.ListTagsForResourceWithContext(ctx, input) + output, err := conn.ListTagsForResource(ctx, input, optFns...) if err != nil { return tftags.New(ctx, nil), err @@ -36,7 +36,7 @@ func listTags(ctx context.Context, conn sfniface.SFNAPI, identifier string) (tft // ListTags lists sfn service tags and set them in Context. // It is called from outside this package. func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier string) error { - tags, err := listTags(ctx, meta.(*conns.AWSClient).SFNConn(ctx), identifier) + tags, err := listTags(ctx, meta.(*conns.AWSClient).SFNClient(ctx), identifier) if err != nil { return err @@ -52,11 +52,11 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri // []*SERVICE.Tag handling // Tags returns sfn service tags. 
-func Tags(tags tftags.KeyValueTags) []*sfn.Tag { - result := make([]*sfn.Tag, 0, len(tags)) +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &sfn.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -68,11 +68,11 @@ func Tags(tags tftags.KeyValueTags) []*sfn.Tag { } // KeyValueTags creates tftags.KeyValueTags from sfn service tags. -func KeyValueTags(ctx context.Context, tags []*sfn.Tag) tftags.KeyValueTags { +func KeyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -80,7 +80,7 @@ func KeyValueTags(ctx context.Context, tags []*sfn.Tag) tftags.KeyValueTags { // getTagsIn returns sfn service tags from Context. // nil is returned if there are no input tags. -func getTagsIn(ctx context.Context) []*sfn.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags @@ -91,7 +91,7 @@ func getTagsIn(ctx context.Context) []*sfn.Tag { } // setTagsOut sets sfn service tags in Context. -func setTagsOut(ctx context.Context, tags []*sfn.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } @@ -100,7 +100,7 @@ func setTagsOut(ctx context.Context, tags []*sfn.Tag) { // updateTags updates sfn service tags. // The identifier is typically the Amazon Resource Name (ARN), although // it may also be a different identifier depending on the service. 
-func updateTags(ctx context.Context, conn sfniface.SFNAPI, identifier string, oldTagsMap, newTagsMap any) error { +func updateTags(ctx context.Context, conn *sfn.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*sfn.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -111,10 +111,10 @@ func updateTags(ctx context.Context, conn sfniface.SFNAPI, identifier string, ol if len(removedTags) > 0 { input := &sfn.UntagResourceInput{ ResourceArn: aws.String(identifier), - TagKeys: aws.StringSlice(removedTags.Keys()), + TagKeys: removedTags.Keys(), } - _, err := conn.UntagResourceWithContext(ctx, input) + _, err := conn.UntagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("untagging resource (%s): %w", identifier, err) @@ -129,7 +129,7 @@ func updateTags(ctx context.Context, conn sfniface.SFNAPI, identifier string, ol Tags: Tags(updatedTags), } - _, err := conn.TagResourceWithContext(ctx, input) + _, err := conn.TagResource(ctx, input, optFns...) if err != nil { return fmt.Errorf("tagging resource (%s): %w", identifier, err) @@ -142,5 +142,5 @@ func updateTags(ctx context.Context, conn sfniface.SFNAPI, identifier string, ol // UpdateTags updates sfn service tags. // It is called from outside this package. func (p *servicePackage) UpdateTags(ctx context.Context, meta any, identifier string, oldTags, newTags any) error { - return updateTags(ctx, meta.(*conns.AWSClient).SFNConn(ctx), identifier, oldTags, newTags) + return updateTags(ctx, meta.(*conns.AWSClient).SFNClient(ctx), identifier, oldTags, newTags) } diff --git a/internal/service/shield/service_endpoint_resolver_gen.go b/internal/service/shield/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..c5b364751a9 --- /dev/null +++ b/internal/service/shield/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package shield + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + shield_sdkv2 "github.com/aws/aws-sdk-go-v2/service/shield" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ shield_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver shield_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: shield_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params shield_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up shield endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return 
r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*shield_sdkv2.Options) { + return func(o *shield_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/shield/service_endpoints_gen_test.go b/internal/service/shield/service_endpoints_gen_test.go index 9a6a0d0937f..b3b2acfbecd 100644 --- a/internal/service/shield/service_endpoints_gen_test.go +++ b/internal/service/shield/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := shield_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), shield_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := shield_sdkv2.NewDefaultEndpointResolverV2() ep, err 
:= r.ResolveEndpoint(context.Background(), shield_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/shield/service_package.go b/internal/service/shield/service_package.go index a4f0521be0f..675d3e9e449 100644 --- a/internal/service/shield/service_package.go +++ b/internal/service/shield/service_package.go @@ -16,22 +16,20 @@ import ( func (p *servicePackage) NewClient(ctx context.Context, config 
map[string]any) (*shield.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - // Force "global" services to correct Regions. - if config["partition"].(string) == names.StandardPartitionID { - cfg.Region = names.USEast1RegionID - } - - return shield.NewFromConfig(cfg, func(o *shield.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateDisabled + return shield.NewFromConfig(cfg, + shield.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + func(o *shield.Options) { + // Force "global" services to correct Regions. + if config["partition"].(string) == names.StandardPartitionID { + if cfg.Region != names.USEast1RegionID { + tflog.Info(ctx, "overriding region", map[string]any{ + "original_region": cfg.Region, + "override_region": names.USEast1RegionID, + }) + o.Region = names.USEast1RegionID + } } - } - }), nil + }, + ), nil } diff --git a/internal/service/shield/service_package_gen.go b/internal/service/shield/service_package_gen.go index 5ff95a39e3c..b68127ffff0 100644 --- a/internal/service/shield/service_package_gen.go +++ b/internal/service/shield/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
package shield diff --git a/internal/service/signer/service_endpoint_resolver_gen.go b/internal/service/signer/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..298c6e9901e --- /dev/null +++ b/internal/service/signer/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package signer + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + signer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/signer" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ signer_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver signer_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: signer_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params signer_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up signer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*signer_sdkv2.Options) { + return func(o *signer_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/signer/service_endpoints_gen_test.go b/internal/service/signer/service_endpoints_gen_test.go index 8877c145270..4b785ccd0fc 100644 --- a/internal/service/signer/service_endpoints_gen_test.go +++ b/internal/service/signer/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := signer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
signer_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := signer_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), signer_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: 
endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/signer/service_package_gen.go b/internal/service/signer/service_package_gen.go index 7574543d6a9..b5629cb5327 100644 --- a/internal/service/signer/service_package_gen.go +++ b/internal/service/signer/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package signer @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" signer_sdkv2 "github.com/aws/aws-sdk-go-v2/service/signer" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -65,19 +64,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*signer_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return signer_sdkv2.NewFromConfig(cfg, func(o *signer_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return signer_sdkv2.NewFromConfig(cfg, + signer_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/simpledb/service_endpoint_resolver_gen.go b/internal/service/simpledb/service_endpoint_resolver_gen.go new file mode 
100644 index 00000000000..a046e07c6c9 --- /dev/null +++ b/internal/service/simpledb/service_endpoint_resolver_gen.go @@ -0,0 +1,76 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package simpledb + +import ( + "context" + "fmt" + "net" + "net/url" + + endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ endpoints_sdkv1.Resolver = resolverSDKv1{} + +type resolverSDKv1 struct { + ctx context.Context +} + +func newEndpointResolverSDKv1(ctx context.Context) resolverSDKv1 { + return resolverSDKv1{ + ctx: ctx, + } +} + +func (r resolverSDKv1) EndpointFor(service, region string, opts ...func(*endpoints_sdkv1.Options)) (endpoint endpoints_sdkv1.ResolvedEndpoint, err error) { + ctx := r.ctx + + var opt endpoints_sdkv1.Options + opt.Set(opts...) + + useFIPS := opt.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled + + defaultResolver := endpoints_sdkv1.DefaultResolver() + + if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = defaultResolver.EndpointFor(service, region, opts...) 
+ if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URL, + }) + + var endpointURL *url.URL + endpointURL, err = url.Parse(endpoint.URL) + if err != nil { + return endpoint, err + } + + hostname := endpointURL.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + opts = append(opts, func(o *endpoints_sdkv1.Options) { + o.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled + }) + } else { + err = fmt.Errorf("looking up accessanalyzer endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return defaultResolver.EndpointFor(service, region, opts...) +} diff --git a/internal/service/simpledb/service_endpoints_gen_test.go b/internal/service/simpledb/service_endpoints_gen_test.go index 65786f2819d..d8083cc2992 100644 --- a/internal/service/simpledb/service_endpoints_gen_test.go +++ b/internal/service/simpledb/service_endpoints_gen_test.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "maps" + "net" "net/url" "os" "path/filepath" @@ -88,7 +89,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -271,7 +272,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -292,12 +293,12 @@ func TestEndpointConfiguration(t 
*testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(simpledb_sdkv1.EndpointsID, region) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -306,17 +307,17 @@ func defaultEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := endpoints.DefaultResolver() ep, err := r.EndpointFor(simpledb_sdkv1.EndpointsID, region, func(opt *endpoints.Options) { opt.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled }) if err != nil { - return err.Error() + return url.URL{}, err } url, _ := url.Parse(ep.URL) @@ -325,7 +326,7 @@ func defaultFIPSEndpoint(region string) string { url.Path = "/" } - return url.String() + return *url, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -396,16 +397,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := 
errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/simpledb/service_package_gen.go b/internal/service/simpledb/service_package_gen.go index d3c1ecf5893..2f25ee7b39f 100644 --- a/internal/service/simpledb/service_package_gen.go +++ b/internal/service/simpledb/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package simpledb @@ -6,7 +6,6 @@ import ( "context" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" simpledb_sdkv1 "github.com/aws/aws-sdk-go/service/simpledb" "github.com/hashicorp/terraform-plugin-log/tflog" @@ -52,11 +51,8 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*s "tf_aws.endpoint": endpoint, }) cfg.Endpoint = aws_sdkv1.String(endpoint) - - if sess.Config.UseFIPSEndpoint == endpoints_sdkv1.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - cfg.UseFIPSEndpoint = endpoints_sdkv1.FIPSEndpointStateDisabled - } + } else { + cfg.EndpointResolver = newEndpointResolverSDKv1(ctx) } return simpledb_sdkv1.New(sess.Copy(&cfg)), nil diff --git a/internal/service/sns/service_endpoint_resolver_gen.go b/internal/service/sns/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..2d7084ab7b6 --- /dev/null +++ b/internal/service/sns/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. 
+ +package sns + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + sns_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sns" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ sns_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver sns_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: sns_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params sns_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up sns endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, 
params) +} + +func withBaseEndpoint(endpoint string) func(*sns_sdkv2.Options) { + return func(o *sns_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/sns/service_endpoints_gen_test.go b/internal/service/sns/service_endpoints_gen_test.go index c0a08d95c41..cc432e099d6 100644 --- a/internal/service/sns/service_endpoints_gen_test.go +++ b/internal/service/sns/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := sns_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), sns_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := sns_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), 
sns_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/sns/service_package_gen.go b/internal/service/sns/service_package_gen.go index 8b643de6661..732d3b2a793 100644 --- a/internal/service/sns/service_package_gen.go +++ b/internal/service/sns/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. 
+// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package sns @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" sns_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sns" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -73,19 +72,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*sns_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return sns_sdkv2.NewFromConfig(cfg, func(o *sns_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return sns_sdkv2.NewFromConfig(cfg, + sns_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/sns/tags_gen.go b/internal/service/sns/tags_gen.go index c8e34e4324a..ec243c1bc76 100644 --- a/internal/service/sns/tags_gen.go +++ b/internal/service/sns/tags_gen.go @@ -98,12 +98,12 @@ func setTagsOut(ctx context.Context, tags []awstypes.Tag) { } // createTags creates sns service tags for new resources. 
-func createTags(ctx context.Context, conn *sns.Client, identifier string, tags []awstypes.Tag) error { +func createTags(ctx context.Context, conn *sns.Client, identifier string, tags []awstypes.Tag, optFns ...func(*sns.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates sns service tags. diff --git a/internal/service/sqs/service_endpoint_resolver_gen.go b/internal/service/sqs/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..cbbbf1ebfc7 --- /dev/null +++ b/internal/service/sqs/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package sqs + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + sqs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sqs" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ sqs_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver sqs_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: sqs_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params sqs_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } 
else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up sqs endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*sqs_sdkv2.Options) { + return func(o *sqs_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/sqs/service_endpoints_gen_test.go b/internal/service/sqs/service_endpoints_gen_test.go index 540b319ec6f..f10a5421d62 100644 --- a/internal/service/sqs/service_endpoints_gen_test.go +++ b/internal/service/sqs/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, 
"use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := sqs_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), sqs_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := sqs_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), sqs_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := 
endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/sqs/service_package_gen.go b/internal/service/sqs/service_package_gen.go index d9af2ca4333..66485af9376 100644 --- a/internal/service/sqs/service_package_gen.go +++ b/internal/service/sqs/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package sqs @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" sqs_sdkv2 "github.com/aws/aws-sdk-go-v2/service/sqs" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -72,19 +71,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*sqs_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return sqs_sdkv2.NewFromConfig(cfg, func(o *sqs_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return sqs_sdkv2.NewFromConfig(cfg, + 
sqs_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/sqs/tags_gen.go b/internal/service/sqs/tags_gen.go index 7545ba066bc..af47329dd48 100644 --- a/internal/service/sqs/tags_gen.go +++ b/internal/service/sqs/tags_gen.go @@ -80,12 +80,12 @@ func setTagsOut(ctx context.Context, tags map[string]string) { } // createTags creates sqs service tags for new resources. -func createTags(ctx context.Context, conn *sqs.Client, identifier string, tags map[string]string) error { +func createTags(ctx context.Context, conn *sqs.Client, identifier string, tags map[string]string, optFns ...func(*sqs.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, nil, tags) + return updateTags(ctx, conn, identifier, nil, tags, optFns...) } // updateTags updates sqs service tags. diff --git a/internal/service/ssm/association.go b/internal/service/ssm/association.go index 684ea2ef4bd..7ab40237eca 100644 --- a/internal/service/ssm/association.go +++ b/internal/service/ssm/association.go @@ -24,11 +24,14 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tfmaps "github.com/hashicorp/terraform-provider-aws/internal/maps" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKResource("aws_ssm_association", name="Association") +// @Tags(identifierAttribute="id", resourceType="Association") func resourceAssociation() *schema.Resource { //lintignore:R011 return &schema.Resource{ @@ -143,6 +146,8 @@ func resourceAssociation() *schema.Resource { Optional: true, ValidateDiagFunc: 
enum.Validate[awstypes.AssociationSyncCompliance](), }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), "targets": { Type: schema.TypeList, Optional: true, @@ -169,6 +174,8 @@ func resourceAssociation() *schema.Resource { Optional: true, }, }, + + CustomizeDiff: verify.SetTagsDiff, } } @@ -179,6 +186,7 @@ func resourceAssociationCreate(ctx context.Context, d *schema.ResourceData, meta name := d.Get(names.AttrName).(string) input := &ssm.CreateAssociationInput{ Name: aws.String(name), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("apply_only_at_cron_interval"); ok { @@ -304,63 +312,65 @@ func resourceAssociationUpdate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).SSMClient(ctx) - // AWS creates a new version every time the association is updated, so everything should be passed in the update. - input := &ssm.UpdateAssociationInput{ - AssociationId: aws.String(d.Id()), - } + if d.HasChangesExcept(names.AttrTags, names.AttrTagsAll) { + // AWS creates a new version every time the association is updated, so everything should be passed in the update. 
+ input := &ssm.UpdateAssociationInput{ + AssociationId: aws.String(d.Id()), + } - if v, ok := d.GetOk("apply_only_at_cron_interval"); ok { - input.ApplyOnlyAtCronInterval = v.(bool) - } + if v, ok := d.GetOk("apply_only_at_cron_interval"); ok { + input.ApplyOnlyAtCronInterval = v.(bool) + } - if v, ok := d.GetOk("association_name"); ok { - input.AssociationName = aws.String(v.(string)) - } + if v, ok := d.GetOk("association_name"); ok { + input.AssociationName = aws.String(v.(string)) + } - if v, ok := d.GetOk("automation_target_parameter_name"); ok { - input.AutomationTargetParameterName = aws.String(v.(string)) - } + if v, ok := d.GetOk("automation_target_parameter_name"); ok { + input.AutomationTargetParameterName = aws.String(v.(string)) + } - if v, ok := d.GetOk("compliance_severity"); ok { - input.ComplianceSeverity = awstypes.AssociationComplianceSeverity(v.(string)) - } + if v, ok := d.GetOk("compliance_severity"); ok { + input.ComplianceSeverity = awstypes.AssociationComplianceSeverity(v.(string)) + } - if v, ok := d.GetOk("document_version"); ok { - input.DocumentVersion = aws.String(v.(string)) - } + if v, ok := d.GetOk("document_version"); ok { + input.DocumentVersion = aws.String(v.(string)) + } - if v, ok := d.GetOk("max_concurrency"); ok { - input.MaxConcurrency = aws.String(v.(string)) - } + if v, ok := d.GetOk("max_concurrency"); ok { + input.MaxConcurrency = aws.String(v.(string)) + } - if v, ok := d.GetOk("max_errors"); ok { - input.MaxErrors = aws.String(v.(string)) - } + if v, ok := d.GetOk("max_errors"); ok { + input.MaxErrors = aws.String(v.(string)) + } - if v, ok := d.GetOk("output_location"); ok { - input.OutputLocation = expandAssociationOutputLocation(v.([]interface{})) - } + if v, ok := d.GetOk("output_location"); ok { + input.OutputLocation = expandAssociationOutputLocation(v.([]interface{})) + } - if v, ok := d.GetOk(names.AttrParameters); ok { - input.Parameters = expandParameters(v.(map[string]interface{})) - } + if v, ok := 
d.GetOk(names.AttrParameters); ok { + input.Parameters = expandParameters(v.(map[string]interface{})) + } - if v, ok := d.GetOk(names.AttrScheduleExpression); ok { - input.ScheduleExpression = aws.String(v.(string)) - } + if v, ok := d.GetOk(names.AttrScheduleExpression); ok { + input.ScheduleExpression = aws.String(v.(string)) + } - if d.HasChange("sync_compliance") { - input.SyncCompliance = awstypes.AssociationSyncCompliance(d.Get("sync_compliance").(string)) - } + if d.HasChange("sync_compliance") { + input.SyncCompliance = awstypes.AssociationSyncCompliance(d.Get("sync_compliance").(string)) + } - if _, ok := d.GetOk("targets"); ok { - input.Targets = expandTargets(d.Get("targets").([]interface{})) - } + if _, ok := d.GetOk("targets"); ok { + input.Targets = expandTargets(d.Get("targets").([]interface{})) + } - _, err := conn.UpdateAssociation(ctx, input) + _, err := conn.UpdateAssociation(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating SSM Association (%s): %s", d.Id(), err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating SSM Association (%s): %s", d.Id(), err) + } } return append(diags, resourceAssociationRead(ctx, d, meta)...) diff --git a/internal/service/ssm/association_tags_gen_test.go b/internal/service/ssm/association_tags_gen_test.go new file mode 100644 index 00000000000..cb329913552 --- /dev/null +++ b/internal/service/ssm/association_tags_gen_test.go @@ -0,0 +1,1771 @@ +// Code generated by internal/generate/tagstests/main.go; DO NOT EDIT. 
+ +package ssm_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-testing/config" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/knownvalue" + "github.com/hashicorp/terraform-plugin-testing/plancheck" + "github.com/hashicorp/terraform-plugin-testing/statecheck" + "github.com/hashicorp/terraform-plugin-testing/tfjsonpath" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSSMAssociation_tags(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + 
acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_null(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + 
ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_AddOnUpdate(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: 
[]resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_EmptyTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + 
plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_EmptyTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + 
ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + acctest.CtKey2: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + acctest.CtKey2: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_EmptyTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: 
[]plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccSSMAssociation_tags_DefaultTags_providerOnly(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + 
acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1Updated), + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + 
ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1Updated), + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey2: knownvalue.StringExact(acctest.CtValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey2: config.StringVariable(acctest.CtValue2), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_DefaultTags_nonOverlapping(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + 
resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: 
knownvalue.StringExact(acctest.CtProviderValue1), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable("providervalue1updated"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + 
acctest.CtProviderKey1: knownvalue.StringExact("providervalue1updated"), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact("providervalue1updated"), + acctest.CtResourceKey1: knownvalue.StringExact(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable("providervalue1updated"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: config.StringVariable(acctest.CtResourceValue1Updated), + acctest.CtResourceKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: 
config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{})), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_DefaultTags_overlapping(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + }), + }, + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue1), + acctest.CtOverlapKey2: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + acctest.CtOverlapKey2: config.StringVariable("providervalue2"), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue1), + acctest.CtOverlapKey2: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + statecheck.ExpectKnownValue(resourceName, 
tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtOverlapKey1: knownvalue.StringExact(acctest.CtResourceValue2), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtOverlapKey1: config.StringVariable(acctest.CtResourceValue2), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_DefaultTags_updateToProviderOnly(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), 
+ ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), 
knownvalue.MapExact(map[string]knownvalue.Check{})), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{})), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_DefaultTags_updateToResourceOnly(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: 
config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: 
resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_DefaultTags_emptyResourceTag(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + Check: 
resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_DefaultTags_emptyProviderOnlyTag(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + 
Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(""), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + // TODO: Should be known + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(""), + }), + acctest.CtResourceTags: nil, + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_DefaultTags_nullOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ 
+ PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtProviderValue1), + }), + 
acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_DefaultTags_nullNonOverlappingResourceTag(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.Null()), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), 
knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtProviderKey1: knownvalue.StringExact(acctest.CtProviderValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags_defaults/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtProviderTags: config.MapVariable(map[string]config.Variable{ + acctest.CtProviderKey1: config.StringVariable(acctest.CtProviderValue1), + }), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtResourceKey1: nil, + }), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_ComputedTag_OnCreate(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, 
plancheck.ResourceActionCreate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_ComputedTag_OnUpdate_Add(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: 
knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "tags.computedkey1", "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(2)), + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapPartial(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tagsComputed2/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable("computedkey1"), + "knownTagKey": config.StringVariable(acctest.CtKey1), + "knownTagValue": config.StringVariable(acctest.CtValue1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSMAssociation_tags_ComputedTag_OnUpdate_Replace(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_ssm_association.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSMServiceID), + CheckDestroy: testAccCheckAssociationDestroy(ctx), + Steps: []resource.TestStep{ + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tags/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + acctest.CtResourceTags: config.MapVariable(map[string]config.Variable{ + acctest.CtKey1: config.StringVariable(acctest.CtValue1), + }), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + 
plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionCreate), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + plancheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTagsAll), knownvalue.MapExact(map[string]knownvalue.Check{ + acctest.CtKey1: knownvalue.StringExact(acctest.CtValue1), + })), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": config.StringVariable(acctest.CtKey1), + }, + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckAssociationExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, acctest.CtTagsKey1, "null_resource.test", names.AttrID), + ), + ConfigStateChecks: []statecheck.StateCheck{ + statecheck.ExpectKnownValue(resourceName, tfjsonpath.New(names.AttrTags), knownvalue.MapSizeExact(1)), + }, + ConfigPlanChecks: resource.ConfigPlanChecks{ + PreApply: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionUpdate), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTags)), + plancheck.ExpectUnknownValue(resourceName, tfjsonpath.New(names.AttrTagsAll)), + }, + PostApplyPreRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + PostApplyPostRefresh: []plancheck.PlanCheck{ + plancheck.ExpectResourceAction(resourceName, plancheck.ResourceActionNoop), + }, + }, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + ConfigDirectory: config.StaticDirectory("testdata/Association/tagsComputed1/"), + ConfigVariables: config.Variables{ + acctest.CtRName: config.StringVariable(rName), + "unknownTagKey": 
config.StringVariable(acctest.CtKey1), + }, + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/internal/service/ssm/service_endpoint_resolver_gen.go b/internal/service/ssm/service_endpoint_resolver_gen.go new file mode 100644 index 00000000000..e40aad9a4db --- /dev/null +++ b/internal/service/ssm/service_endpoint_resolver_gen.go @@ -0,0 +1,82 @@ +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. + +package ssm + +import ( + "context" + "fmt" + "net" + + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + ssm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ssm" + smithyendpoints "github.com/aws/smithy-go/endpoints" + "github.com/hashicorp/terraform-plugin-log/tflog" + "github.com/hashicorp/terraform-provider-aws/internal/errs" +) + +var _ ssm_sdkv2.EndpointResolverV2 = resolverSDKv2{} + +type resolverSDKv2 struct { + defaultResolver ssm_sdkv2.EndpointResolverV2 +} + +func newEndpointResolverSDKv2() resolverSDKv2 { + return resolverSDKv2{ + defaultResolver: ssm_sdkv2.NewDefaultEndpointResolverV2(), + } +} + +func (r resolverSDKv2) ResolveEndpoint(ctx context.Context, params ssm_sdkv2.EndpointParameters) (endpoint smithyendpoints.Endpoint, err error) { + params = params.WithDefaults() + useFIPS := aws_sdkv2.ToBool(params.UseFIPS) + + if eps := params.Endpoint; aws_sdkv2.ToString(eps) != "" { + tflog.Debug(ctx, "setting endpoint", map[string]any{ + "tf_aws.endpoint": endpoint, + }) + + if useFIPS { + tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") + params.UseFIPS = aws_sdkv2.Bool(false) + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) + } else if useFIPS { + ctx = tflog.SetField(ctx, "tf_aws.use_fips", useFIPS) + + endpoint, err = r.defaultResolver.ResolveEndpoint(ctx, params) + if err != nil { + return endpoint, err + } + + tflog.Debug(ctx, "endpoint resolved", map[string]any{ + "tf_aws.endpoint": endpoint.URI.String(), + }) + + hostname := 
endpoint.URI.Hostname() + _, err = net.LookupHost(hostname) + if err != nil { + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + tflog.Debug(ctx, "default endpoint host not found, disabling FIPS", map[string]any{ + "tf_aws.hostname": hostname, + }) + params.UseFIPS = aws_sdkv2.Bool(false) + } else { + err = fmt.Errorf("looking up ssm endpoint %q: %s", hostname, err) + return + } + } else { + return endpoint, err + } + } + + return r.defaultResolver.ResolveEndpoint(ctx, params) +} + +func withBaseEndpoint(endpoint string) func(*ssm_sdkv2.Options) { + return func(o *ssm_sdkv2.Options) { + if endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + } +} diff --git a/internal/service/ssm/service_endpoints_gen_test.go b/internal/service/ssm/service_endpoints_gen_test.go index dc8de4b23d1..c325e7ec88d 100644 --- a/internal/service/ssm/service_endpoints_gen_test.go +++ b/internal/service/ssm/service_endpoints_gen_test.go @@ -7,6 +7,8 @@ import ( "errors" "fmt" "maps" + "net" + "net/url" "os" "path/filepath" "reflect" @@ -86,7 +88,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S testcases := map[string]endpointTestCase{ "no config": { with: []setupFunc{withNoConfig}, - expected: expectDefaultEndpoint(expectedEndpointRegion), + expected: expectDefaultEndpoint(t, expectedEndpointRegion), }, // Package name endpoint on Config @@ -220,7 +222,7 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S with: []setupFunc{ withUseFIPSInConfig, }, - expected: expectDefaultFIPSEndpoint(expectedEndpointRegion), + expected: expectDefaultFIPSEndpoint(t, expectedEndpointRegion), }, "use fips config with package name endpoint config": { @@ -241,24 +243,24 @@ func TestEndpointConfiguration(t *testing.T) { //nolint:paralleltest // uses t.S } } -func defaultEndpoint(region string) string { +func defaultEndpoint(region string) (url.URL, error) { r := ssm_sdkv2.NewDefaultEndpointResolverV2() 
ep, err := r.ResolveEndpoint(context.Background(), ssm_sdkv2.EndpointParameters{ Region: aws_sdkv2.String(region), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } -func defaultFIPSEndpoint(region string) string { +func defaultFIPSEndpoint(region string) (url.URL, error) { r := ssm_sdkv2.NewDefaultEndpointResolverV2() ep, err := r.ResolveEndpoint(context.Background(), ssm_sdkv2.EndpointParameters{ @@ -266,14 +268,14 @@ func defaultFIPSEndpoint(region string) string { UseFIPS: aws_sdkv2.Bool(true), }) if err != nil { - return err.Error() + return url.URL{}, err } if ep.URI.Path == "" { ep.URI.Path = "/" } - return ep.URI.String() + return ep.URI, nil } func callService(ctx context.Context, t *testing.T, meta *conns.AWSClient) apiCallParams { @@ -335,16 +337,38 @@ func withUseFIPSInConfig(setup *caseSetup) { setup.config["use_fips_endpoint"] = true } -func expectDefaultEndpoint(region string) caseExpectations { +func expectDefaultEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer default endpoint: %s", err) + } + return caseExpectations{ - endpoint: defaultEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } -func expectDefaultFIPSEndpoint(region string) caseExpectations { +func expectDefaultFIPSEndpoint(t *testing.T, region string) caseExpectations { + t.Helper() + + endpoint, err := defaultFIPSEndpoint(region) + if err != nil { + t.Fatalf("resolving accessanalyzer FIPS endpoint: %s", err) + } + + hostname := endpoint.Hostname() + _, err = net.LookupHost(hostname) + if dnsErr, ok := errs.As[*net.DNSError](err); ok && dnsErr.IsNotFound { + return expectDefaultEndpoint(t, region) + } else if err != nil { + t.Fatalf("looking up accessanalyzer endpoint %q: %s", hostname, err) + } + return caseExpectations{ - endpoint: 
defaultFIPSEndpoint(region), + endpoint: endpoint.String(), region: expectedCallRegion, } } diff --git a/internal/service/ssm/service_package_gen.go b/internal/service/ssm/service_package_gen.go index 9ffd8c4e99d..d3f13d7eaa3 100644 --- a/internal/service/ssm/service_package_gen.go +++ b/internal/service/ssm/service_package_gen.go @@ -1,4 +1,4 @@ -// Code generated by internal/generate/servicepackages/main.go; DO NOT EDIT. +// Code generated by internal/generate/servicepackage/main.go; DO NOT EDIT. package ssm @@ -7,7 +7,6 @@ import ( aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" ssm_sdkv2 "github.com/aws/aws-sdk-go-v2/service/ssm" - "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/names" @@ -70,6 +69,10 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka Factory: resourceAssociation, TypeName: "aws_ssm_association", Name: "Association", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: names.AttrID, + ResourceType: "Association", + }, }, { Factory: resourceDefaultPatchBaseline, @@ -148,19 +151,10 @@ func (p *servicePackage) ServicePackageName() string { func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*ssm_sdkv2.Client, error) { cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) - return ssm_sdkv2.NewFromConfig(cfg, func(o *ssm_sdkv2.Options) { - if endpoint := config[names.AttrEndpoint].(string); endpoint != "" { - tflog.Debug(ctx, "setting endpoint", map[string]any{ - "tf_aws.endpoint": endpoint, - }) - o.BaseEndpoint = aws_sdkv2.String(endpoint) - - if o.EndpointOptions.UseFIPSEndpoint == aws_sdkv2.FIPSEndpointStateEnabled { - tflog.Debug(ctx, "endpoint set, ignoring UseFIPSEndpoint setting") - o.EndpointOptions.UseFIPSEndpoint = aws_sdkv2.FIPSEndpointStateDisabled - } - } - }), nil + return 
ssm_sdkv2.NewFromConfig(cfg, + ssm_sdkv2.WithEndpointResolverV2(newEndpointResolverSDKv2()), + withBaseEndpoint(config[names.AttrEndpoint].(string)), + ), nil } func ServicePackage(ctx context.Context) conns.ServicePackage { diff --git a/internal/service/ssm/tags_gen.go b/internal/service/ssm/tags_gen.go index 5a17cb75c31..6b2a6bc389d 100644 --- a/internal/service/ssm/tags_gen.go +++ b/internal/service/ssm/tags_gen.go @@ -99,12 +99,12 @@ func setTagsOut(ctx context.Context, tags []awstypes.Tag) { } // createTags creates ssm service tags for new resources. -func createTags(ctx context.Context, conn *ssm.Client, identifier, resourceType string, tags []awstypes.Tag) error { +func createTags(ctx context.Context, conn *ssm.Client, identifier, resourceType string, tags []awstypes.Tag, optFns ...func(*ssm.Options)) error { if len(tags) == 0 { return nil } - return updateTags(ctx, conn, identifier, resourceType, nil, KeyValueTags(ctx, tags)) + return updateTags(ctx, conn, identifier, resourceType, nil, KeyValueTags(ctx, tags), optFns...) } // updateTags updates ssm service tags. diff --git a/internal/service/ssm/testdata/Association/tags/main_gen.tf b/internal/service/ssm/testdata/Association/tags/main_gen.tf new file mode 100644 index 00000000000..97edc33bd17 --- /dev/null +++ b/internal/service/ssm/testdata/Association/tags/main_gen.tf @@ -0,0 +1,53 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +resource "aws_ssm_association" "test" { + name = aws_ssm_document.test.name + schedule_expression = "cron(0 16 ? 
* WED *)" + + targets { + key = "tag:Name" + values = ["acceptanceTest"] + } + + tags = var.resource_tags +} + +resource "aws_ssm_document" "test" { + name = var.rName + document_type = "Command" + + content = <_` | -| 17 | **HumanFriendly** | Code | [REQUIRED] Human-friendly name of service as used by AWS; documentation `subcategory` must exactly match this value; used in website navigation and error messages | -| 18 | **Brand** | Code | Either `Amazon`, `AWS`, or blank (rare) as used by AWS; used in error messages | -| 19 | **Exclude** | Code | Whether the service should be included; if included (blank), **ProviderPackageActual** or **ProviderPackageCorrect** must have a value | -| 20 | **NotImplemented** | Code | Whether the service is implemented by the provider | -| 21 | **EndpointOnly** | Code | If **NotImplemented** is non-blank, whether the service endpoint should be included in the provider `endpoints` configuration | -| 22 | **AllowedSubcategory** | Code | If **Exclude** is non-blank, whether to include **HumanFriendly** in `website/allowed-subcategories.txt` anyway. In other words, if non-blank, overrides **Exclude** in some situations. Some excluded pseudo-services (_e.g._, VPC is part of EC2) are still subcategories. Only applies if **Exclude** is non-blank. | -| 23 | **DeprecatedEnvVar** | Code | Deprecated `AWS__ENDPOINT` envvar defined for some services | -| 24 | **TFAWSEnvVar** | Code | `TF_AWS__ENDPOINT` envvar defined for some services | -| 25 | **Note** | Reference | Very brief note usually to explain why excluded | +After any edits to `data/names_data.hcl`, run `make gen`. Doing so regenerates code and performs checks on `data/names_data.hcl`. 
+ +The schema of the attributes and blocks of `data/names_data.hcl` are as follows: + +```hcl +service "" { + + // If both of these attributes are the same as the service block's name, this block will be ommitted + cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + // If both of these attributes are the same as the service block's name, this block will be ommitted + go_packages { + v1_package = "" + v2_package = "" + } + + // If any blocks below here have attirbutes with empty strings or false bools, they will be ommitted + // Blocks with zero attributes will be ommitted + sdk { + id = "" + client_version = [] + } + + names { + aliases = [""] // This can also be excluded if it is empty + provider_name_upper = "" + human_friendly = "" + } + + client { + go_v1_client_typename = "" + skip_client_generate = bool + } + + env_var { + deprecated_env_var = "" + tf_aws_env_var = "" + } + + endpoint_info { + endpoint_api_call = "" + endpoint_api_params = "" + endpoint_region_override = "" + endpoint_only = bool + } + + resource_prefix { + actual = "" + correct = "" + } + + provider_package_correct = "" + split_package = "" + file_prefix = "" + doc_prefix = [""] + brand = "" + exclude = bool + not_implemented = bool + allowed_subcategory = bool + note = "" +} + +``` + +The explanation of the attributes of `data/names_data.hcl` are as follows: + +| Name | Use | Description | +| --- | --- | --- | +| **ProviderPackageActual** | Code | Actual TF AWS provide package name _if_ `provider_package_correct` is not used; takes precedence over `provider_package_correct` for service block name if both are defined | +| `aws_cli_v2_command` | Reference | Service command in [AWS CLI v2](https://awscli.amazonaws.com/v2/documentation/api/latest/index.html) | +| `aws_cli_v2_command_no_dashes` | Reference | Same as `aws_cli_v2_command` without dashes | +| `v1_package` | Code | [AWS SDK for Go v1](https://docs.aws.amazon.com/sdk-for-go/api/) package name | +| 
`v2_package` | Code | [AWS SDK for Go v2](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2) package name | +| `id` | Code | Represents the ServiceID of a AWS service which is a unique identifier of a specific service | +| `client_version` | Code | HCL int list containing if in the TF AWS Provider, the service currently uses AWS SDK for Go v1 and/or v2; each integer represents the correlating version| +| `aliases` | Code | HCL string list of name variations (_e.g._, for "AMP", `prometheus,prometheusservice`). Do not include **ProviderPackageActual (or `provider_package_correct`, if blank) since that will create duplicates in the [Custom Endpoints guide](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/guides/custom-service-endpoints). | +| `provider_name_upper` | Code | [Correctly capitalized](https://hashicorp.github.io/terraform-provider-aws/naming/#mixed-caps) `ProviderPackageActual`, if it exists, otherwise `provider_package_correct` | +| `human_friendly` | Code | [REQUIRED] Human-friendly name of service as used by AWS; documentation `subcategory` must exactly match this value; used in website navigation and error messages | +| `go_v1_client_typename` | Code | _Exact name_ (_i.e._, spelling and capitalization) of the AWS SDK for Go v1 client type (_e.g._, see the [`New()` return type](https://docs.aws.amazon.com/sdk-for-go/api/service/ses/#New) for SES). 
Also excluded when service only supports AWS SDK for Go v2| +| `skip_client_generate` | Code | Some service clients need special configuration rather than the default generated configuration; use a non-empty value to skip generation but you must then manually configure the client in `internal/conns/config.go` | +| `deprecated_env_var` | Code | Deprecated `AWS__ENDPOINT` envvar defined for some services | +| `tf_aws_env_var` | Code | `TF_AWS__ENDPOINT` envvar defined for some services | +| `endpoint_api_call` | Code | Command for the AWS cli for describing the current service | +| `endpoint_api_params` | Code | Used in `service_endpoints_gen_test.go` files for API calls that require a configured value | +| `endpoint_region_override` | Code | Specified alternate regional [endpoint]([https://docs.aws.amazon.com/general/latest/gr/rande.html) for API requests | +| `endpoint_only` | Code | Bool based on if `not_implemented` is non-blank, whether the service endpoint should be included in the provider `endpoints` configuration | +| `resource_prefix_actual` | Code | Regular expression to match anomalous TF resource name prefixes (_e.g._, for the resource name `aws_config_config_rule`, `aws_config_` will match all resources); only use if `resource_prefix_correct` is not suitable (_e.g._, `aws_codepipeline_` won't work as there is only one resource named `aws_codepipeline`); takes precedence over `resource_prefix_correct` | +| `resource_prefix_correct` | Code | Regular expression to match what resource name prefixes _should be_ (_i.e._, `aws_` + `provider_package_correct` + `_`); used if `resource_prefix_actual` is blank | +| `provider_package_correct` | Code | Shorter of `aws_cli_v2_command_no_dashes` and `v2_package`; should _not_ be blank if either exists; same as [Service Identifier](https://hashicorp.github.io/terraform-provider-aws/naming/#service-identifier); what the TF AWS Provider package name _should be_; `ProviderPackageActual` takes precedence | +| 
`split_package_real_package` | Code | If multiple "services" live in one service, this is the package where the service's Go files live (_e.g._, VPC is part of EC2) | +| `file_prefix` | Code | If multiple "services" live in one service, this is the prefix that files must have to be associated with this sub-service (_e.g._, VPC files in the EC2 service are prefixed with `vpc_`); see also `split_packages_real_packages` | +| `doc_prefix` | Code | Hcl string list of prefixes for service documentation files in `website/docs/r` and `website/docs/d`; usually only one prefix, _i.e._, `<`provider_package_correct`>_` | +| `brand` | Code | Either `Amazon`, `AWS`, or blank (rare) as used by AWS; used in error messages | +| `exclude` | Code | Bool based on whether the service should be included; if included (blank), `ProviderPackageActual` or `provider_package_correct` must have a value | +| `allowed_subcategory` | Code | Bool based on if `Exclude` is non-blank, whether to include `human_friendly` in `website/allowed-subcategories.txt` anyway. In other words, if non-blank, overrides `exclude` in some situations. Some excluded pseudo-services (_e.g._, VPC is part of EC2) are still subcategories. Only applies if `Exclude` is non-blank. | +| `not_implemented` | Code | Bool based on whether the service is implemented by the provider | +| `note` | Reference | Very brief note usually to explain why excluded | For more information about service naming, see [the Naming Guide](https://hashicorp.github.io/terraform-provider-aws/naming/#service-identifier). 
diff --git a/names/consts_gen.go b/names/consts_gen.go index 7bcfa81a4a8..c3ce84a004a 100644 --- a/names/consts_gen.go +++ b/names/consts_gen.go @@ -20,6 +20,7 @@ const ( AppStream = "appstream" AppSync = "appsync" ApplicationInsights = "applicationinsights" + ApplicationSignals = "applicationsignals" Athena = "athena" AuditManager = "auditmanager" AutoScaling = "autoscaling" @@ -70,6 +71,7 @@ const ( DMS = "dms" DRS = "drs" DS = "ds" + DataBrew = "databrew" DataExchange = "dataexchange" DataPipeline = "datapipeline" DataSync = "datasync" @@ -158,6 +160,7 @@ const ( NeptuneGraph = "neptunegraph" NetworkFirewall = "networkfirewall" NetworkManager = "networkmanager" + NetworkMonitor = "networkmonitor" ObservabilityAccessManager = "oam" OpenSearch = "opensearch" OpenSearchIngestion = "osis" @@ -261,6 +264,7 @@ const ( AppStreamServiceID = "AppStream" AppSyncServiceID = "AppSync" ApplicationInsightsServiceID = "Application Insights" + ApplicationSignalsServiceID = "Application Signals" AthenaServiceID = "Athena" AuditManagerServiceID = "AuditManager" AutoScalingServiceID = "Auto Scaling" @@ -311,6 +315,7 @@ const ( DMSServiceID = "Database Migration Service" DRSServiceID = "DRS" DSServiceID = "Directory Service" + DataBrewServiceID = "DataBrew" DataExchangeServiceID = "DataExchange" DataPipelineServiceID = "Data Pipeline" DataSyncServiceID = "DataSync" @@ -399,6 +404,7 @@ const ( NeptuneGraphServiceID = "Neptune Graph" NetworkFirewallServiceID = "Network Firewall" NetworkManagerServiceID = "NetworkManager" + NetworkMonitorServiceID = "NetworkMonitor" ObservabilityAccessManagerServiceID = "OAM" OpenSearchServiceID = "OpenSearch" OpenSearchIngestionServiceID = "OSIS" diff --git a/names/data/lookup.go b/names/data/lookup.go new file mode 100644 index 00000000000..f646d66b0c8 --- /dev/null +++ b/names/data/lookup.go @@ -0,0 +1,21 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package data + +import "fmt" + +func LookupService(name string) (result ServiceRecord, err error) { + serviceData, err := ReadAllServiceData() + if err != nil { + return result, fmt.Errorf("error reading service data: %s", err) + } + + for _, s := range serviceData { + if name == s.ProviderPackage() { + return s, nil + } + } + + return result, fmt.Errorf("package not found: %s", name) +} diff --git a/names/data/names_data.csv b/names/data/names_data.csv deleted file mode 100644 index e36b7a9a0c4..00000000000 --- a/names/data/names_data.csv +++ /dev/null @@ -1,398 +0,0 @@ -AWSCLIV2Command,AWSCLIV2CommandNoDashes,GoV1Package,GoV2Package,ProviderPackageActual,ProviderPackageCorrect,SplitPackageRealPackage,Aliases,ProviderNameUpper,GoV1ClientTypeName,SkipClientGenerate,ClientSDKV1,ClientSDKV2,ResourcePrefixActual,ResourcePrefixCorrect,FilePrefix,DocPrefix,HumanFriendly,Brand,Exclude,NotImplemented,EndpointOnly,AllowedSubcategory,DeprecatedEnvVar,TFAWSEnvVar,SDKID,EndpointAPICall,EndpointAPIParams,EndpointRegionOverride,Note -accessanalyzer,accessanalyzer,accessanalyzer,accessanalyzer,,accessanalyzer,,,AccessAnalyzer,AccessAnalyzer,,,2,,aws_accessanalyzer_,,accessanalyzer_,IAM Access Analyzer,AWS,,,,,,,AccessAnalyzer,ListAnalyzers,,, -account,account,account,account,,account,,,Account,Account,,,2,,aws_account_,,account_,Account Management,AWS,,,,,,,Account,ListRegions,,, -acm,acm,acm,acm,,acm,,,ACM,ACM,,,2,,aws_acm_,,acm_,ACM (Certificate Manager),AWS,,,,,,,ACM,ListCertificates,,, -acm-pca,acmpca,acmpca,acmpca,,acmpca,,,ACMPCA,ACMPCA,,,2,,aws_acmpca_,,acmpca_,ACM PCA (Certificate Manager Private Certificate Authority),AWS,,,,,,,ACM PCA,ListCertificateAuthorities,,, -alexaforbusiness,alexaforbusiness,alexaforbusiness,alexaforbusiness,,alexaforbusiness,,,AlexaForBusiness,AlexaForBusiness,,1,,,aws_alexaforbusiness_,,alexaforbusiness_,Alexa for Business,,,x,,,,,Alexa For Business,,,, 
-amp,amp,prometheusservice,amp,,amp,,prometheus;prometheusservice,AMP,PrometheusService,,,2,aws_prometheus_,aws_amp_,,prometheus_,AMP (Managed Prometheus),Amazon,,,,,,,amp,ListScrapers,,, -amplify,amplify,amplify,amplify,,amplify,,,Amplify,Amplify,,,2,,aws_amplify_,,amplify_,Amplify,AWS,,,,,,,Amplify,ListApps,,, -amplifybackend,amplifybackend,amplifybackend,amplifybackend,,amplifybackend,,,AmplifyBackend,AmplifyBackend,,1,,,aws_amplifybackend_,,amplifybackend_,Amplify Backend,AWS,,x,,,,,AmplifyBackend,,,, -amplifyuibuilder,amplifyuibuilder,amplifyuibuilder,amplifyuibuilder,,amplifyuibuilder,,,AmplifyUIBuilder,AmplifyUIBuilder,,1,,,aws_amplifyuibuilder_,,amplifyuibuilder_,Amplify UI Builder,AWS,,x,,,,,AmplifyUIBuilder,,,, -,,,,,,,,,,,,,,,,,Apache MXNet on AWS,AWS,x,,,,,,,,,,Documentation -apigateway,apigateway,apigateway,apigateway,,apigateway,,,APIGateway,APIGateway,x,,2,aws_api_gateway_,aws_apigateway_,,api_gateway_,API Gateway,Amazon,,,,,,,API Gateway,GetAccount,,, -apigatewaymanagementapi,apigatewaymanagementapi,apigatewaymanagementapi,apigatewaymanagementapi,,apigatewaymanagementapi,,,APIGatewayManagementAPI,ApiGatewayManagementApi,,1,,,aws_apigatewaymanagementapi_,,apigatewaymanagementapi_,API Gateway Management API,Amazon,,x,,,,,ApiGatewayManagementApi,,,, -apigatewayv2,apigatewayv2,apigatewayv2,apigatewayv2,,apigatewayv2,,,APIGatewayV2,ApiGatewayV2,x,,2,,aws_apigatewayv2_,,apigatewayv2_,API Gateway V2,Amazon,,,,,,,ApiGatewayV2,GetApis,,, -appfabric,appfabric,appfabric,appfabric,,appfabric,,,AppFabric,AppFabric,,,2,,aws_appfabric_,,appfabric_,AppFabric,AWS,,,,,,,AppFabric,ListAppBundles,,, -appmesh,appmesh,appmesh,appmesh,,appmesh,,,AppMesh,AppMesh,,1,,,aws_appmesh_,,appmesh_,App Mesh,AWS,,,,,,,App Mesh,ListMeshes,,, -apprunner,apprunner,apprunner,apprunner,,apprunner,,,AppRunner,AppRunner,,,2,,aws_apprunner_,,apprunner_,App Runner,AWS,,,,,,,AppRunner,ListConnections,,, -,,,,,,,,,,,,,,,,,App2Container,AWS,x,,,,,,,,,,No SDK support 
-appconfig,appconfig,appconfig,appconfig,,appconfig,,,AppConfig,AppConfig,,,2,,aws_appconfig_,,appconfig_,AppConfig,AWS,,,,,,,AppConfig,ListApplications,,, -appconfigdata,appconfigdata,appconfigdata,appconfigdata,,appconfigdata,,,AppConfigData,AppConfigData,,1,,,aws_appconfigdata_,,appconfigdata_,AppConfig Data,AWS,,x,,,,,AppConfigData,,,, -appflow,appflow,appflow,appflow,,appflow,,,AppFlow,Appflow,,,2,,aws_appflow_,,appflow_,AppFlow,Amazon,,,,,,,Appflow,ListFlows,,, -appintegrations,appintegrations,appintegrationsservice,appintegrations,,appintegrations,,appintegrationsservice,AppIntegrations,AppIntegrationsService,,,2,,aws_appintegrations_,,appintegrations_,AppIntegrations,Amazon,,,,,,,AppIntegrations,ListApplications,,, -application-autoscaling,applicationautoscaling,applicationautoscaling,applicationautoscaling,appautoscaling,applicationautoscaling,,applicationautoscaling,AppAutoScaling,ApplicationAutoScaling,,,2,aws_appautoscaling_,aws_applicationautoscaling_,,appautoscaling_,Application Auto Scaling,,,,,,,,Application Auto Scaling,DescribeScalableTargets,ServiceNamespace: awstypes.ServiceNamespaceEcs,, -applicationcostprofiler,applicationcostprofiler,applicationcostprofiler,applicationcostprofiler,,applicationcostprofiler,,,ApplicationCostProfiler,ApplicationCostProfiler,,1,,,aws_applicationcostprofiler_,,applicationcostprofiler_,Application Cost Profiler,AWS,,x,,,,,ApplicationCostProfiler,,,, -discovery,discovery,applicationdiscoveryservice,applicationdiscoveryservice,,discovery,,applicationdiscovery;applicationdiscoveryservice,Discovery,ApplicationDiscoveryService,,1,,,aws_discovery_,,discovery_,Application Discovery,AWS,,x,,,,,Application Discovery Service,,,, -mgn,mgn,mgn,mgn,,mgn,,,Mgn,Mgn,,1,,,aws_mgn_,,mgn_,Application Migration (Mgn),AWS,,x,,,,,mgn,,,, -appstream,appstream,appstream,appstream,,appstream,,,AppStream,AppStream,,,2,,aws_appstream_,,appstream_,AppStream 2.0,Amazon,,,,,,,AppStream,ListAssociatedFleets,"StackName: 
aws_sdkv2.String(""test"")",, -appsync,appsync,appsync,appsync,,appsync,,,AppSync,AppSync,,1,,,aws_appsync_,,appsync_,AppSync,AWS,,,,,,,AppSync,ListDomainNames,,, -,,,,,,,,,,,,,,,,,Artifact,AWS,x,,,,,,,,,,No SDK support -athena,athena,athena,athena,,athena,,,Athena,Athena,,,2,,aws_athena_,,athena_,Athena,Amazon,,,,,,,Athena,ListDataCatalogs,,, -auditmanager,auditmanager,auditmanager,auditmanager,,auditmanager,,,AuditManager,AuditManager,,,2,,aws_auditmanager_,,auditmanager_,Audit Manager,AWS,,,,,,,AuditManager,GetAccountStatus,,, -autoscaling,autoscaling,autoscaling,autoscaling,,autoscaling,,,AutoScaling,AutoScaling,,,2,aws_(autoscaling_|launch_configuration),aws_autoscaling_,,autoscaling_;launch_configuration,Auto Scaling,,,,,,,,Auto Scaling,DescribeAutoScalingGroups,,, -autoscaling-plans,autoscalingplans,autoscalingplans,autoscalingplans,,autoscalingplans,,,AutoScalingPlans,AutoScalingPlans,,,2,,aws_autoscalingplans_,,autoscalingplans_,Auto Scaling Plans,,,,,,,,Auto Scaling Plans,DescribeScalingPlans,,, -,,,,,,,,,,,,,,,,,Backint Agent for SAP HANA,AWS,x,,,,,,,,,,No SDK support -backup,backup,backup,backup,,backup,,,Backup,Backup,,1,,,aws_backup_,,backup_,Backup,AWS,,,,,,,Backup,ListBackupPlans,,, -backup-gateway,backupgateway,backupgateway,backupgateway,,backupgateway,,,BackupGateway,BackupGateway,,1,,,aws_backupgateway_,,backupgateway_,Backup Gateway,AWS,,x,,,,,Backup Gateway,,,, -batch,batch,batch,batch,,batch,,,Batch,Batch,,1,2,,aws_batch_,,batch_,Batch,AWS,,,,,,,Batch,ListJobs,,, -bedrock,bedrock,bedrock,bedrock,,bedrock,,,Bedrock,Bedrock,,,2,,aws_bedrock_,,bedrock_,Amazon Bedrock,Amazon,,,,,,,Bedrock,ListFoundationModels,,, -bedrock-agent,bedrockagent,bedrockagent,bedrockagent,,bedrockagent,,,BedrockAgent,BedrockAgent,,,2,,aws_bedrockagent_,,bedrockagent_,Agents for Amazon Bedrock,Amazon,,,,,,,Bedrock Agent,ListAgents,,, 
-bcmdataexports,bcmdataexports,bcmdataexports,bcmdataexports,,bcmdataexports,,,BCMDataExports,BCMDataExports,,,2,,aws_bcmdataexports_,,bcmdataexports_,BCM Data Exports,Amazon,,,,,,,BCM Data Exports,ListExports,,, -billingconductor,billingconductor,billingconductor,,,billingconductor,,,BillingConductor,BillingConductor,,1,,,aws_billingconductor_,,billingconductor_,Billing Conductor,AWS,,x,,,,,billingconductor,,,, -braket,braket,braket,braket,,braket,,,Braket,Braket,,1,,,aws_braket_,,braket_,Braket,Amazon,,x,,,,,Braket,,,, -ce,ce,costexplorer,costexplorer,,ce,,costexplorer,CE,CostExplorer,,,2,,aws_ce_,,ce_,CE (Cost Explorer),AWS,,,,,,,Cost Explorer,ListCostCategoryDefinitions,,, -chatbot,chatbot,chatbot,chatbot,,chatbot,,,Chatbot,,x,,2,,aws_chatbot_,,chatbot_,Chatbot,AWS,,,,,,,Chatbot,GetAccountPreferences,,, -chime,chime,chime,chime,,chime,,,Chime,Chime,,1,,,aws_chime_,,chime_,Chime,Amazon,,,,,,,Chime,ListAccounts,,, -chime-sdk-identity,chimesdkidentity,chimesdkidentity,chimesdkidentity,,chimesdkidentity,,,ChimeSDKIdentity,ChimeSDKIdentity,,1,,,aws_chimesdkidentity_,,chimesdkidentity_,Chime SDK Identity,Amazon,,x,,,,,Chime SDK Identity,,,, -chime-sdk-mediapipelines,chimesdkmediapipelines,chimesdkmediapipelines,chimesdkmediapipelines,,chimesdkmediapipelines,,,ChimeSDKMediaPipelines,ChimeSDKMediaPipelines,,,2,,aws_chimesdkmediapipelines_,,chimesdkmediapipelines_,Chime SDK Media Pipelines,Amazon,,,,,,,Chime SDK Media Pipelines,ListMediaPipelines,,, -chime-sdk-meetings,chimesdkmeetings,chimesdkmeetings,chimesdkmeetings,,chimesdkmeetings,,,ChimeSDKMeetings,ChimeSDKMeetings,,1,,,aws_chimesdkmeetings_,,chimesdkmeetings_,Chime SDK Meetings,Amazon,,x,,,,,Chime SDK Meetings,,,, -chime-sdk-messaging,chimesdkmessaging,chimesdkmessaging,chimesdkmessaging,,chimesdkmessaging,,,ChimeSDKMessaging,ChimeSDKMessaging,,1,,,aws_chimesdkmessaging_,,chimesdkmessaging_,Chime SDK Messaging,Amazon,,x,,,,,Chime SDK Messaging,,,, 
-chime-sdk-voice,chimesdkvoice,chimesdkvoice,chimesdkvoice,,chimesdkvoice,,,ChimeSDKVoice,ChimeSDKVoice,,,2,,aws_chimesdkvoice_,,chimesdkvoice_,Chime SDK Voice,Amazon,,,,,,,Chime SDK Voice,ListPhoneNumbers,,, -cleanrooms,cleanrooms,cleanrooms,cleanrooms,,cleanrooms,,,CleanRooms,CleanRooms,,,2,,aws_cleanrooms_,,cleanrooms_,Clean Rooms,AWS,,,,,,,CleanRooms,ListCollaborations,,, -,,,,,,,,,,,,,,,,,CLI (Command Line Interface),AWS,x,,,,,,,,,,No SDK support -configure,configure,,,,,,,,,,,,,,,,CLI Configure options,AWS,x,,,,,,,,,,CLI only -ddb,ddb,,,,,,,,,,,,,,,,CLI High-level DynamoDB commands,AWS,x,,,,,,,,,,Part of DynamoDB -s3,s3,,,,,,,,,,,,,,,,CLI High-level S3 commands,AWS,x,,,,,,,,,,CLI only -history,history,,,,,,,,,,,,,,,,CLI History of commands,AWS,x,,,,,,,,,,CLI only -importexport,importexport,,,,,,,,,,,,,,,,CLI Import/Export,AWS,x,,,,,,,,,,CLI only -cli-dev,clidev,,,,,,,,,,,,,,,,CLI Internal commands for development,AWS,x,,,,,,,,,,CLI only -cloudcontrol,cloudcontrol,cloudcontrolapi,cloudcontrol,,cloudcontrol,,cloudcontrolapi,CloudControl,CloudControlApi,,,2,aws_cloudcontrolapi_,aws_cloudcontrol_,,cloudcontrolapi_,Cloud Control API,AWS,,,,,,,CloudControl,ListResourceRequests,,, -,,,,,,,,,,,,,,,,,Cloud Digital Interface SDK,AWS,x,,,,,,,,,,No SDK support -clouddirectory,clouddirectory,clouddirectory,clouddirectory,,clouddirectory,,,CloudDirectory,CloudDirectory,,1,,,aws_clouddirectory_,,clouddirectory_,Cloud Directory,Amazon,,x,,,,,CloudDirectory,,,, -servicediscovery,servicediscovery,servicediscovery,servicediscovery,,servicediscovery,,,ServiceDiscovery,ServiceDiscovery,,,2,aws_service_discovery_,aws_servicediscovery_,,service_discovery_,Cloud Map,AWS,,,,,,,ServiceDiscovery,ListNamespaces,,, -cloud9,cloud9,cloud9,cloud9,,cloud9,,,Cloud9,Cloud9,,,2,,aws_cloud9_,,cloud9_,Cloud9,AWS,,,,,,,Cloud9,ListEnvironments,,, 
-cloudformation,cloudformation,cloudformation,cloudformation,,cloudformation,,,CloudFormation,CloudFormation,x,,2,,aws_cloudformation_,,cloudformation_,CloudFormation,AWS,,,,,,,CloudFormation,ListStackInstances,"StackSetName: aws_sdkv2.String(""test"")",, -cloudfront,cloudfront,cloudfront,cloudfront,,cloudfront,,,CloudFront,CloudFront,,,2,,aws_cloudfront_,,cloudfront_,CloudFront,Amazon,,,,,,,CloudFront,ListDistributions,,, -cloudfront-keyvaluestore,cloudfrontkeyvaluestore,,cloudfrontkeyvaluestore,,cloudfrontkeyvaluestore,,,CloudFrontKeyValueStore,CloudFrontKeyValueStore,,,2,,aws_cloudfrontkeyvaluestore_,,cloudfrontkeyvaluestore_,CloudFront KeyValueStore,Amazon,,,,,,,CloudFront KeyValueStore,ListKeys,"KvsARN: aws_sdkv2.String(""arn:aws:cloudfront::111122223333:key-value-store/MaxAge"")",, -cloudhsm,cloudhsm,cloudhsm,cloudhsm,,,,,,,,,,,,,,CloudHSM,AWS,x,,,,,,,,,,Legacy -cloudhsmv2,cloudhsmv2,cloudhsmv2,cloudhsmv2,,cloudhsmv2,,cloudhsm,CloudHSMV2,CloudHSMV2,x,,2,aws_cloudhsm_v2_,aws_cloudhsmv2_,,cloudhsm,CloudHSM,AWS,,,,,,,CloudHSM V2,DescribeClusters,,, -cloudsearch,cloudsearch,cloudsearch,cloudsearch,,cloudsearch,,,CloudSearch,CloudSearch,,,2,,aws_cloudsearch_,,cloudsearch_,CloudSearch,Amazon,,,,,,,CloudSearch,ListDomainNames,,, -cloudsearchdomain,cloudsearchdomain,cloudsearchdomain,cloudsearchdomain,,cloudsearchdomain,,,CloudSearchDomain,CloudSearchDomain,,1,,,aws_cloudsearchdomain_,,cloudsearchdomain_,CloudSearch Domain,Amazon,,x,,,,,CloudSearch Domain,,,, -,,,,,,,,,,,,,,,,,CloudShell,AWS,x,,,,,,,,,,No SDK support -cloudtrail,cloudtrail,cloudtrail,cloudtrail,,cloudtrail,,,CloudTrail,CloudTrail,,,2,aws_cloudtrail,aws_cloudtrail_,,cloudtrail,CloudTrail,AWS,,,,,,,CloudTrail,ListChannels,,, -cloudwatch,cloudwatch,cloudwatch,cloudwatch,,cloudwatch,,,CloudWatch,CloudWatch,,,2,aws_cloudwatch_(?!(event_|log_|query_)),aws_cloudwatch_,,cloudwatch_dashboard;cloudwatch_metric_;cloudwatch_composite_,CloudWatch,Amazon,,,,,,,CloudWatch,ListDashboards,,, 
-application-insights,applicationinsights,applicationinsights,applicationinsights,,applicationinsights,,,ApplicationInsights,ApplicationInsights,,,2,,aws_applicationinsights_,,applicationinsights_,CloudWatch Application Insights,Amazon,,,,,,,Application Insights,CreateApplication,,, -evidently,evidently,cloudwatchevidently,evidently,,evidently,,cloudwatchevidently,Evidently,CloudWatchEvidently,,,2,,aws_evidently_,,evidently_,CloudWatch Evidently,Amazon,,,,,,,Evidently,ListProjects,,, -internetmonitor,internetmonitor,internetmonitor,internetmonitor,,internetmonitor,,,InternetMonitor,InternetMonitor,,,2,,aws_internetmonitor_,,internetmonitor_,CloudWatch Internet Monitor,Amazon,,,,,,,InternetMonitor,ListMonitors,,, -logs,logs,cloudwatchlogs,cloudwatchlogs,,logs,,cloudwatchlog;cloudwatchlogs,Logs,CloudWatchLogs,,,2,aws_cloudwatch_(log_|query_),aws_logs_,,cloudwatch_log_;cloudwatch_query_,CloudWatch Logs,Amazon,,,,,,,CloudWatch Logs,ListAnomalies,,, -rum,rum,cloudwatchrum,rum,,rum,,cloudwatchrum,RUM,CloudWatchRUM,,1,,,aws_rum_,,rum_,CloudWatch RUM,Amazon,,,,,,,RUM,ListAppMonitors,,, -synthetics,synthetics,synthetics,synthetics,,synthetics,,,Synthetics,Synthetics,,,2,,aws_synthetics_,,synthetics_,CloudWatch Synthetics,Amazon,,,,,,,synthetics,ListGroups,,, -codeartifact,codeartifact,codeartifact,codeartifact,,codeartifact,,,CodeArtifact,CodeArtifact,,,2,,aws_codeartifact_,,codeartifact_,CodeArtifact,AWS,,,,,,,codeartifact,ListDomains,,, -codebuild,codebuild,codebuild,codebuild,,codebuild,,,CodeBuild,CodeBuild,,,2,,aws_codebuild_,,codebuild_,CodeBuild,AWS,,,,,,,CodeBuild,ListBuildBatches,,, -codecommit,codecommit,codecommit,codecommit,,codecommit,,,CodeCommit,CodeCommit,,,2,,aws_codecommit_,,codecommit_,CodeCommit,AWS,,,,,,,CodeCommit,ListRepositories,,, -deploy,deploy,codedeploy,codedeploy,,deploy,,codedeploy,Deploy,CodeDeploy,,,2,aws_codedeploy_,aws_deploy_,,codedeploy_,CodeDeploy,AWS,,,,,,,CodeDeploy,ListApplications,,, 
-codeguruprofiler,codeguruprofiler,codeguruprofiler,codeguruprofiler,,codeguruprofiler,,,CodeGuruProfiler,CodeGuruProfiler,,,2,,aws_codeguruprofiler_,,codeguruprofiler_,CodeGuru Profiler,Amazon,,,,,,,CodeGuruProfiler,ListProfilingGroups,,, -codeguru-reviewer,codegurureviewer,codegurureviewer,codegurureviewer,,codegurureviewer,,,CodeGuruReviewer,CodeGuruReviewer,,,2,,aws_codegurureviewer_,,codegurureviewer_,CodeGuru Reviewer,Amazon,,,,,,,CodeGuru Reviewer,ListCodeReviews,Type: awstypes.TypePullRequest,, -codepipeline,codepipeline,codepipeline,codepipeline,,codepipeline,,,CodePipeline,CodePipeline,,,2,aws_codepipeline,aws_codepipeline_,,codepipeline,CodePipeline,AWS,,,,,,,CodePipeline,ListPipelines,,, -codestar,codestar,codestar,codestar,,codestar,,,CodeStar,CodeStar,,1,,,aws_codestar_,,codestar_,CodeStar,AWS,,x,,,,,CodeStar,,,, -codestar-connections,codestarconnections,codestarconnections,codestarconnections,,codestarconnections,,,CodeStarConnections,CodeStarConnections,,,2,,aws_codestarconnections_,,codestarconnections_,CodeStar Connections,AWS,,,,,,,CodeStar connections,ListConnections,,, -codestar-notifications,codestarnotifications,codestarnotifications,codestarnotifications,,codestarnotifications,,,CodeStarNotifications,CodeStarNotifications,,,2,,aws_codestarnotifications_,,codestarnotifications_,CodeStar Notifications,AWS,,,,,,,codestar notifications,ListTargets,,, -cognito-identity,cognitoidentity,cognitoidentity,cognitoidentity,,cognitoidentity,,,CognitoIdentity,CognitoIdentity,,,2,aws_cognito_identity_(?!provider),aws_cognitoidentity_,,cognito_identity_pool,Cognito Identity,Amazon,,,,,,,Cognito Identity,ListIdentityPools,MaxResults: aws_sdkv2.Int32(1),, 
-cognito-idp,cognitoidp,cognitoidentityprovider,cognitoidentityprovider,,cognitoidp,,cognitoidentityprovider,CognitoIDP,CognitoIdentityProvider,,1,,aws_cognito_(identity_provider|resource|user|risk),aws_cognitoidp_,,cognito_identity_provider;cognito_managed_user;cognito_resource_;cognito_user;cognito_risk,Cognito IDP (Identity Provider),Amazon,,,,,,,Cognito Identity Provider,ListUserPools,,, -cognito-sync,cognitosync,cognitosync,cognitosync,,cognitosync,,,CognitoSync,CognitoSync,,1,,,aws_cognitosync_,,cognitosync_,Cognito Sync,Amazon,,x,,,,,Cognito Sync,,,, -comprehend,comprehend,comprehend,comprehend,,comprehend,,,Comprehend,Comprehend,,,2,,aws_comprehend_,,comprehend_,Comprehend,Amazon,,,,,,,Comprehend,ListDocumentClassifiers,,, -comprehendmedical,comprehendmedical,comprehendmedical,comprehendmedical,,comprehendmedical,,,ComprehendMedical,ComprehendMedical,,1,,,aws_comprehendmedical_,,comprehendmedical_,Comprehend Medical,Amazon,,x,,,,,ComprehendMedical,,,, -compute-optimizer,computeoptimizer,computeoptimizer,computeoptimizer,,computeoptimizer,,,ComputeOptimizer,ComputeOptimizer,,,2,,aws_computeoptimizer_,,computeoptimizer_,Compute Optimizer,AWS,,,,,,,Compute Optimizer,GetEnrollmentStatus,,, -configservice,configservice,configservice,configservice,,configservice,,config,ConfigService,ConfigService,,,2,aws_config_,aws_configservice_,,config_,Config,AWS,,,,,,,Config Service,ListStoredQueries,,, -connect,connect,connect,connect,,connect,,,Connect,Connect,,1,,,aws_connect_,,connect_,Connect,Amazon,,,,,,,Connect,ListInstances,,, -connectcases,connectcases,connectcases,connectcases,,connectcases,,,ConnectCases,ConnectCases,,,2,,aws_connectcases_,,connectcases_,Connect Cases,Amazon,,,,,,,ConnectCases,ListDomains,,, -connect-contact-lens,connectcontactlens,connectcontactlens,connectcontactlens,,connectcontactlens,,,ConnectContactLens,ConnectContactLens,,1,,,aws_connectcontactlens_,,connectcontactlens_,Connect Contact Lens,Amazon,,x,,,,,Connect Contact Lens,,,, 
-customer-profiles,customerprofiles,customerprofiles,customerprofiles,,customerprofiles,,,CustomerProfiles,CustomerProfiles,,,2,,aws_customerprofiles_,,customerprofiles_,Connect Customer Profiles,Amazon,,,,,,,Customer Profiles,ListDomains,,, -connectparticipant,connectparticipant,connectparticipant,connectparticipant,,connectparticipant,,,ConnectParticipant,ConnectParticipant,,1,,,aws_connectparticipant_,,connectparticipant_,Connect Participant,Amazon,,x,,,,,ConnectParticipant,,,, -voice-id,voiceid,voiceid,voiceid,,voiceid,,,VoiceID,VoiceID,,1,,,aws_voiceid_,,voiceid_,Connect Voice ID,Amazon,,x,,,,,Voice ID,,,, -wisdom,wisdom,connectwisdomservice,wisdom,,wisdom,,connectwisdomservice,Wisdom,ConnectWisdomService,,1,,,aws_wisdom_,,wisdom_,Connect Wisdom,Amazon,,x,,,,,Wisdom,,,, -,,,,,,,,,,,,,,,,,Console Mobile Application,AWS,x,,,,,,,,,,No SDK support -controltower,controltower,controltower,controltower,,controltower,,,ControlTower,ControlTower,,,2,,aws_controltower_,,controltower_,Control Tower,AWS,,,,,,,ControlTower,ListLandingZones,,, -cost-optimization-hub,costoptimizationhub,costoptimizationhub,costoptimizationhub,,costoptimizationhub,,,CostOptimizationHub,CostOptimizationHub,x,,2,,aws_costoptimizationhub_,,costoptimizationhub_,Cost Optimization Hub,AWS,,,,,,,Cost Optimization Hub,GetPreferences,,us-east-1, -cur,cur,costandusagereportservice,costandusagereportservice,,cur,,costandusagereportservice,CUR,CostandUsageReportService,x,,2,,aws_cur_,,cur_,Cost and Usage Report,AWS,,,,,,,Cost and Usage Report Service,DescribeReportDefinitions,,us-east-1, -,,,,,,,,,,,,,,,,,Crypto Tools,AWS,x,,,,,,,,,,No SDK support -,,,,,,,,,,,,,,,,,Cryptographic Services Overview,AWS,x,,,,,,,,,,No SDK support -dataexchange,dataexchange,dataexchange,dataexchange,,dataexchange,,,DataExchange,DataExchange,,1,,,aws_dataexchange_,,dataexchange_,Data Exchange,AWS,,,,,,,DataExchange,ListDataSets,,, 
-datapipeline,datapipeline,datapipeline,datapipeline,,datapipeline,,,DataPipeline,DataPipeline,,1,,,aws_datapipeline_,,datapipeline_,Data Pipeline,AWS,,,,,,,Data Pipeline,ListPipelines,,, -datasync,datasync,datasync,datasync,,datasync,,,DataSync,DataSync,,,2,,aws_datasync_,,datasync_,DataSync,AWS,,,,,,,DataSync,ListAgents,,, -datazone,datazone,datazone,datazone,,datazone,,,DataZone,DataZone,,,2,,aws_datazone_,,datazone_,DataZone,Amazon,,,,,,,DataZone,ListDomains,,, -,,,,,,,,,,,,,,,,,Deep Learning AMIs,AWS,x,,,,,,,,,,No SDK support -,,,,,,,,,,,,,,,,,Deep Learning Containers,AWS,x,,,,,,,,,,No SDK support -,,,,,,,,,,,,,,,,,DeepComposer,AWS,x,,,,,,,,,,No SDK support -,,,,,,,,,,,,,,,,,DeepLens,AWS,x,,,,,,,,,,No SDK support -,,,,,,,,,,,,,,,,,DeepRacer,AWS,x,,,,,,,,,,No SDK support -detective,detective,detective,detective,,detective,,,Detective,Detective,,1,,,aws_detective_,,detective_,Detective,Amazon,,,,,,,Detective,ListGraphs,,, -devicefarm,devicefarm,devicefarm,devicefarm,,devicefarm,,,DeviceFarm,DeviceFarm,,,2,,aws_devicefarm_,,devicefarm_,Device Farm,AWS,,,,,,,Device Farm,ListDeviceInstances,,, -devops-guru,devopsguru,devopsguru,devopsguru,,devopsguru,,,DevOpsGuru,DevOpsGuru,,,2,,aws_devopsguru_,,devopsguru_,DevOps Guru,Amazon,,,,,,,DevOps Guru,DescribeAccountHealth,,, -directconnect,directconnect,directconnect,directconnect,,directconnect,,,DirectConnect,DirectConnect,,1,,aws_dx_,aws_directconnect_,,dx_,Direct Connect,AWS,,,,,,,Direct Connect,DescribeConnections,,, -dlm,dlm,dlm,dlm,,dlm,,,DLM,DLM,,,2,,aws_dlm_,,dlm_,DLM (Data Lifecycle Manager),Amazon,,,,,,,DLM,GetLifecyclePolicies,,, -dms,dms,databasemigrationservice,databasemigrationservice,,dms,,databasemigration;databasemigrationservice,DMS,DatabaseMigrationService,,1,,,aws_dms_,,dms_,DMS (Database Migration),AWS,,,,,,,Database Migration Service,DescribeCertificates,,, -docdb,docdb,docdb,docdb,,docdb,,,DocDB,DocDB,,,2,,aws_docdb_,,docdb_,DocumentDB,Amazon,,,,,,,DocDB,DescribeDBClusters,,, 
-docdb-elastic,docdbelastic,docdbelastic,docdbelastic,,docdbelastic,,,DocDBElastic,DocDBElastic,,,2,,aws_docdbelastic_,,docdbelastic_,DocumentDB Elastic,Amazon,,,,,,,DocDB Elastic,ListClusters,,, -drs,drs,drs,drs,,drs,,,DRS,Drs,,,2,,aws_drs_,,drs_,DRS (Elastic Disaster Recovery),AWS,,,,,,,DRS,DescribeJobs,,, -ds,ds,directoryservice,directoryservice,,ds,,directoryservice,DS,DirectoryService,,1,2,aws_directory_service_,aws_ds_,,directory_service_,Directory Service,AWS,,,,,,,Directory Service,DescribeDirectories,,, -dynamodb,dynamodb,dynamodb,dynamodb,,dynamodb,,,DynamoDB,DynamoDB,x,,2,,aws_dynamodb_,,dynamodb_,DynamoDB,Amazon,,,,,AWS_DYNAMODB_ENDPOINT,TF_AWS_DYNAMODB_ENDPOINT,DynamoDB,ListTables,,, -dax,dax,dax,dax,,dax,,,DAX,DAX,,,2,,aws_dax_,,dax_,DynamoDB Accelerator (DAX),Amazon,,,,,,,DAX,DescribeClusters,,, -dynamodbstreams,dynamodbstreams,dynamodbstreams,dynamodbstreams,,dynamodbstreams,,,DynamoDBStreams,DynamoDBStreams,,1,,,aws_dynamodbstreams_,,dynamodbstreams_,DynamoDB Streams,Amazon,,x,,,,,DynamoDB Streams,,,, -,,,,,ec2ebs,ec2,,EC2EBS,,,,,aws_(ebs_|volume_attach|snapshot_create),aws_ec2ebs_,ebs_,ebs_;volume_attachment;snapshot_,EBS (EC2),Amazon,x,,,x,,,,,,,Part of EC2 -ebs,ebs,ebs,ebs,,ebs,,,EBS,EBS,,1,,,aws_ebs_,,changewhenimplemented,EBS (Elastic Block Store),Amazon,,x,,,,,EBS,,,, -ec2,ec2,ec2,ec2,,ec2,ec2,,EC2,EC2,x,1,2,aws_(ami|availability_zone|ec2_(availability|capacity|fleet|host|instance|public_ipv4_pool|serial|spot|tag)|eip|instance|key_pair|launch_template|placement_group|spot),aws_ec2_,ec2_,ami;availability_zone;ec2_availability_;ec2_capacity_;ec2_fleet;ec2_host;ec2_image_;ec2_instance_;ec2_public_ipv4_pool;ec2_serial_;ec2_spot_;ec2_tag;eip;instance;key_pair;launch_template;placement_group;spot_,EC2 (Elastic Compute Cloud),Amazon,,,,,,,EC2,DescribeVpcs,,, -imagebuilder,imagebuilder,imagebuilder,imagebuilder,,imagebuilder,,,ImageBuilder,Imagebuilder,,1,,,aws_imagebuilder_,,imagebuilder_,EC2 Image Builder,Amazon,,,,,,,imagebuilder,ListImages,,, 
-ec2-instance-connect,ec2instanceconnect,ec2instanceconnect,ec2instanceconnect,,ec2instanceconnect,,,EC2InstanceConnect,EC2InstanceConnect,,1,,,aws_ec2instanceconnect_,,ec2instanceconnect_,EC2 Instance Connect,AWS,,x,,,,,EC2 Instance Connect,,,, -ecr,ecr,ecr,ecr,,ecr,,,ECR,ECR,,,2,,aws_ecr_,,ecr_,ECR (Elastic Container Registry),Amazon,,,,,,,ECR,DescribeRepositories,,, -ecr-public,ecrpublic,ecrpublic,ecrpublic,,ecrpublic,,,ECRPublic,ECRPublic,,,2,,aws_ecrpublic_,,ecrpublic_,ECR Public,Amazon,,,,,,,ECR PUBLIC,DescribeRepositories,,, -ecs,ecs,ecs,ecs,,ecs,,,ECS,ECS,,1,2,,aws_ecs_,,ecs_,ECS (Elastic Container),Amazon,,,,,,,ECS,ListClusters,,, -efs,efs,efs,efs,,efs,,,EFS,EFS,,1,,,aws_efs_,,efs_,EFS (Elastic File System),Amazon,,,,,,,EFS,DescribeFileSystems,,, -eks,eks,eks,eks,,eks,,,EKS,EKS,,,2,,aws_eks_,,eks_,EKS (Elastic Kubernetes),Amazon,,,,,,,EKS,ListClusters,,, -elasticbeanstalk,elasticbeanstalk,elasticbeanstalk,elasticbeanstalk,,elasticbeanstalk,,beanstalk,ElasticBeanstalk,ElasticBeanstalk,,,2,aws_elastic_beanstalk_,aws_elasticbeanstalk_,,elastic_beanstalk_,Elastic Beanstalk,AWS,,,,,,,Elastic Beanstalk,ListAvailableSolutionStacks,,, -elastic-inference,elasticinference,elasticinference,elasticinference,,elasticinference,,,ElasticInference,ElasticInference,,1,,,aws_elasticinference_,,elasticinference_,Elastic Inference,Amazon,,x,,,,,Elastic Inference,,,, -elastictranscoder,elastictranscoder,elastictranscoder,elastictranscoder,,elastictranscoder,,,ElasticTranscoder,ElasticTranscoder,,1,,,aws_elastictranscoder_,,elastictranscoder_,Elastic Transcoder,Amazon,,,,,,,Elastic Transcoder,ListPipelines,,, -elasticache,elasticache,elasticache,elasticache,,elasticache,,,ElastiCache,ElastiCache,,1,2,,aws_elasticache_,,elasticache_,ElastiCache,Amazon,,,,,,,ElastiCache,DescribeCacheClusters,,, 
-es,es,elasticsearchservice,elasticsearchservice,elasticsearch,es,,es;elasticsearchservice,Elasticsearch,ElasticsearchService,,1,,aws_elasticsearch_,aws_es_,,elasticsearch_,Elasticsearch,Amazon,,,,,,,Elasticsearch Service,ListDomainNames,,, -elbv2,elbv2,elbv2,elasticloadbalancingv2,,elbv2,,elasticloadbalancingv2,ELBV2,ELBV2,,1,2,aws_a?lb(\b|_listener|_target_group|s|_trust_store),aws_elbv2_,,lbs?\.;lb_listener;lb_target_group;lb_hosted;lb_trust_store,ELB (Elastic Load Balancing),,,,,,,,Elastic Load Balancing v2,DescribeLoadBalancers,,, -elb,elb,elb,elasticloadbalancing,,elb,,elasticloadbalancing,ELB,ELB,,1,,aws_(app_cookie_stickiness_policy|elb|lb_cookie_stickiness_policy|lb_ssl_negotiation_policy|load_balancer_|proxy_protocol_policy),aws_elb_,,app_cookie_stickiness_policy;elb;lb_cookie_stickiness_policy;lb_ssl_negotiation_policy;load_balancer;proxy_protocol_policy,ELB Classic,,,,,,,,Elastic Load Balancing,DescribeLoadBalancers,,, -mediaconnect,mediaconnect,mediaconnect,mediaconnect,,mediaconnect,,,MediaConnect,MediaConnect,,,2,,aws_mediaconnect_,,mediaconnect_,Elemental MediaConnect,AWS,,,,,,,MediaConnect,ListBridges,,, -mediaconvert,mediaconvert,mediaconvert,mediaconvert,,mediaconvert,,,MediaConvert,MediaConvert,,,2,aws_media_convert_,aws_mediaconvert_,,media_convert_,Elemental MediaConvert,AWS,,,,,,,MediaConvert,ListJobs,,, -medialive,medialive,medialive,medialive,,medialive,,,MediaLive,MediaLive,,,2,,aws_medialive_,,medialive_,Elemental MediaLive,AWS,,,,,,,MediaLive,ListOfferings,,, -mediapackage,mediapackage,mediapackage,mediapackage,,mediapackage,,,MediaPackage,MediaPackage,,,2,aws_media_package_,aws_mediapackage_,,media_package_,Elemental MediaPackage,AWS,,,,,,,MediaPackage,ListChannels,,, -mediapackage-vod,mediapackagevod,mediapackagevod,mediapackagevod,,mediapackagevod,,,MediaPackageVOD,MediaPackageVod,,1,,,aws_mediapackagevod_,,mediapackagevod_,Elemental MediaPackage VOD,AWS,,x,,,,,MediaPackage Vod,,,, 
-mediastore,mediastore,mediastore,mediastore,,mediastore,,,MediaStore,MediaStore,,,2,aws_media_store_,aws_mediastore_,,media_store_,Elemental MediaStore,AWS,,,,,,,MediaStore,ListContainers,,, -mediastore-data,mediastoredata,mediastoredata,mediastoredata,,mediastoredata,,,MediaStoreData,MediaStoreData,,1,,,aws_mediastoredata_,,mediastoredata_,Elemental MediaStore Data,AWS,,x,,,,,MediaStore Data,,,, -mediatailor,mediatailor,mediatailor,mediatailor,,mediatailor,,,MediaTailor,MediaTailor,,1,,,aws_mediatailor_,,media_tailor_,Elemental MediaTailor,AWS,,x,,,,,MediaTailor,,,, -,,,,,,,,,,,,,,,,,Elemental On-Premises,AWS,x,,,,,,,,,,No SDK support -emr,emr,emr,emr,,emr,,,EMR,EMR,,1,2,,aws_emr_,,emr_,EMR,Amazon,,,,,,,EMR,ListClusters,,, -emr-containers,emrcontainers,emrcontainers,emrcontainers,,emrcontainers,,,EMRContainers,EMRContainers,,1,,,aws_emrcontainers_,,emrcontainers_,EMR Containers,Amazon,,,,,,,EMR containers,ListVirtualClusters,,, -emr-serverless,emrserverless,emrserverless,emrserverless,,emrserverless,,,EMRServerless,EMRServerless,,,2,,aws_emrserverless_,,emrserverless_,EMR Serverless,Amazon,,,,,,,EMR Serverless,ListApplications,,, -,,,,,,,,,,,,,,,,,End-of-Support Migration Program (EMP) for Windows Server,AWS,x,,,,,,,,,,No SDK support -events,events,eventbridge,eventbridge,,events,,eventbridge;cloudwatchevents,Events,EventBridge,,,2,aws_cloudwatch_event_,aws_events_,,cloudwatch_event_,EventBridge,Amazon,,,,,,,EventBridge,ListEventBuses,,, -schemas,schemas,schemas,schemas,,schemas,,,Schemas,Schemas,x,,2,,aws_schemas_,,schemas_,EventBridge Schemas,Amazon,,,,,,,schemas,ListRegistries,,, -fis,fis,fis,fis,,fis,,,FIS,FIS,,,2,,aws_fis_,,fis_,FIS (Fault Injection Simulator),AWS,,,,,,,fis,ListExperiments,,, -finspace,finspace,finspace,finspace,,finspace,,,FinSpace,Finspace,,,2,,aws_finspace_,,finspace_,FinSpace,Amazon,,,,,,,finspace,ListEnvironments,,, 
-finspace-data,finspacedata,finspacedata,finspacedata,,finspacedata,,,FinSpaceData,FinSpaceData,,1,,,aws_finspacedata_,,finspacedata_,FinSpace Data,Amazon,,x,,,,,finspace data,,,, -fms,fms,fms,fms,,fms,,,FMS,FMS,x,,2,,aws_fms_,,fms_,FMS (Firewall Manager),AWS,,,,,,,FMS,ListAppsLists,MaxResults: aws_sdkv2.Int32(1),, -forecast,forecast,forecastservice,forecast,,forecast,,forecastservice,Forecast,ForecastService,,1,,,aws_forecast_,,forecast_,Forecast,Amazon,,x,,,,,forecast,,,, -forecastquery,forecastquery,forecastqueryservice,forecastquery,,forecastquery,,forecastqueryservice,ForecastQuery,ForecastQueryService,,1,,,aws_forecastquery_,,forecastquery_,Forecast Query,Amazon,,x,,,,,forecastquery,,,, -frauddetector,frauddetector,frauddetector,frauddetector,,frauddetector,,,FraudDetector,FraudDetector,,1,,,aws_frauddetector_,,frauddetector_,Fraud Detector,Amazon,,x,,,,,FraudDetector,,,, -,,,,,,,,,,,,,,,,,FreeRTOS,,x,,,,,,,,,,No SDK support -fsx,fsx,fsx,fsx,,fsx,,,FSx,FSx,,1,,,aws_fsx_,,fsx_,FSx,Amazon,,,,,,,FSx,DescribeFileSystems,,, -gamelift,gamelift,gamelift,gamelift,,gamelift,,,GameLift,GameLift,,1,,,aws_gamelift_,,gamelift_,GameLift,Amazon,,,,,,,GameLift,ListGameServerGroups,,, -globalaccelerator,globalaccelerator,globalaccelerator,globalaccelerator,,globalaccelerator,,,GlobalAccelerator,GlobalAccelerator,x,,2,,aws_globalaccelerator_,,globalaccelerator_,Global Accelerator,AWS,,,,,,,Global Accelerator,ListAccelerators,,us-west-2, -glue,glue,glue,glue,,glue,,,Glue,Glue,,1,,,aws_glue_,,glue_,Glue,AWS,,,,,,,Glue,ListRegistries,,, -databrew,databrew,gluedatabrew,databrew,,databrew,,gluedatabrew,DataBrew,GlueDataBrew,,1,,,aws_databrew_,,databrew_,Glue DataBrew,AWS,,x,,,,,DataBrew,,,, -groundstation,groundstation,groundstation,groundstation,,groundstation,,,GroundStation,GroundStation,,,2,,aws_groundstation_,,groundstation_,Ground Station,AWS,,,,,,,GroundStation,ListConfigs,,, 
-guardduty,guardduty,guardduty,guardduty,,guardduty,,,GuardDuty,GuardDuty,,1,2,,aws_guardduty_,,guardduty_,GuardDuty,Amazon,,,,,,,GuardDuty,ListDetectors,,, -health,health,health,health,,health,,,Health,Health,,1,,,aws_health_,,health_,Health,AWS,,x,,,,,Health,,,, -healthlake,healthlake,healthlake,healthlake,,healthlake,,,HealthLake,HealthLake,,,2,,aws_healthlake_,,healthlake_,HealthLake,Amazon,,,,,,,HealthLake,ListFHIRDatastores,,, -honeycode,honeycode,honeycode,honeycode,,honeycode,,,Honeycode,Honeycode,,1,,,aws_honeycode_,,honeycode_,Honeycode,Amazon,,x,,,,,Honeycode,,,, -iam,iam,iam,iam,,iam,,,IAM,IAM,,,2,,aws_iam_,,iam_,IAM (Identity & Access Management),AWS,,,,,AWS_IAM_ENDPOINT,TF_AWS_IAM_ENDPOINT,IAM,ListRoles,,, -inspector,inspector,inspector,inspector,,inspector,,,Inspector,Inspector,,1,,,aws_inspector_,,inspector_,Inspector Classic,Amazon,,,,,,,Inspector,ListRulesPackages,,, -inspector2,inspector2,inspector2,inspector2,,inspector2,,inspectorv2,Inspector2,Inspector2,,,2,,aws_inspector2_,,inspector2_,Inspector,Amazon,,,,,,,Inspector2,ListAccountPermissions,,, -iot1click-devices,iot1clickdevices,iot1clickdevicesservice,iot1clickdevicesservice,,iot1clickdevices,,iot1clickdevicesservice,IoT1ClickDevices,IoT1ClickDevicesService,,1,,,aws_iot1clickdevices_,,iot1clickdevices_,IoT 1-Click Devices,AWS,,x,,,,,IoT 1Click Devices Service,,,, -iot1click-projects,iot1clickprojects,iot1clickprojects,iot1clickprojects,,iot1clickprojects,,,IoT1ClickProjects,IoT1ClickProjects,,1,,,aws_iot1clickprojects_,,iot1clickprojects_,IoT 1-Click Projects,AWS,,x,,,,,IoT 1Click Projects,,,, -iotanalytics,iotanalytics,iotanalytics,iotanalytics,,iotanalytics,,,IoTAnalytics,IoTAnalytics,,1,,,aws_iotanalytics_,,iotanalytics_,IoT Analytics,AWS,,,,,,,IoTAnalytics,ListChannels,,, -iot,iot,iot,iot,,iot,,,IoT,IoT,,1,,,aws_iot_,,iot_,IoT Core,AWS,,,,,,,IoT,DescribeDefaultAuthorizer,,, 
-iot-data,iotdata,iotdataplane,iotdataplane,,iotdata,,iotdataplane,IoTData,IoTDataPlane,,1,,,aws_iotdata_,,iotdata_,IoT Data Plane,AWS,,x,,,,,IoT Data Plane,,,, -,,,,,,,,,,,,,,,,,IoT Device Defender,AWS,x,,,,,,,,,,Part of IoT -iotdeviceadvisor,iotdeviceadvisor,iotdeviceadvisor,iotdeviceadvisor,,iotdeviceadvisor,,,IoTDeviceAdvisor,IoTDeviceAdvisor,,1,,,aws_iotdeviceadvisor_,,iotdeviceadvisor_,IoT Device Management,AWS,,x,,,,,IotDeviceAdvisor,,,, -iotevents,iotevents,iotevents,iotevents,,iotevents,,,IoTEvents,IoTEvents,,1,,,aws_iotevents_,,iotevents_,IoT Events,AWS,,,,,,,IoT Events,ListAlarmModels,,, -iotevents-data,ioteventsdata,ioteventsdata,ioteventsdata,,ioteventsdata,,,IoTEventsData,IoTEventsData,,1,,,aws_ioteventsdata_,,ioteventsdata_,IoT Events Data,AWS,,x,,,,,IoT Events Data,,,, -,,,,,,,,,,,,,,,,,IoT ExpressLink,AWS,x,,,,,,,,,,No SDK support -iotfleethub,iotfleethub,iotfleethub,iotfleethub,,iotfleethub,,,IoTFleetHub,IoTFleetHub,,1,,,aws_iotfleethub_,,iotfleethub_,IoT Fleet Hub,AWS,,x,,,,,IoTFleetHub,,,, -,,,,,,,,,,,,,,,,,IoT FleetWise,AWS,x,,,,,,IoTFleetWise,,,,No SDK support -greengrass,greengrass,greengrass,greengrass,,greengrass,,,Greengrass,Greengrass,,1,,,aws_greengrass_,,greengrass_,IoT Greengrass,AWS,,,,,,,Greengrass,ListGroups,,, -greengrassv2,greengrassv2,greengrassv2,greengrassv2,,greengrassv2,,,GreengrassV2,GreengrassV2,,1,,,aws_greengrassv2_,,greengrassv2_,IoT Greengrass V2,AWS,,x,,,,,GreengrassV2,,,, -iot-jobs-data,iotjobsdata,iotjobsdataplane,iotjobsdataplane,,iotjobsdata,,iotjobsdataplane,IoTJobsData,IoTJobsDataPlane,,1,,,aws_iotjobsdata_,,iotjobsdata_,IoT Jobs Data Plane,AWS,,x,,,,,IoT Jobs Data Plane,,,, -,,,,,,,,,,,,,,,,,IoT RoboRunner,AWS,x,,,,,,,,,,No SDK support -iotsecuretunneling,iotsecuretunneling,iotsecuretunneling,iotsecuretunneling,,iotsecuretunneling,,,IoTSecureTunneling,IoTSecureTunneling,,1,,,aws_iotsecuretunneling_,,iotsecuretunneling_,IoT Secure Tunneling,AWS,,x,,,,,IoTSecureTunneling,,,, 
-iotsitewise,iotsitewise,iotsitewise,iotsitewise,,iotsitewise,,,IoTSiteWise,IoTSiteWise,,1,,,aws_iotsitewise_,,iotsitewise_,IoT SiteWise,AWS,,x,,,,,IoTSiteWise,,,, -iotthingsgraph,iotthingsgraph,iotthingsgraph,iotthingsgraph,,iotthingsgraph,,,IoTThingsGraph,IoTThingsGraph,,1,,,aws_iotthingsgraph_,,iotthingsgraph_,IoT Things Graph,AWS,,x,,,,,IoTThingsGraph,,,, -iottwinmaker,iottwinmaker,iottwinmaker,iottwinmaker,,iottwinmaker,,,IoTTwinMaker,IoTTwinMaker,,1,,,aws_iottwinmaker_,,iottwinmaker_,IoT TwinMaker,AWS,,x,,,,,IoTTwinMaker,,,, -iotwireless,iotwireless,iotwireless,iotwireless,,iotwireless,,,IoTWireless,IoTWireless,,1,,,aws_iotwireless_,,iotwireless_,IoT Wireless,AWS,,x,,,,,IoT Wireless,,,, -,,,,,,,,,,,,,,,,,IQ,AWS,x,,,,,,,,,,No SDK support -ivs,ivs,ivs,ivs,,ivs,,,IVS,IVS,,1,,,aws_ivs_,,ivs_,IVS (Interactive Video),Amazon,,,,,,,ivs,ListChannels,,, -ivschat,ivschat,ivschat,ivschat,,ivschat,,,IVSChat,Ivschat,,,2,,aws_ivschat_,,ivschat_,IVS (Interactive Video) Chat,Amazon,,,,,,,ivschat,ListRooms,,, -kendra,kendra,kendra,kendra,,kendra,,,Kendra,Kendra,,,2,,aws_kendra_,,kendra_,Kendra,Amazon,,,,,,,kendra,ListIndices,,, -keyspaces,keyspaces,keyspaces,keyspaces,,keyspaces,,,Keyspaces,Keyspaces,,,2,,aws_keyspaces_,,keyspaces_,Keyspaces (for Apache Cassandra),Amazon,,,,,,,Keyspaces,ListKeyspaces,,, -kinesis,kinesis,kinesis,kinesis,,kinesis,,,Kinesis,Kinesis,x,,2,aws_kinesis_stream,aws_kinesis_,,kinesis_stream;kinesis_resource_policy,Kinesis,Amazon,,,,,,,Kinesis,ListStreams,,, -kinesisanalytics,kinesisanalytics,kinesisanalytics,kinesisanalytics,,kinesisanalytics,,,KinesisAnalytics,KinesisAnalytics,,1,,aws_kinesis_analytics_,aws_kinesisanalytics_,,kinesis_analytics_,Kinesis Analytics,Amazon,,,,,,,Kinesis Analytics,ListApplications,,, -kinesisanalyticsv2,kinesisanalyticsv2,kinesisanalyticsv2,kinesisanalyticsv2,,kinesisanalyticsv2,,,KinesisAnalyticsV2,KinesisAnalyticsV2,,1,,,aws_kinesisanalyticsv2_,,kinesisanalyticsv2_,Kinesis Analytics V2,Amazon,,,,,,,Kinesis Analytics 
V2,ListApplications,,, -firehose,firehose,firehose,firehose,,firehose,,,Firehose,Firehose,,,2,aws_kinesis_firehose_,aws_firehose_,,kinesis_firehose_,Kinesis Firehose,Amazon,,,,,,,Firehose,ListDeliveryStreams,,, -kinesisvideo,kinesisvideo,kinesisvideo,kinesisvideo,,kinesisvideo,,,KinesisVideo,KinesisVideo,,1,,,aws_kinesisvideo_,,kinesis_video_,Kinesis Video,Amazon,,,,,,,Kinesis Video,ListStreams,,, -kinesis-video-archived-media,kinesisvideoarchivedmedia,kinesisvideoarchivedmedia,kinesisvideoarchivedmedia,,kinesisvideoarchivedmedia,,,KinesisVideoArchivedMedia,KinesisVideoArchivedMedia,,1,,,aws_kinesisvideoarchivedmedia_,,kinesisvideoarchivedmedia_,Kinesis Video Archived Media,Amazon,,x,,,,,Kinesis Video Archived Media,,,, -kinesis-video-media,kinesisvideomedia,kinesisvideomedia,kinesisvideomedia,,kinesisvideomedia,,,KinesisVideoMedia,KinesisVideoMedia,,1,,,aws_kinesisvideomedia_,,kinesisvideomedia_,Kinesis Video Media,Amazon,,x,,,,,Kinesis Video Media,,,, -kinesis-video-signaling,kinesisvideosignaling,kinesisvideosignalingchannels,kinesisvideosignaling,,kinesisvideosignaling,,kinesisvideosignalingchannels,KinesisVideoSignaling,KinesisVideoSignalingChannels,,1,,,aws_kinesisvideosignaling_,,kinesisvideosignaling_,Kinesis Video Signaling,Amazon,,x,,,,,Kinesis Video Signaling,,,, -kms,kms,kms,kms,,kms,,,KMS,KMS,,,2,,aws_kms_,,kms_,KMS (Key Management),AWS,,,,,,,KMS,ListKeys,,, -lakeformation,lakeformation,lakeformation,lakeformation,,lakeformation,,,LakeFormation,LakeFormation,,,2,,aws_lakeformation_,,lakeformation_,Lake Formation,AWS,,,,,,,LakeFormation,ListResources,,, -lambda,lambda,lambda,lambda,,lambda,,,Lambda,Lambda,,,2,,aws_lambda_,,lambda_,Lambda,AWS,,,,,,,Lambda,ListFunctions,,, -launch-wizard,launchwizard,launchwizard,launchwizard,,launchwizard,,,LaunchWizard,LaunchWizard,,,2,,aws_launchwizard_,,launchwizard_,Launch Wizard,AWS,,,,,,,Launch Wizard,ListWorkloads,,, 
-lex-models,lexmodels,lexmodelbuildingservice,lexmodelbuildingservice,,lexmodels,,lexmodelbuilding;lexmodelbuildingservice;lex,LexModels,LexModelBuildingService,,1,,aws_lex_,aws_lexmodels_,,lex_,Lex Model Building,Amazon,,,,,,,Lex Model Building Service,GetBots,,, -lexv2-models,lexv2models,lexmodelsv2,lexmodelsv2,,lexv2models,,lexmodelsv2,LexV2Models,LexModelsV2,,,2,,aws_lexv2models_,,lexv2models_,Lex V2 Models,Amazon,,,,,,,Lex Models V2,ListBots,,, -lex-runtime,lexruntime,lexruntimeservice,lexruntimeservice,,lexruntime,,lexruntimeservice,LexRuntime,LexRuntimeService,,1,,,aws_lexruntime_,,lexruntime_,Lex Runtime,Amazon,,x,,,,,Lex Runtime Service,,,, -lexv2-runtime,lexv2runtime,lexruntimev2,lexruntimev2,,lexruntimev2,,lexv2runtime,LexRuntimeV2,LexRuntimeV2,,1,,,aws_lexruntimev2_,,lexruntimev2_,Lex Runtime V2,Amazon,,x,,,,,Lex Runtime V2,,,, -license-manager,licensemanager,licensemanager,licensemanager,,licensemanager,,,LicenseManager,LicenseManager,,1,,,aws_licensemanager_,,licensemanager_,License Manager,AWS,,,,,,,License Manager,ListLicenseConfigurations,,, -lightsail,lightsail,lightsail,lightsail,,lightsail,,,Lightsail,Lightsail,x,,2,,aws_lightsail_,,lightsail_,Lightsail,Amazon,,,,,,,Lightsail,GetInstances,,, -location,location,locationservice,location,,location,,locationservice,Location,LocationService,,1,,,aws_location_,,location_,Location,Amazon,,,,,,,Location,ListGeofenceCollections,,, -lookoutequipment,lookoutequipment,lookoutequipment,lookoutequipment,,lookoutequipment,,,LookoutEquipment,LookoutEquipment,,1,,,aws_lookoutequipment_,,lookoutequipment_,Lookout for Equipment,Amazon,,x,,,,,LookoutEquipment,,,, -lookoutmetrics,lookoutmetrics,lookoutmetrics,lookoutmetrics,,lookoutmetrics,,,LookoutMetrics,LookoutMetrics,,,2,,aws_lookoutmetrics_,,lookoutmetrics_,Lookout for Metrics,Amazon,,,,,,,LookoutMetrics,ListMetricSets,,, 
-lookoutvision,lookoutvision,lookoutforvision,lookoutvision,,lookoutvision,,lookoutforvision,LookoutVision,LookoutForVision,,1,,,aws_lookoutvision_,,lookoutvision_,Lookout for Vision,Amazon,,x,,,,,LookoutVision,,,, -,,,,,,,,,,,,,,,,,Lumberyard,Amazon,x,,,,,,,,,,No SDK support -machinelearning,machinelearning,machinelearning,machinelearning,,machinelearning,,,MachineLearning,MachineLearning,,1,,,aws_machinelearning_,,machinelearning_,Machine Learning,Amazon,,x,,,,,Machine Learning,,,, -macie2,macie2,macie2,macie2,,macie2,,,Macie2,Macie2,,1,,,aws_macie2_,,macie2_,Macie,Amazon,,,,,,,Macie2,ListFindings,,, -macie,macie,macie,macie,,macie,,,Macie,Macie,,1,,,aws_macie_,,macie_,Macie Classic,Amazon,,x,,,,,Macie,,,, -m2,m2,m2,m2,,m2,,,M2,M2,,,2,,aws_m2_,,m2_,Mainframe Modernization,AWS,,,,,,,m2,ListApplications,,, -managedblockchain,managedblockchain,managedblockchain,managedblockchain,,managedblockchain,,,ManagedBlockchain,ManagedBlockchain,,1,,,aws_managedblockchain_,,managedblockchain_,Managed Blockchain,Amazon,,x,,,,,ManagedBlockchain,,,, -grafana,grafana,managedgrafana,grafana,,grafana,,managedgrafana;amg,Grafana,ManagedGrafana,,1,,,aws_grafana_,,grafana_,Managed Grafana,Amazon,,,,,,,grafana,ListWorkspaces,,, -kafka,kafka,kafka,kafka,,kafka,,msk,Kafka,Kafka,x,,2,aws_msk_,aws_kafka_,,msk_,Managed Streaming for Kafka,Amazon,,,,,,,Kafka,ListClusters,,, -kafkaconnect,kafkaconnect,kafkaconnect,kafkaconnect,,kafkaconnect,,,KafkaConnect,KafkaConnect,,1,,aws_mskconnect_,aws_kafkaconnect_,,mskconnect_,Managed Streaming for Kafka Connect,Amazon,,,,,,,KafkaConnect,ListConnectors,,, -,,,,,,,,,,,,,,,,,Management Console,AWS,x,,,,,,,,,,No SDK support -marketplace-catalog,marketplacecatalog,marketplacecatalog,marketplacecatalog,,marketplacecatalog,,,MarketplaceCatalog,MarketplaceCatalog,,1,,,aws_marketplacecatalog_,,marketplace_catalog_,Marketplace Catalog,AWS,,x,,,,,Marketplace Catalog,,,, 
-marketplacecommerceanalytics,marketplacecommerceanalytics,marketplacecommerceanalytics,marketplacecommerceanalytics,,marketplacecommerceanalytics,,,MarketplaceCommerceAnalytics,MarketplaceCommerceAnalytics,,1,,,aws_marketplacecommerceanalytics_,,marketplacecommerceanalytics_,Marketplace Commerce Analytics,AWS,,x,,,,,Marketplace Commerce Analytics,,,, -marketplace-entitlement,marketplaceentitlement,marketplaceentitlementservice,marketplaceentitlementservice,,marketplaceentitlement,,marketplaceentitlementservice,MarketplaceEntitlement,MarketplaceEntitlementService,,1,,,aws_marketplaceentitlement_,,marketplaceentitlement_,Marketplace Entitlement,AWS,,x,,,,,Marketplace Entitlement Service,,,, -meteringmarketplace,meteringmarketplace,marketplacemetering,marketplacemetering,,marketplacemetering,,meteringmarketplace,MarketplaceMetering,MarketplaceMetering,,1,,,aws_marketplacemetering_,,marketplacemetering_,Marketplace Metering,AWS,,x,,,,,Marketplace Metering,,,, -memorydb,memorydb,memorydb,memorydb,,memorydb,,,MemoryDB,MemoryDB,,1,,,aws_memorydb_,,memorydb_,MemoryDB for Redis,Amazon,,,,,,,MemoryDB,DescribeClusters,,, -,,,,,meta,,,Meta,,,,,aws_(arn|billing_service_account|default_tags|ip_ranges|partition|regions?|service)$,aws_meta_,,arn;ip_ranges;billing_service_account;default_tags;partition;region;service\.,Meta Data Sources,,x,,,x,,,,,,,Not an AWS service (metadata) -mgh,mgh,migrationhub,migrationhub,,mgh,,migrationhub,MgH,MigrationHub,,1,,,aws_mgh_,,mgh_,MgH (Migration Hub),AWS,,x,,,,,Migration Hub,,,, -,,,,,,,,,,,,,,,,,Microservice Extractor for .NET,AWS,x,,,,,,,,,,No SDK support -migrationhub-config,migrationhubconfig,migrationhubconfig,migrationhubconfig,,migrationhubconfig,,,MigrationHubConfig,MigrationHubConfig,,1,,,aws_migrationhubconfig_,,migrationhubconfig_,Migration Hub Config,AWS,,x,,,,,MigrationHub Config,,,, 
-migration-hub-refactor-spaces,migrationhubrefactorspaces,migrationhubrefactorspaces,migrationhubrefactorspaces,,migrationhubrefactorspaces,,,MigrationHubRefactorSpaces,MigrationHubRefactorSpaces,,1,,,aws_migrationhubrefactorspaces_,,migrationhubrefactorspaces_,Migration Hub Refactor Spaces,AWS,,x,,,,,Migration Hub Refactor Spaces,,,, -migrationhubstrategy,migrationhubstrategy,migrationhubstrategyrecommendations,migrationhubstrategy,,migrationhubstrategy,,migrationhubstrategyrecommendations,MigrationHubStrategy,MigrationHubStrategyRecommendations,,1,,,aws_migrationhubstrategy_,,migrationhubstrategy_,Migration Hub Strategy,AWS,,x,,,,,MigrationHubStrategy,,,, -mobile,mobile,mobile,mobile,,mobile,,,Mobile,Mobile,,1,,,aws_mobile_,,mobile_,Mobile,AWS,,x,,,,,Mobile,,,, -,,mobileanalytics,,,,,,MobileAnalytics,MobileAnalytics,,,,,,,,Mobile Analytics,AWS,x,,,,,,,,,,Only in Go SDK v1 -,,,,,,,,,,,,,,,,,Mobile SDK for Unity,AWS,x,,,,,,,,,,No SDK support -,,,,,,,,,,,,,,,,,Mobile SDK for Xamarin,AWS,x,,,,,,,,,,No SDK support -,,,,,,,,,,,,,,,,,Monitron,Amazon,x,,,,,,,,,,No SDK support -mq,mq,mq,mq,,mq,,,MQ,MQ,,,2,,aws_mq_,,mq_,MQ,Amazon,,,,,,,mq,ListBrokers,,, -mturk,mturk,mturk,mturk,,mturk,,,MTurk,MTurk,,1,,,aws_mturk_,,mturk_,MTurk (Mechanical Turk),Amazon,,x,,,,,MTurk,,,, -mwaa,mwaa,mwaa,mwaa,,mwaa,,,MWAA,MWAA,,,2,,aws_mwaa_,,mwaa_,MWAA (Managed Workflows for Apache Airflow),Amazon,,,,,,,MWAA,ListEnvironments,,, -neptune,neptune,neptune,neptune,,neptune,,,Neptune,Neptune,,1,,,aws_neptune_,,neptune_,Neptune,Amazon,,,,,,,Neptune,DescribeDBClusters,,, -neptune-graph,neptunegraph,,neptunegraph,,neptunegraph,,,NeptuneGraph,,,,2,,aws_neptunegraph_,,neptunegraph_,Neptune Analytics,Amazon,,,,,,,Neptune Graph,ListGraphs,,, -network-firewall,networkfirewall,networkfirewall,networkfirewall,,networkfirewall,,,NetworkFirewall,NetworkFirewall,,1,,,aws_networkfirewall_,,networkfirewall_,Network Firewall,AWS,,,,,,,Network Firewall,ListFirewalls,,, 
-networkmanager,networkmanager,networkmanager,networkmanager,,networkmanager,,,NetworkManager,NetworkManager,,1,,,aws_networkmanager_,,networkmanager_,Network Manager,AWS,,,,,,,NetworkManager,ListCoreNetworks,,, -,,,,,,,,,,,,,,,,,NICE DCV,,x,,,,,,,,,,No SDK support -nimble,nimble,nimblestudio,nimble,,nimble,,nimblestudio,Nimble,NimbleStudio,,1,,,aws_nimble_,,nimble_,Nimble Studio,Amazon,,x,,,,,nimble,,,, -oam,oam,oam,oam,,oam,,cloudwatchobservabilityaccessmanager,ObservabilityAccessManager,OAM,,,2,,aws_oam_,,oam_,CloudWatch Observability Access Manager,Amazon,,,,,,,OAM,ListLinks,,, -opensearch,opensearch,opensearchservice,opensearch,,opensearch,,opensearchservice,OpenSearch,OpenSearchService,,1,,,aws_opensearch_,,opensearch_,OpenSearch,Amazon,,,,,,,OpenSearch,ListDomainNames,,, -opensearchserverless,opensearchserverless,opensearchserverless,opensearchserverless,,opensearchserverless,,,OpenSearchServerless,OpenSearchServerless,,,2,,aws_opensearchserverless_,,opensearchserverless_,OpenSearch Serverless,Amazon,,,,,,,OpenSearchServerless,ListCollections,,, -osis,osis,osis,osis,,osis,,opensearchingestion,OpenSearchIngestion,OSIS,,,2,,aws_osis_,,osis_,OpenSearch Ingestion,Amazon,,,,,,,OSIS,ListPipelines,,, -opsworks,opsworks,opsworks,opsworks,,opsworks,,,OpsWorks,OpsWorks,,1,,,aws_opsworks_,,opsworks_,OpsWorks,AWS,,,,,,,OpsWorks,DescribeApps,,, -opsworks-cm,opsworkscm,opsworkscm,opsworkscm,,opsworkscm,,,OpsWorksCM,OpsWorksCM,,1,,,aws_opsworkscm_,,opsworkscm_,OpsWorks CM,AWS,,x,,,,,OpsWorksCM,,,, -organizations,organizations,organizations,organizations,,organizations,,,Organizations,Organizations,x,,2,,aws_organizations_,,organizations_,Organizations,AWS,,,,,,,Organizations,ListAccounts,,, -outposts,outposts,outposts,outposts,,outposts,,,Outposts,Outposts,,1,,,aws_outposts_,,outposts_,Outposts,AWS,,,,,,,Outposts,ListSites,,, -,,,,,ec2outposts,ec2,,EC2Outposts,,,,,aws_ec2_(coip_pool|local_gateway),aws_ec2outposts_,outposts_,ec2_coip_pool;ec2_local_gateway,Outposts 
(EC2),AWS,x,,,x,,,,,,,Part of EC2 -panorama,panorama,panorama,panorama,,panorama,,,Panorama,Panorama,,1,,,aws_panorama_,,panorama_,Panorama,AWS,,x,,,,,Panorama,,,, -,,,,,,,,,,,,,,,,,ParallelCluster,AWS,x,,,,,,,,,,No SDK support -payment-cryptography,paymentcryptography,paymentcryptography,paymentcryptography,,paymentcryptography,,,PaymentCryptography,PaymentCryptography,,,2,,aws_paymentcryptography_,,paymentcryptography_,Payment Cryptography Control Plane,AWS,,,,,,,PaymentCryptography,ListKeys,,, -pca-connector-ad,pcaconnectorad,pcaconnectorad,pcaconnectorad,,pcaconnectorad,,,PCAConnectorAD,PcaConnectorAd,,,2,,aws_pcaconnectorad_,,pcaconnectorad_,Private CA Connector for Active Directory,AWS,,,,,,,Pca Connector Ad,ListConnectors,,, -personalize,personalize,personalize,personalize,,personalize,,,Personalize,Personalize,,1,,,aws_personalize_,,personalize_,Personalize,Amazon,,x,,,,,Personalize,,,, -personalize-events,personalizeevents,personalizeevents,personalizeevents,,personalizeevents,,,PersonalizeEvents,PersonalizeEvents,,1,,,aws_personalizeevents_,,personalizeevents_,Personalize Events,Amazon,,x,,,,,Personalize Events,,,, -personalize-runtime,personalizeruntime,personalizeruntime,personalizeruntime,,personalizeruntime,,,PersonalizeRuntime,PersonalizeRuntime,,1,,,aws_personalizeruntime_,,personalizeruntime_,Personalize Runtime,Amazon,,x,,,,,Personalize Runtime,,,, -pinpoint,pinpoint,pinpoint,pinpoint,,pinpoint,,,Pinpoint,Pinpoint,,1,,,aws_pinpoint_,,pinpoint_,Pinpoint,Amazon,,,,,,,Pinpoint,GetApps,,, -pinpoint-email,pinpointemail,pinpointemail,pinpointemail,,pinpointemail,,,PinpointEmail,PinpointEmail,,1,,,aws_pinpointemail_,,pinpointemail_,Pinpoint Email,Amazon,,x,,,,,Pinpoint Email,,,, -pinpoint-sms-voice,pinpointsmsvoice,pinpointsmsvoice,pinpointsmsvoice,,pinpointsmsvoice,,,PinpointSMSVoice,PinpointSMSVoice,,1,,,aws_pinpointsmsvoice_,,pinpointsmsvoice_,Pinpoint SMS and Voice,Amazon,,x,,,,,Pinpoint SMS Voice,,,, 
-pipes,pipes,pipes,pipes,,pipes,,,Pipes,Pipes,,,2,,aws_pipes_,,pipes_,EventBridge Pipes,Amazon,,,,,,,Pipes,ListPipes,,, -polly,polly,polly,polly,,polly,,,Polly,Polly,,,2,,aws_polly_,,polly_,Polly,Amazon,,,,,,,Polly,ListLexicons,,, -,,,,,,,,,,,,,,,,,Porting Assistant for .NET,,x,,,,,,,,,,No SDK support -pricing,pricing,pricing,pricing,,pricing,,,Pricing,Pricing,,,2,,aws_pricing_,,pricing_,Pricing Calculator,AWS,,,,,,,Pricing,DescribeServices,,, -proton,proton,proton,proton,,proton,,,Proton,Proton,,1,,,aws_proton_,,proton_,Proton,AWS,,x,,,,,Proton,,,, -qbusiness,qbusiness,qbusiness,qbusiness,,qbusiness,,,QBusiness,QBusiness,,,2,,aws_qbusiness_,,qbusiness_,Amazon Q Business,Amazon,,,,,,,QBusiness,ListApplications,,, -qldb,qldb,qldb,qldb,,qldb,,,QLDB,QLDB,,,2,,aws_qldb_,,qldb_,QLDB (Quantum Ledger Database),Amazon,,,,,,,QLDB,ListLedgers,,, -qldb-session,qldbsession,qldbsession,qldbsession,,qldbsession,,,QLDBSession,QLDBSession,,1,,,aws_qldbsession_,,qldbsession_,QLDB Session,Amazon,,x,,,,,QLDB Session,,,, -quicksight,quicksight,quicksight,quicksight,,quicksight,,,QuickSight,QuickSight,,1,,,aws_quicksight_,,quicksight_,QuickSight,Amazon,,,,,,,QuickSight,ListDashboards,"AwsAccountId: aws_sdkv1.String(""123456789012"")",, -ram,ram,ram,ram,,ram,,,RAM,RAM,,,2,,aws_ram_,,ram_,RAM (Resource Access Manager),AWS,,,,,,,RAM,ListPermissions,,, -rds,rds,rds,rds,,rds,,,RDS,RDS,,1,2,aws_(db_|rds_),aws_rds_,,rds_;db_,RDS (Relational Database),Amazon,,,,,,,RDS,DescribeDBInstances,,, -rds-data,rdsdata,rdsdataservice,rdsdata,,rdsdata,,rdsdataservice,RDSData,RDSDataService,,1,,,aws_rdsdata_,,rdsdata_,RDS Data,Amazon,,x,,,,,RDS Data,,,, -pi,pi,pi,pi,,pi,,,PI,PI,,1,,,aws_pi_,,pi_,RDS Performance Insights (PI),Amazon,,x,,,,,PI,,,, -rbin,rbin,recyclebin,rbin,,rbin,,recyclebin,RBin,RecycleBin,,,2,,aws_rbin_,,rbin_,Recycle Bin (RBin),Amazon,,,,,,,rbin,ListRules,ResourceType: awstypes.ResourceTypeEc2Image,, -,,,,,,,,,,,,,,,,,Red Hat OpenShift Service on AWS (ROSA),AWS,x,,,,,,,,,,No SDK support 
-redshift,redshift,redshift,redshift,,redshift,,,Redshift,Redshift,,1,2,,aws_redshift_,,redshift_,Redshift,Amazon,,,,,,,Redshift,DescribeClusters,,, -redshift-data,redshiftdata,redshiftdataapiservice,redshiftdata,,redshiftdata,,redshiftdataapiservice,RedshiftData,RedshiftDataAPIService,,,2,,aws_redshiftdata_,,redshiftdata_,Redshift Data,Amazon,,,,,,,Redshift Data,ListDatabases,"Database: aws_sdkv2.String(""test"")",, -redshift-serverless,redshiftserverless,redshiftserverless,redshiftserverless,,redshiftserverless,,,RedshiftServerless,RedshiftServerless,,1,2,,aws_redshiftserverless_,,redshiftserverless_,Redshift Serverless,Amazon,,,,,,,Redshift Serverless,ListNamespaces,,, -rekognition,rekognition,rekognition,rekognition,,rekognition,,,Rekognition,Rekognition,,,2,,aws_rekognition_,,rekognition_,Rekognition,Amazon,,,,,,,Rekognition,ListCollections,,, -resiliencehub,resiliencehub,resiliencehub,resiliencehub,,resiliencehub,,,ResilienceHub,ResilienceHub,,1,,,aws_resiliencehub_,,resiliencehub_,Resilience Hub,AWS,,x,,,,,resiliencehub,,,, -resource-explorer-2,resourceexplorer2,resourceexplorer2,resourceexplorer2,,resourceexplorer2,,,ResourceExplorer2,ResourceExplorer2,,,2,,aws_resourceexplorer2_,,resourceexplorer2_,Resource Explorer,AWS,,,,,,,Resource Explorer 2,ListIndexes,,, -resource-groups,resourcegroups,resourcegroups,resourcegroups,,resourcegroups,,,ResourceGroups,ResourceGroups,,,2,,aws_resourcegroups_,,resourcegroups_,Resource Groups,AWS,,,,,,,Resource Groups,ListGroups,,, -resourcegroupstaggingapi,resourcegroupstaggingapi,resourcegroupstaggingapi,resourcegroupstaggingapi,,resourcegroupstaggingapi,,resourcegroupstagging,ResourceGroupsTaggingAPI,ResourceGroupsTaggingAPI,,,2,,aws_resourcegroupstaggingapi_,,resourcegroupstaggingapi_,Resource Groups Tagging,AWS,,,,,,,Resource Groups Tagging API,GetResources,,, -robomaker,robomaker,robomaker,robomaker,,robomaker,,,RoboMaker,RoboMaker,,1,,,aws_robomaker_,,robomaker_,RoboMaker,AWS,,x,,,,,RoboMaker,,,, 
-rolesanywhere,rolesanywhere,rolesanywhere,rolesanywhere,,rolesanywhere,,,RolesAnywhere,RolesAnywhere,,,2,,aws_rolesanywhere_,,rolesanywhere_,Roles Anywhere,AWS,,,,,,,RolesAnywhere,ListProfiles,,, -route53,route53,route53,route53,,route53,,,Route53,Route53,x,,2,aws_route53_(?!resolver_),aws_route53_,,route53_cidr_;route53_delegation_;route53_health_;route53_hosted_;route53_key_;route53_query_;route53_record;route53_traffic_;route53_vpc_;route53_zone,Route 53,Amazon,,,,,,,Route 53,ListHostedZones,,us-east-1, -route53domains,route53domains,route53domains,route53domains,,route53domains,,,Route53Domains,Route53Domains,x,,2,,aws_route53domains_,,route53domains_,Route 53 Domains,Amazon,,,,,,,Route 53 Domains,ListDomains,,us-east-1, -route53profiles,route53profiles,route53profiles,route53profiles,,route53profiles,,,Route53Profiles,Route53Profiles,,,2,,aws_route53profiles_,,route53profiles_,Route 53 Profiles,Amazon,,,,,,,Route 53 Profiles,ListProfiles,,, -route53-recovery-cluster,route53recoverycluster,route53recoverycluster,route53recoverycluster,,route53recoverycluster,,,Route53RecoveryCluster,Route53RecoveryCluster,,1,,,aws_route53recoverycluster_,,route53recoverycluster_,Route 53 Recovery Cluster,Amazon,,x,,,,,Route53 Recovery Cluster,,,, -route53-recovery-control-config,route53recoverycontrolconfig,route53recoverycontrolconfig,route53recoverycontrolconfig,,route53recoverycontrolconfig,,,Route53RecoveryControlConfig,Route53RecoveryControlConfig,x,1,,,aws_route53recoverycontrolconfig_,,route53recoverycontrolconfig_,Route 53 Recovery Control Config,Amazon,,,,,,,Route53 Recovery Control Config,ListClusters,,, -route53-recovery-readiness,route53recoveryreadiness,route53recoveryreadiness,route53recoveryreadiness,,route53recoveryreadiness,,,Route53RecoveryReadiness,Route53RecoveryReadiness,x,1,,,aws_route53recoveryreadiness_,,route53recoveryreadiness_,Route 53 Recovery Readiness,Amazon,,,,,,,Route53 Recovery Readiness,ListCells,,, 
-route53resolver,route53resolver,route53resolver,route53resolver,,route53resolver,,,Route53Resolver,Route53Resolver,,1,,aws_route53_resolver_,aws_route53resolver_,,route53_resolver_,Route 53 Resolver,Amazon,,,,,,,Route53Resolver,ListFirewallDomainLists,,, -s3api,s3api,s3,s3,,s3,,s3api,S3,S3,x,,2,aws_(canonical_user_id|s3_bucket|s3_object|s3_directory_bucket),aws_s3_,,s3_bucket;s3_directory_bucket;s3_object;canonical_user_id,S3 (Simple Storage),Amazon,,,,,AWS_S3_ENDPOINT,TF_AWS_S3_ENDPOINT,S3,ListBuckets,,, -s3control,s3control,s3control,s3control,,s3control,,,S3Control,S3Control,,,2,aws_(s3_account_|s3control_|s3_access_),aws_s3control_,,s3control;s3_account_;s3_access_,S3 Control,Amazon,,,,,,,S3 Control,ListJobs,,, -glacier,glacier,glacier,glacier,,glacier,,,Glacier,Glacier,,,2,,aws_glacier_,,glacier_,S3 Glacier,Amazon,,,,,,,Glacier,ListVaults,,, -s3outposts,s3outposts,s3outposts,s3outposts,,s3outposts,,,S3Outposts,S3Outposts,,1,,,aws_s3outposts_,,s3outposts_,S3 on Outposts,Amazon,,,,,,,S3Outposts,ListEndpoints,,, -sagemaker,sagemaker,sagemaker,sagemaker,,sagemaker,,,SageMaker,SageMaker,,1,,,aws_sagemaker_,,sagemaker_,SageMaker,Amazon,,,,,,,SageMaker,ListClusters,,, -sagemaker-a2i-runtime,sagemakera2iruntime,augmentedairuntime,sagemakera2iruntime,,sagemakera2iruntime,,augmentedairuntime,SageMakerA2IRuntime,AugmentedAIRuntime,,1,,,aws_sagemakera2iruntime_,,sagemakera2iruntime_,SageMaker A2I (Augmented AI),Amazon,,x,,,,,SageMaker A2I Runtime,,,, -sagemaker-edge,sagemakeredge,sagemakeredgemanager,sagemakeredge,,sagemakeredge,,sagemakeredgemanager,SageMakerEdge,SagemakerEdgeManager,,1,,,aws_sagemakeredge_,,sagemakeredge_,SageMaker Edge Manager,Amazon,,x,,,,,Sagemaker Edge,,,, -sagemaker-featurestore-runtime,sagemakerfeaturestoreruntime,sagemakerfeaturestoreruntime,sagemakerfeaturestoreruntime,,sagemakerfeaturestoreruntime,,,SageMakerFeatureStoreRuntime,SageMakerFeatureStoreRuntime,,1,,,aws_sagemakerfeaturestoreruntime_,,sagemakerfeaturestoreruntime_,SageMaker Feature 
Store Runtime,Amazon,,x,,,,,SageMaker FeatureStore Runtime,,,, -sagemaker-runtime,sagemakerruntime,sagemakerruntime,sagemakerruntime,,sagemakerruntime,,,SageMakerRuntime,SageMakerRuntime,,1,,,aws_sagemakerruntime_,,sagemakerruntime_,SageMaker Runtime,Amazon,,x,,,,,SageMaker Runtime,,,, -,,,,,,,,,,,,,,,,,SAM (Serverless Application Model),AWS,x,,,,,,,,,,No SDK support -savingsplans,savingsplans,savingsplans,savingsplans,,savingsplans,,,SavingsPlans,SavingsPlans,,1,,,aws_savingsplans_,,savingsplans_,Savings Plans,AWS,,x,,,,,savingsplans,,,, -,,,,,,,,,,,,,,,,,Schema Conversion Tool,AWS,x,,,,,,,,,,No SDK support -sdb,sdb,simpledb,,simpledb,sdb,,sdb,SimpleDB,SimpleDB,,1,,aws_simpledb_,aws_sdb_,,simpledb_,SDB (SimpleDB),Amazon,,,,,,,SimpleDB,ListDomains,,, -scheduler,scheduler,scheduler,scheduler,,scheduler,,,Scheduler,Scheduler,,,2,,aws_scheduler_,,scheduler_,EventBridge Scheduler,Amazon,,,,,,,Scheduler,ListSchedules,,, -secretsmanager,secretsmanager,secretsmanager,secretsmanager,,secretsmanager,,,SecretsManager,SecretsManager,,,2,,aws_secretsmanager_,,secretsmanager_,Secrets Manager,AWS,,,,,,,Secrets Manager,ListSecrets,,, -securityhub,securityhub,securityhub,securityhub,,securityhub,,,SecurityHub,SecurityHub,,,2,,aws_securityhub_,,securityhub_,Security Hub,AWS,,,,,,,SecurityHub,ListAutomationRules,,, -securitylake,securitylake,securitylake,securitylake,,securitylake,,,SecurityLake,SecurityLake,,,2,,aws_securitylake_,,securitylake_,Security Lake,Amazon,,,,,,,SecurityLake,ListDataLakes,,, -serverlessrepo,serverlessrepo,serverlessapplicationrepository,serverlessapplicationrepository,,serverlessrepo,,serverlessapprepo;serverlessapplicationrepository,ServerlessRepo,ServerlessApplicationRepository,,1,,aws_serverlessapplicationrepository_,aws_serverlessrepo_,,serverlessapplicationrepository_,Serverless Application Repository,AWS,,,,,,,ServerlessApplicationRepository,ListApplications,,, 
-servicecatalog,servicecatalog,servicecatalog,servicecatalog,,servicecatalog,,,ServiceCatalog,ServiceCatalog,,1,,,aws_servicecatalog_,,servicecatalog_,Service Catalog,AWS,,,,,,,Service Catalog,ListPortfolios,,, -servicecatalog-appregistry,servicecatalogappregistry,appregistry,servicecatalogappregistry,,servicecatalogappregistry,,appregistry,ServiceCatalogAppRegistry,AppRegistry,,,2,,aws_servicecatalogappregistry_,,servicecatalogappregistry_,Service Catalog AppRegistry,AWS,,,,,,,Service Catalog AppRegistry,ListApplications,,, -service-quotas,servicequotas,servicequotas,servicequotas,,servicequotas,,,ServiceQuotas,ServiceQuotas,,,2,,aws_servicequotas_,,servicequotas_,Service Quotas,,,,,,,,Service Quotas,ListServices,,, -ses,ses,ses,ses,,ses,,,SES,SES,,1,,,aws_ses_,,ses_,SES (Simple Email),Amazon,,,,,,,SES,ListIdentities,,, -sesv2,sesv2,sesv2,sesv2,,sesv2,,,SESV2,SESV2,,,2,,aws_sesv2_,,sesv2_,SESv2 (Simple Email V2),Amazon,,,,,,,SESv2,ListContactLists,,, -stepfunctions,stepfunctions,sfn,sfn,,sfn,,stepfunctions,SFN,SFN,,1,,,aws_sfn_,,sfn_,SFN (Step Functions),AWS,,,,,,,SFN,ListActivities,,, -shield,shield,shield,shield,,shield,,,Shield,Shield,x,,2,,aws_shield_,,shield_,Shield,AWS,,,,,,,Shield,ListProtectionGroups,,us-east-1, -signer,signer,signer,signer,,signer,,,Signer,Signer,,,2,,aws_signer_,,signer_,Signer,AWS,,,,,,,signer,ListSigningJobs,,, -sms,sms,sms,sms,,sms,,,SMS,SMS,,1,,,aws_sms_,,sms_,SMS (Server Migration),AWS,,x,,,,,SMS,,,, -snow-device-management,snowdevicemanagement,snowdevicemanagement,snowdevicemanagement,,snowdevicemanagement,,,SnowDeviceManagement,SnowDeviceManagement,,1,,,aws_snowdevicemanagement_,,snowdevicemanagement_,Snow Device Management,AWS,,x,,,,,Snow Device Management,,,, -snowball,snowball,snowball,snowball,,snowball,,,Snowball,Snowball,,1,,,aws_snowball_,,snowball_,Snow Family,AWS,,x,,,,,Snowball,,,, -sns,sns,sns,sns,,sns,,,SNS,SNS,,,2,,aws_sns_,,sns_,SNS (Simple Notification),Amazon,,,,,,,SNS,ListSubscriptions,,, 
-sqs,sqs,sqs,sqs,,sqs,,,SQS,SQS,,,2,,aws_sqs_,,sqs_,SQS (Simple Queue),Amazon,,,,,,,SQS,ListQueues,,, -ssm,ssm,ssm,ssm,,ssm,,,SSM,SSM,,,2,,aws_ssm_,,ssm_,SSM (Systems Manager),AWS,,,,,,,SSM,ListDocuments,,, -ssm-contacts,ssmcontacts,ssmcontacts,ssmcontacts,,ssmcontacts,,,SSMContacts,SSMContacts,,,2,,aws_ssmcontacts_,,ssmcontacts_,SSM Contacts,AWS,,,,,,,SSM Contacts,ListContacts,,, -ssm-incidents,ssmincidents,ssmincidents,ssmincidents,,ssmincidents,,,SSMIncidents,SSMIncidents,,,2,,aws_ssmincidents_,,ssmincidents_,SSM Incident Manager Incidents,AWS,,,,,,,SSM Incidents,ListResponsePlans,,, -ssm-sap,ssmsap,ssmsap,ssmsap,,ssmsap,,,SSMSAP,SsmSap,,,2,,aws_ssmsap_,,ssmsap_,Systems Manager for SAP,AWS,,,,,,,Ssm Sap,ListApplications,,, -sso,sso,sso,sso,,sso,,,SSO,SSO,,,2,,aws_sso_,,sso_,SSO (Single Sign-On),AWS,,x,x,,,,SSO,ListAccounts,"AccessToken: aws_sdkv2.String(""mock-access-token"")",, -sso-admin,ssoadmin,ssoadmin,ssoadmin,,ssoadmin,,,SSOAdmin,SSOAdmin,x,,2,,aws_ssoadmin_,,ssoadmin_,SSO Admin,AWS,,,,,,,SSO Admin,ListInstances,,, -identitystore,identitystore,identitystore,identitystore,,identitystore,,,IdentityStore,IdentityStore,,,2,,aws_identitystore_,,identitystore_,SSO Identity Store,AWS,,,,,,,identitystore,ListUsers,"IdentityStoreId: aws_sdkv2.String(""d-1234567890"")",, -sso-oidc,ssooidc,ssooidc,ssooidc,,ssooidc,,,SSOOIDC,SSOOIDC,,1,,,aws_ssooidc_,,ssooidc_,SSO OIDC,AWS,,x,,,,,SSO OIDC,,,, -storagegateway,storagegateway,storagegateway,storagegateway,,storagegateway,,,StorageGateway,StorageGateway,,1,,,aws_storagegateway_,,storagegateway_,Storage Gateway,AWS,,,,,,,Storage Gateway,ListGateways,,, -sts,sts,sts,sts,,sts,,,STS,STS,x,,2,aws_caller_identity,aws_sts_,,caller_identity,STS (Security Token),AWS,,,,,AWS_STS_ENDPOINT,TF_AWS_STS_ENDPOINT,STS,GetCallerIdentity,,, -,,,,,,,,,,,,,,,,,Sumerian,Amazon,x,,,,,,,,,,No SDK support -support,support,support,support,,support,,,Support,Support,,1,,,aws_support_,,support_,Support,AWS,,x,,,,,Support,,,, 
-swf,swf,swf,swf,,swf,,,SWF,SWF,,,2,,aws_swf_,,swf_,SWF (Simple Workflow),Amazon,,,,,,,SWF,ListDomains,"RegistrationStatus: ""REGISTERED""",, -,,,,,,,,,,,,,,,,,Tag Editor,AWS,x,,,,,,,,,,Part of Resource Groups Tagging -textract,textract,textract,textract,,textract,,,Textract,Textract,,1,,,aws_textract_,,textract_,Textract,Amazon,,x,,,,,Textract,,,, -timestream-influxdb,timestreaminfluxdb,timestreaminfluxdb,timestreaminfluxdb,,timestreaminfluxdb,,,TimestreamInfluxDB,TimestreamInfluxDB,,,2,,aws_timestreaminfluxdb_,,timestreaminfluxdb_,Timestream for InfluxDB,Amazon,,,,,,,Timestream InfluxDB,ListDbInstances,,, -timestream-query,timestreamquery,timestreamquery,timestreamquery,,timestreamquery,,,TimestreamQuery,TimestreamQuery,,1,,,aws_timestreamquery_,,timestreamquery_,Timestream Query,Amazon,,x,,,,,Timestream Query,,,, -timestream-write,timestreamwrite,timestreamwrite,timestreamwrite,,timestreamwrite,,,TimestreamWrite,TimestreamWrite,,,2,,aws_timestreamwrite_,,timestreamwrite_,Timestream Write,Amazon,,,,,,,Timestream Write,ListDatabases,,, -,,,,,,,,,,,,,,,,,Tools for PowerShell,AWS,x,,,,,,,,,,No SDK support -,,,,,,,,,,,,,,,,,Training and Certification,AWS,x,,,,,,,,,,No SDK support -transcribe,transcribe,transcribeservice,transcribe,,transcribe,,transcribeservice,Transcribe,TranscribeService,,,2,,aws_transcribe_,,transcribe_,Transcribe,Amazon,,,,,,,Transcribe,ListLanguageModels,,, -,,transcribestreamingservice,transcribestreaming,,transcribestreaming,,transcribestreamingservice,TranscribeStreaming,TranscribeStreamingService,,1,,,aws_transcribestreaming_,,transcribestreaming_,Transcribe Streaming,Amazon,,x,,,,,Transcribe Streaming,,,, -transfer,transfer,transfer,transfer,,transfer,,,Transfer,Transfer,,,2,,aws_transfer_,,transfer_,Transfer Family,AWS,,,,,,,Transfer,ListConnectors,,, -,,,,,transitgateway,ec2,,TransitGateway,,,,,aws_ec2_transit_gateway,aws_transitgateway_,transitgateway_,ec2_transit_gateway,Transit Gateway,AWS,x,,,x,,,,,,,Part of EC2 
-translate,translate,translate,translate,,translate,,,Translate,Translate,,1,,,aws_translate_,,translate_,Translate,Amazon,,x,,,,,Translate,,,, -,,,,,,,,,,,,,,,,,Trusted Advisor,AWS,x,,,,,,,,,,Part of Support -,,,,,verifiedaccess,ec2,,VerifiedAccess,,,,,aws_verifiedaccess,aws_verifiedaccess_,verifiedaccess_,verifiedaccess_,Verified Access,AWS,x,,,x,,,,,,,Part of EC2 -,,,,,vpc,ec2,,VPC,,,,,aws_((default_)?(network_acl|route_table|security_group|subnet|vpc(?!_ipam))|ec2_(managed|network|subnet|traffic)|egress_only_internet|flow_log|internet_gateway|main_route_table_association|nat_gateway|network_interface|prefix_list|route\b),aws_vpc_,vpc_,default_network_;default_route_;default_security_;default_subnet;default_vpc;ec2_managed_;ec2_network_;ec2_subnet_;ec2_traffic_;egress_only_;flow_log;internet_gateway;main_route_;nat_;network_;prefix_list;route_;route\.;security_group;subnet;vpc_dhcp_;vpc_endpoint;vpc_ipv;vpc_network_performance;vpc_peering_;vpc_security_group_;vpc\.;vpcs\.,VPC (Virtual Private Cloud),Amazon,x,,,x,,,,,,,Part of EC2 -vpc-lattice,vpclattice,vpclattice,vpclattice,,vpclattice,,,VPCLattice,VPCLattice,,,2,,aws_vpclattice_,,vpclattice_,VPC Lattice,Amazon,,,,,,,VPC Lattice,ListServices,,, -,,,,,ipam,ec2,,IPAM,,,,,aws_vpc_ipam,aws_ipam_,ipam_,vpc_ipam,VPC IPAM (IP Address Manager),Amazon,x,,,x,,,,,,,Part of EC2 -,,,,,vpnclient,ec2,,ClientVPN,,,,,aws_ec2_client_vpn,aws_vpnclient_,vpnclient_,ec2_client_vpn_,VPN (Client),AWS,x,,,x,,,,,,,Part of EC2 -,,,,,vpnsite,ec2,,SiteVPN,,,,,aws_(customer_gateway|vpn_),aws_vpnsite_,vpnsite_,customer_gateway;vpn_,VPN (Site-to-Site),AWS,x,,,x,,,,,,,Part of EC2 -wafv2,wafv2,wafv2,wafv2,,wafv2,,,WAFV2,WAFV2,,,2,,aws_wafv2_,,wafv2_,WAF,AWS,,,,,,,WAFV2,ListRuleGroups,Scope: awstypes.ScopeRegional,, -waf,waf,waf,waf,,waf,,,WAF,WAF,,,2,,aws_waf_,,waf_,WAF Classic,AWS,,,,,,,WAF,ListRules,,, -waf-regional,wafregional,wafregional,wafregional,,wafregional,,,WAFRegional,WAFRegional,,,2,,aws_wafregional_,,wafregional_,WAF Classic 
Regional,AWS,,,,,,,WAF Regional,ListRules,,, -,,,,,,,,,,,,,,,,,WAM (WorkSpaces Application Manager),Amazon,x,,,,,,,,,,No SDK support -,,,,,wavelength,ec2,,Wavelength,,,,,aws_ec2_carrier_gateway,aws_wavelength_,wavelength_,ec2_carrier_,Wavelength,AWS,x,,,x,,,,,,,Part of EC2 -budgets,budgets,budgets,budgets,,budgets,,,Budgets,Budgets,,,2,,aws_budgets_,,budgets_,Web Services Budgets,Amazon,,,,,,,Budgets,DescribeBudgets,"AccountId: aws_sdkv2.String(""012345678901"")",, -wellarchitected,wellarchitected,wellarchitected,wellarchitected,,wellarchitected,,,WellArchitected,WellArchitected,,,2,,aws_wellarchitected_,,wellarchitected_,Well-Architected Tool,AWS,,,,,,,WellArchitected,ListProfiles,,, -workdocs,workdocs,workdocs,workdocs,,workdocs,,,WorkDocs,WorkDocs,,1,,,aws_workdocs_,,workdocs_,WorkDocs,Amazon,,x,,,,,WorkDocs,,,, -worklink,worklink,worklink,worklink,,worklink,,,WorkLink,WorkLink,,1,,,aws_worklink_,,worklink_,WorkLink,Amazon,,,,,,,WorkLink,ListFleets,,, -workmail,workmail,workmail,workmail,,workmail,,,WorkMail,WorkMail,,1,,,aws_workmail_,,workmail_,WorkMail,Amazon,,x,,,,,WorkMail,,,, -workmailmessageflow,workmailmessageflow,workmailmessageflow,workmailmessageflow,,workmailmessageflow,,,WorkMailMessageFlow,WorkMailMessageFlow,,1,,,aws_workmailmessageflow_,,workmailmessageflow_,WorkMail Message Flow,Amazon,,x,,,,,WorkMailMessageFlow,,,, -workspaces,workspaces,workspaces,workspaces,,workspaces,,,WorkSpaces,WorkSpaces,,,2,,aws_workspaces_,,workspaces_,WorkSpaces,Amazon,,,,,,,WorkSpaces,DescribeWorkspaces,,, -workspaces-web,workspacesweb,workspacesweb,workspacesweb,,workspacesweb,,,WorkSpacesWeb,WorkSpacesWeb,,,2,,aws_workspacesweb_,,workspacesweb_,WorkSpaces Web,Amazon,,,,,,,WorkSpaces Web,ListPortals,,, -xray,xray,xray,xray,,xray,,,XRay,XRay,,,2,,aws_xray_,,xray_,X-Ray,AWS,,,,,,,XRay,ListResourcePolicies,,, 
-verifiedpermissions,verifiedpermissions,verifiedpermissions,verifiedpermissions,,verifiedpermissions,,,VerifiedPermissions,VerifiedPermissions,,,2,,aws_verifiedpermissions_,,verifiedpermissions_,Verified Permissions,Amazon,,,,,,,VerifiedPermissions,ListPolicyStores,,, -codecatalyst,codecatalyst,codecatalyst,codecatalyst,,codecatalyst,,,CodeCatalyst,CodeCatalyst,,,2,,aws_codecatalyst_,,codecatalyst_,CodeCatalyst,Amazon,,,,,,,CodeCatalyst,ListAccessTokens,,, -mediapackagev2,mediapackagev2,mediapackagev2,mediapackagev2,,mediapackagev2,,,MediaPackageV2,MediaPackageV2,,,2,aws_media_packagev2_,aws_mediapackagev2_,,media_packagev2_,Elemental MediaPackage Version 2,AWS,,,,,,,MediaPackageV2,ListChannelGroups,,, diff --git a/names/data/names_data.hcl b/names/data/names_data.hcl new file mode 100644 index 00000000000..87260d2c923 --- /dev/null +++ b/names/data/names_data.hcl @@ -0,0 +1,10182 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +service "accessanalyzer" { + + sdk { + id = "AccessAnalyzer" + client_version = [2] + } + + names { + provider_name_upper = "AccessAnalyzer" + human_friendly = "IAM Access Analyzer" + } + + endpoint_info { + endpoint_api_call = "ListAnalyzers" + } + + resource_prefix { + correct = "aws_accessanalyzer_" + } + + provider_package_correct = "accessanalyzer" + doc_prefix = ["accessanalyzer_"] + brand = "AWS" +} + +service "account" { + + sdk { + id = "Account" + client_version = [2] + } + + names { + provider_name_upper = "Account" + human_friendly = "Account Management" + } + + endpoint_info { + endpoint_api_call = "ListRegions" + } + + resource_prefix { + correct = "aws_account_" + } + + provider_package_correct = "account" + doc_prefix = ["account_"] + brand = "AWS" +} + +service "acm" { + + sdk { + id = "ACM" + client_version = [2] + } + + names { + provider_name_upper = "ACM" + human_friendly = "ACM (Certificate Manager)" + } + + endpoint_info { + endpoint_api_call = "ListCertificates" + } + + resource_prefix { 
+ correct = "aws_acm_" + } + + provider_package_correct = "acm" + doc_prefix = ["acm_"] + brand = "AWS" +} + +service "acmpca" { + + cli_v2_command { + aws_cli_v2_command = "acm-pca" + aws_cli_v2_command_no_dashes = "acmpca" + } + + sdk { + id = "ACM PCA" + client_version = [2] + } + + names { + provider_name_upper = "ACMPCA" + human_friendly = "ACM PCA (Certificate Manager Private Certificate Authority)" + } + + endpoint_info { + endpoint_api_call = "ListCertificateAuthorities" + } + + resource_prefix { + correct = "aws_acmpca_" + } + + provider_package_correct = "acmpca" + doc_prefix = ["acmpca_"] + brand = "AWS" +} + +service "alexaforbusiness" { + + sdk { + id = "Alexa For Business" + client_version = [1] + } + + names { + provider_name_upper = "AlexaForBusiness" + human_friendly = "Alexa for Business" + } + + client { + go_v1_client_typename = "AlexaForBusiness" + } + + resource_prefix { + correct = "aws_alexaforbusiness_" + } + + provider_package_correct = "alexaforbusiness" + doc_prefix = ["alexaforbusiness_"] + brand = "" + not_implemented = true +} + +service "amp" { + + go_packages { + v1_package = "prometheusservice" + v2_package = "amp" + } + + sdk { + id = "amp" + client_version = [2] + } + + names { + aliases = ["prometheus", "prometheusservice"] + provider_name_upper = "AMP" + human_friendly = "AMP (Managed Prometheus)" + } + + endpoint_info { + endpoint_api_call = "ListScrapers" + } + + resource_prefix { + actual = "aws_prometheus_" + correct = "aws_amp_" + } + + provider_package_correct = "amp" + doc_prefix = ["prometheus_"] + brand = "AWS" +} + +service "amplify" { + + sdk { + id = "Amplify" + client_version = [2] + } + + names { + provider_name_upper = "Amplify" + human_friendly = "Amplify" + } + + endpoint_info { + endpoint_api_call = "ListApps" + } + + resource_prefix { + correct = "aws_amplify_" + } + + provider_package_correct = "amplify" + doc_prefix = ["amplify_"] + brand = "AWS" +} + +service "amplifybackend" { + + sdk { + id = 
"AmplifyBackend" + client_version = [1] + } + + names { + provider_name_upper = "AmplifyBackend" + human_friendly = "Amplify Backend" + } + + client { + go_v1_client_typename = "AmplifyBackend" + } + + resource_prefix { + correct = "aws_amplifybackend_" + } + + provider_package_correct = "amplifybackend" + doc_prefix = ["amplifybackend_"] + brand = "AWS" + not_implemented = true +} + +service "amplifyuibuilder" { + + sdk { + id = "AmplifyUIBuilder" + client_version = [1] + } + + names { + provider_name_upper = "AmplifyUIBuilder" + human_friendly = "Amplify UI Builder" + } + + client { + go_v1_client_typename = "AmplifyUIBuilder" + } + + resource_prefix { + correct = "aws_amplifyuibuilder_" + } + + provider_package_correct = "amplifyuibuilder" + doc_prefix = ["amplifyuibuilder_"] + brand = "AWS" + not_implemented = true +} + +service "apigateway" { + + sdk { + id = "API Gateway" + client_version = [2] + } + + names { + provider_name_upper = "APIGateway" + human_friendly = "API Gateway" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "GetAccount" + } + + resource_prefix { + actual = "aws_api_gateway_" + correct = "aws_apigateway_" + } + + provider_package_correct = "apigateway" + doc_prefix = ["api_gateway_"] + brand = "AWS" +} + +service "apigatewaymanagementapi" { + + sdk { + id = "ApiGatewayManagementApi" + client_version = [1] + } + + names { + provider_name_upper = "APIGatewayManagementAPI" + human_friendly = "API Gateway Management API" + } + + client { + go_v1_client_typename = "ApiGatewayManagementApi" + } + + resource_prefix { + correct = "aws_apigatewaymanagementapi_" + } + + provider_package_correct = "apigatewaymanagementapi" + doc_prefix = ["apigatewaymanagementapi_"] + brand = "Amazon" + not_implemented = true +} + +service "apigatewayv2" { + + sdk { + id = "ApiGatewayV2" + client_version = [2] + } + + names { + provider_name_upper = "APIGatewayV2" + human_friendly = "API Gateway V2" + } + + client { + 
skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "GetApis" + } + + resource_prefix { + correct = "aws_apigatewayv2_" + } + + provider_package_correct = "apigatewayv2" + doc_prefix = ["apigatewayv2_"] + brand = "AWS" +} + +service "appfabric" { + + sdk { + id = "AppFabric" + client_version = [2] + } + + names { + provider_name_upper = "AppFabric" + human_friendly = "AppFabric" + } + + endpoint_info { + endpoint_api_call = "ListAppBundles" + } + + resource_prefix { + correct = "aws_appfabric_" + } + + provider_package_correct = "appfabric" + doc_prefix = ["appfabric_"] + brand = "AWS" +} + +service "appmesh" { + + sdk { + id = "App Mesh" + client_version = [1] + } + + names { + provider_name_upper = "AppMesh" + human_friendly = "App Mesh" + } + + client { + go_v1_client_typename = "AppMesh" + } + + endpoint_info { + endpoint_api_call = "ListMeshes" + } + + resource_prefix { + correct = "aws_appmesh_" + } + + provider_package_correct = "appmesh" + doc_prefix = ["appmesh_"] + brand = "AWS" +} + +service "apprunner" { + + sdk { + id = "AppRunner" + client_version = [2] + } + + names { + provider_name_upper = "AppRunner" + human_friendly = "App Runner" + } + + endpoint_info { + endpoint_api_call = "ListConnections" + } + + resource_prefix { + correct = "aws_apprunner_" + } + + provider_package_correct = "apprunner" + doc_prefix = ["apprunner_"] + brand = "AWS" +} + +service "appconfig" { + + sdk { + id = "AppConfig" + client_version = [2] + } + + names { + provider_name_upper = "AppConfig" + human_friendly = "AppConfig" + } + + client { + go_v1_client_typename = "AppConfig" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + correct = "aws_appconfig_" + } + + provider_package_correct = "appconfig" + doc_prefix = ["appconfig_"] + brand = "AWS" +} + +service "appconfigdata" { + + sdk { + id = "AppConfigData" + client_version = [1] + } + + names { + provider_name_upper = "AppConfigData" + human_friendly = 
"AppConfig Data" + } + + client { + go_v1_client_typename = "AppConfigData" + } + + resource_prefix { + correct = "aws_appconfigdata_" + } + + provider_package_correct = "appconfigdata" + doc_prefix = ["appconfigdata_"] + brand = "AWS" + not_implemented = true +} + +service "appflow" { + + sdk { + id = "Appflow" + client_version = [2] + } + + names { + provider_name_upper = "AppFlow" + human_friendly = "AppFlow" + } + + endpoint_info { + endpoint_api_call = "ListFlows" + } + + resource_prefix { + correct = "aws_appflow_" + } + + provider_package_correct = "appflow" + doc_prefix = ["appflow_"] + brand = "AWS" +} + +service "appintegrations" { + + go_packages { + v1_package = "appintegrationsservice" + v2_package = "appintegrations" + } + + sdk { + id = "AppIntegrations" + client_version = [2] + } + + names { + aliases = ["appintegrationsservice"] + provider_name_upper = "AppIntegrations" + human_friendly = "AppIntegrations" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + correct = "aws_appintegrations_" + } + + provider_package_correct = "appintegrations" + doc_prefix = ["appintegrations_"] + brand = "AWS" +} + +service "appautoscaling" { + + cli_v2_command { + aws_cli_v2_command = "application-autoscaling" + aws_cli_v2_command_no_dashes = "applicationautoscaling" + } + + go_packages { + v1_package = "applicationautoscaling" + v2_package = "applicationautoscaling" + } + + sdk { + id = "Application Auto Scaling" + client_version = [2] + } + + names { + aliases = ["applicationautoscaling"] + provider_name_upper = "AppAutoScaling" + human_friendly = "Application Auto Scaling" + } + + endpoint_info { + endpoint_api_call = "DescribeScalableTargets" + endpoint_api_params = "ServiceNamespace: awstypes.ServiceNamespaceEcs" + } + + resource_prefix { + actual = "aws_appautoscaling_" + correct = "aws_applicationautoscaling_" + } + + provider_package_correct = "applicationautoscaling" + doc_prefix = ["appautoscaling_"] + brand = "" 
+} +service "applicationcostprofiler" { + + sdk { + id = "ApplicationCostProfiler" + client_version = [1] + } + + names { + provider_name_upper = "ApplicationCostProfiler" + human_friendly = "Application Cost Profiler" + } + + client { + go_v1_client_typename = "ApplicationCostProfiler" + } + + resource_prefix { + correct = "aws_applicationcostprofiler_" + } + + provider_package_correct = "applicationcostprofiler" + doc_prefix = ["applicationcostprofiler_"] + brand = "AWS" + not_implemented = true +} + +service "applicationsignals" { + + cli_v2_command { + aws_cli_v2_command = "application-signals" + aws_cli_v2_command_no_dashes = "applicationsignals" + } + + sdk { + id = "Application Signals" + client_version = [2] + } + + names { + provider_name_upper = "ApplicationSignals" + human_friendly = "Application Signals" + } + + endpoint_info { + endpoint_api_call = "ListServiceLevelObjectives" + } + + resource_prefix { + correct = "aws_applicationsignals_" + } + + provider_package_correct = "applicationsignals" + doc_prefix = ["applicationsignals_"] + brand = "Amazon" +} + +service "discovery" { + + go_packages { + v1_package = "applicationdiscoveryservice" + v2_package = "applicationdiscoveryservice" + } + + sdk { + id = "Application Discovery Service" + client_version = [1] + } + + names { + aliases = ["applicationdiscovery", "applicationdiscoveryservice"] + provider_name_upper = "Discovery" + human_friendly = "Application Discovery" + } + + client { + go_v1_client_typename = "ApplicationDiscoveryService" + } + + resource_prefix { + correct = "aws_discovery_" + } + + provider_package_correct = "discovery" + doc_prefix = ["discovery_"] + brand = "AWS" + not_implemented = true +} + +service "mgn" { + + sdk { + id = "mgn" + client_version = [1] + } + + names { + provider_name_upper = "Mgn" + human_friendly = "Application Migration (Mgn)" + } + + client { + go_v1_client_typename = "Mgn" + } + + resource_prefix { + correct = "aws_mgn_" + } + + provider_package_correct = 
"mgn" + doc_prefix = ["mgn_"] + brand = "AWS" + not_implemented = true +} + +service "appstream" { + + sdk { + id = "AppStream" + client_version = [2] + } + + names { + provider_name_upper = "AppStream" + human_friendly = "AppStream 2.0" + } + + endpoint_info { + endpoint_api_call = "ListAssociatedFleets" + endpoint_api_params = "StackName: aws_sdkv2.String(\"test\")" + } + + resource_prefix { + correct = "aws_appstream_" + } + + provider_package_correct = "appstream" + doc_prefix = ["appstream_"] + brand = "AWS" +} + +service "appsync" { + + sdk { + id = "AppSync" + client_version = [2] + } + + names { + provider_name_upper = "AppSync" + human_friendly = "AppSync" + } + + client { + go_v1_client_typename = "AppSync" + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListDomainNames" + } + + resource_prefix { + correct = "aws_appsync_" + } + + provider_package_correct = "appsync" + doc_prefix = ["appsync_"] + brand = "AWS" +} + +service "athena" { + + sdk { + id = "Athena" + client_version = [2] + } + + names { + provider_name_upper = "Athena" + human_friendly = "Athena" + } + + endpoint_info { + endpoint_api_call = "ListDataCatalogs" + } + + resource_prefix { + correct = "aws_athena_" + } + + provider_package_correct = "athena" + doc_prefix = ["athena_"] + brand = "AWS" +} + +service "auditmanager" { + + sdk { + id = "AuditManager" + client_version = [2] + } + + names { + provider_name_upper = "AuditManager" + human_friendly = "Audit Manager" + } + + endpoint_info { + endpoint_api_call = "GetAccountStatus" + } + + resource_prefix { + correct = "aws_auditmanager_" + } + + provider_package_correct = "auditmanager" + doc_prefix = ["auditmanager_"] + brand = "AWS" +} + +service "autoscaling" { + + sdk { + id = "Auto Scaling" + client_version = [2] + } + + names { + provider_name_upper = "AutoScaling" + human_friendly = "Auto Scaling" + } + + endpoint_info { + endpoint_api_call = "DescribeAutoScalingGroups" + } + + resource_prefix { + actual = 
"aws_(autoscaling_|launch_configuration)" + correct = "aws_autoscaling_" + } + + provider_package_correct = "autoscaling" + doc_prefix = ["autoscaling_", "launch_configuration"] + brand = "" +} +service "autoscalingplans" { + + cli_v2_command { + aws_cli_v2_command = "autoscaling-plans" + aws_cli_v2_command_no_dashes = "autoscalingplans" + } + + sdk { + id = "Auto Scaling Plans" + client_version = [2] + } + + names { + provider_name_upper = "AutoScalingPlans" + human_friendly = "Auto Scaling Plans" + } + + endpoint_info { + endpoint_api_call = "DescribeScalingPlans" + } + + resource_prefix { + correct = "aws_autoscalingplans_" + } + + provider_package_correct = "autoscalingplans" + doc_prefix = ["autoscalingplans_"] + brand = "" +} +service "backup" { + + sdk { + id = "Backup" + client_version = [2] + } + + names { + provider_name_upper = "Backup" + human_friendly = "Backup" + } + + client { + go_v1_client_typename = "Backup" + } + + endpoint_info { + endpoint_api_call = "ListBackupPlans" + } + + resource_prefix { + correct = "aws_backup_" + } + + provider_package_correct = "backup" + doc_prefix = ["backup_"] + brand = "AWS" +} + +service "backupgateway" { + + cli_v2_command { + aws_cli_v2_command = "backup-gateway" + aws_cli_v2_command_no_dashes = "backupgateway" + } + + sdk { + id = "Backup Gateway" + client_version = [1] + } + + names { + provider_name_upper = "BackupGateway" + human_friendly = "Backup Gateway" + } + + client { + go_v1_client_typename = "BackupGateway" + } + + resource_prefix { + correct = "aws_backupgateway_" + } + + provider_package_correct = "backupgateway" + doc_prefix = ["backupgateway_"] + brand = "AWS" + not_implemented = true +} + +service "batch" { + + sdk { + id = "Batch" + client_version = [1, 2] + } + + names { + provider_name_upper = "Batch" + human_friendly = "Batch" + } + + client { + go_v1_client_typename = "Batch" + } + + endpoint_info { + endpoint_api_call = "ListJobs" + } + + resource_prefix { + correct = "aws_batch_" + } + + 
provider_package_correct = "batch" + doc_prefix = ["batch_"] + brand = "AWS" +} + +service "bedrock" { + sdk { + id = "Bedrock" + client_version = [2] + } + + names { + provider_name_upper = "Bedrock" + human_friendly = "Bedrock" + } + + endpoint_info { + endpoint_api_call = "ListFoundationModels" + } + + resource_prefix { + correct = "aws_bedrock_" + } + + provider_package_correct = "bedrock" + doc_prefix = ["bedrock_"] + brand = "Amazon" +} + +service "bedrockagent" { + cli_v2_command { + aws_cli_v2_command = "bedrock-agent" + aws_cli_v2_command_no_dashes = "bedrockagent" + } + + sdk { + id = "Bedrock Agent" + client_version = [2] + } + + names { + provider_name_upper = "BedrockAgent" + human_friendly = "Bedrock Agents" + } + + endpoint_info { + endpoint_api_call = "ListAgents" + } + + resource_prefix { + correct = "aws_bedrockagent_" + } + + provider_package_correct = "bedrockagent" + doc_prefix = ["bedrockagent_"] + brand = "Amazon" +} + +service "bcmdataexports" { + + sdk { + id = "BCM Data Exports" + client_version = [2] + } + + names { + provider_name_upper = "BCMDataExports" + human_friendly = "BCM Data Exports" + } + + endpoint_info { + endpoint_api_call = "ListExports" + } + + resource_prefix { + correct = "aws_bcmdataexports_" + } + + provider_package_correct = "bcmdataexports" + doc_prefix = ["bcmdataexports_"] + brand = "AWS" +} + +service "billingconductor" { + + go_packages { + v1_package = "billingconductor" + v2_package = "" + } + + sdk { + id = "billingconductor" + client_version = [1] + } + + names { + provider_name_upper = "BillingConductor" + human_friendly = "Billing Conductor" + } + + client { + go_v1_client_typename = "BillingConductor" + } + + resource_prefix { + correct = "aws_billingconductor_" + } + + provider_package_correct = "billingconductor" + doc_prefix = ["billingconductor_"] + brand = "AWS" + not_implemented = true +} + +service "braket" { + + sdk { + id = "Braket" + client_version = [1] + } + + names { + provider_name_upper = 
"Braket" + human_friendly = "Braket" + } + + client { + go_v1_client_typename = "Braket" + } + + resource_prefix { + correct = "aws_braket_" + } + + provider_package_correct = "braket" + doc_prefix = ["braket_"] + brand = "Amazon" + not_implemented = true +} + +service "ce" { + + go_packages { + v1_package = "costexplorer" + v2_package = "costexplorer" + } + + sdk { + id = "Cost Explorer" + client_version = [2] + } + + names { + aliases = ["costexplorer"] + provider_name_upper = "CE" + human_friendly = "CE (Cost Explorer)" + } + + endpoint_info { + endpoint_api_call = "ListCostCategoryDefinitions" + } + + resource_prefix { + correct = "aws_ce_" + } + + provider_package_correct = "ce" + doc_prefix = ["ce_"] + brand = "AWS" +} + +service "chatbot" { + + sdk { + id = "Chatbot" + client_version = [2] + } + + names { + provider_name_upper = "Chatbot" + human_friendly = "Chatbot" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "GetAccountPreferences" + } + + resource_prefix { + correct = "aws_chatbot_" + } + + provider_package_correct = "chatbot" + doc_prefix = ["chatbot_"] + brand = "AWS" +} + +service "chime" { + + sdk { + id = "Chime" + client_version = [1] + } + + names { + provider_name_upper = "Chime" + human_friendly = "Chime" + } + + client { + go_v1_client_typename = "Chime" + } + + endpoint_info { + endpoint_api_call = "ListAccounts" + } + + resource_prefix { + correct = "aws_chime_" + } + + provider_package_correct = "chime" + doc_prefix = ["chime_"] + brand = "AWS" +} + +service "chimesdkidentity" { + + cli_v2_command { + aws_cli_v2_command = "chime-sdk-identity" + aws_cli_v2_command_no_dashes = "chimesdkidentity" + } + + sdk { + id = "Chime SDK Identity" + client_version = [1] + } + + names { + provider_name_upper = "ChimeSDKIdentity" + human_friendly = "Chime SDK Identity" + } + + client { + go_v1_client_typename = "ChimeSDKIdentity" + } + + resource_prefix { + correct = "aws_chimesdkidentity_" + } + + 
provider_package_correct = "chimesdkidentity" + doc_prefix = ["chimesdkidentity_"] + brand = "Amazon" + not_implemented = true +} + +service "chimesdkmediapipelines" { + + cli_v2_command { + aws_cli_v2_command = "chime-sdk-mediapipelines" + aws_cli_v2_command_no_dashes = "chimesdkmediapipelines" + } + + sdk { + id = "Chime SDK Media Pipelines" + client_version = [2] + } + + names { + provider_name_upper = "ChimeSDKMediaPipelines" + human_friendly = "Chime SDK Media Pipelines" + } + + endpoint_info { + endpoint_api_call = "ListMediaPipelines" + } + + resource_prefix { + correct = "aws_chimesdkmediapipelines_" + } + + provider_package_correct = "chimesdkmediapipelines" + doc_prefix = ["chimesdkmediapipelines_"] + brand = "AWS" +} + +service "chimesdkmeetings" { + + cli_v2_command { + aws_cli_v2_command = "chime-sdk-meetings" + aws_cli_v2_command_no_dashes = "chimesdkmeetings" + } + + sdk { + id = "Chime SDK Meetings" + client_version = [1] + } + + names { + provider_name_upper = "ChimeSDKMeetings" + human_friendly = "Chime SDK Meetings" + } + + client { + go_v1_client_typename = "ChimeSDKMeetings" + } + + resource_prefix { + correct = "aws_chimesdkmeetings_" + } + + provider_package_correct = "chimesdkmeetings" + doc_prefix = ["chimesdkmeetings_"] + brand = "Amazon" + not_implemented = true +} + +service "chimesdkmessaging" { + + cli_v2_command { + aws_cli_v2_command = "chime-sdk-messaging" + aws_cli_v2_command_no_dashes = "chimesdkmessaging" + } + + sdk { + id = "Chime SDK Messaging" + client_version = [1] + } + + names { + provider_name_upper = "ChimeSDKMessaging" + human_friendly = "Chime SDK Messaging" + } + + client { + go_v1_client_typename = "ChimeSDKMessaging" + } + + resource_prefix { + correct = "aws_chimesdkmessaging_" + } + + provider_package_correct = "chimesdkmessaging" + doc_prefix = ["chimesdkmessaging_"] + brand = "Amazon" + not_implemented = true +} + +service "chimesdkvoice" { + + cli_v2_command { + aws_cli_v2_command = "chime-sdk-voice" + 
aws_cli_v2_command_no_dashes = "chimesdkvoice" + } + + sdk { + id = "Chime SDK Voice" + client_version = [2] + } + + names { + provider_name_upper = "ChimeSDKVoice" + human_friendly = "Chime SDK Voice" + } + + endpoint_info { + endpoint_api_call = "ListPhoneNumbers" + } + + resource_prefix { + correct = "aws_chimesdkvoice_" + } + + provider_package_correct = "chimesdkvoice" + doc_prefix = ["chimesdkvoice_"] + brand = "AWS" +} + +service "cleanrooms" { + + sdk { + id = "CleanRooms" + client_version = [2] + } + + names { + provider_name_upper = "CleanRooms" + human_friendly = "Clean Rooms" + } + + endpoint_info { + endpoint_api_call = "ListCollaborations" + } + + resource_prefix { + correct = "aws_cleanrooms_" + } + + provider_package_correct = "cleanrooms" + doc_prefix = ["cleanrooms_"] + brand = "AWS" +} + +service "cloudcontrol" { + + go_packages { + v1_package = "cloudcontrolapi" + v2_package = "cloudcontrol" + } + + sdk { + id = "CloudControl" + client_version = [2] + } + + names { + aliases = ["cloudcontrolapi"] + provider_name_upper = "CloudControl" + human_friendly = "Cloud Control API" + } + + client { + go_v1_client_typename = "CloudControlApi" + } + + endpoint_info { + endpoint_api_call = "ListResourceRequests" + } + + resource_prefix { + actual = "aws_cloudcontrolapi_" + correct = "aws_cloudcontrol_" + } + + provider_package_correct = "cloudcontrol" + doc_prefix = ["cloudcontrolapi_"] + brand = "AWS" +} + +service "clouddirectory" { + + sdk { + id = "CloudDirectory" + client_version = [1] + } + + names { + provider_name_upper = "CloudDirectory" + human_friendly = "Cloud Directory" + } + + client { + go_v1_client_typename = "CloudDirectory" + } + + resource_prefix { + correct = "aws_clouddirectory_" + } + + provider_package_correct = "clouddirectory" + doc_prefix = ["clouddirectory_"] + brand = "Amazon" + not_implemented = true +} + +service "servicediscovery" { + + sdk { + id = "ServiceDiscovery" + client_version = [2] + } + + names { + 
provider_name_upper = "ServiceDiscovery" + human_friendly = "Cloud Map" + } + + endpoint_info { + endpoint_api_call = "ListNamespaces" + } + + resource_prefix { + actual = "aws_service_discovery_" + correct = "aws_servicediscovery_" + } + + provider_package_correct = "servicediscovery" + doc_prefix = ["service_discovery_"] + brand = "AWS" +} + +service "cloud9" { + + sdk { + id = "Cloud9" + client_version = [2] + } + + names { + provider_name_upper = "Cloud9" + human_friendly = "Cloud9" + } + + endpoint_info { + endpoint_api_call = "ListEnvironments" + } + + resource_prefix { + correct = "aws_cloud9_" + } + + provider_package_correct = "cloud9" + doc_prefix = ["cloud9_"] + brand = "AWS" +} + +service "cloudformation" { + + sdk { + id = "CloudFormation" + client_version = [2] + } + + names { + provider_name_upper = "CloudFormation" + human_friendly = "CloudFormation" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListStackInstances" + endpoint_api_params = "StackSetName: aws_sdkv2.String(\"test\")" + } + + resource_prefix { + correct = "aws_cloudformation_" + } + + provider_package_correct = "cloudformation" + doc_prefix = ["cloudformation_"] + brand = "AWS" +} + +service "cloudfront" { + + sdk { + id = "CloudFront" + client_version = [2] + } + + names { + provider_name_upper = "CloudFront" + human_friendly = "CloudFront" + } + + endpoint_info { + endpoint_api_call = "ListDistributions" + } + + resource_prefix { + correct = "aws_cloudfront_" + } + + provider_package_correct = "cloudfront" + doc_prefix = ["cloudfront_"] + brand = "AWS" +} + +service "cloudfrontkeyvaluestore" { + + cli_v2_command { + aws_cli_v2_command = "cloudfront-keyvaluestore" + aws_cli_v2_command_no_dashes = "cloudfrontkeyvaluestore" + } + + go_packages { + v1_package = "" + v2_package = "cloudfrontkeyvaluestore" + } + + sdk { + id = "CloudFront KeyValueStore" + client_version = [2] + } + + names { + provider_name_upper = "CloudFrontKeyValueStore" + 
human_friendly = "CloudFront KeyValueStore" + } + + endpoint_info { + endpoint_api_call = "ListKeys" + endpoint_api_params = "KvsARN: aws_sdkv2.String(\"arn:aws:cloudfront::111122223333:key-value-store/MaxAge\")" + } + + resource_prefix { + correct = "aws_cloudfrontkeyvaluestore_" + } + + provider_package_correct = "cloudfrontkeyvaluestore" + doc_prefix = ["cloudfrontkeyvaluestore_"] + brand = "AWS" +} + +service "cloudhsmv2" { + + sdk { + id = "CloudHSM V2" + client_version = [2] + } + + names { + aliases = ["cloudhsm"] + provider_name_upper = "CloudHSMV2" + human_friendly = "CloudHSM" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "DescribeClusters" + } + + resource_prefix { + actual = "aws_cloudhsm_v2_" + correct = "aws_cloudhsmv2_" + } + + provider_package_correct = "cloudhsmv2" + doc_prefix = ["cloudhsm"] + brand = "AWS" +} + +service "cloudsearch" { + + sdk { + id = "CloudSearch" + client_version = [2] + } + + names { + provider_name_upper = "CloudSearch" + human_friendly = "CloudSearch" + } + + endpoint_info { + endpoint_api_call = "ListDomainNames" + } + + resource_prefix { + correct = "aws_cloudsearch_" + } + + provider_package_correct = "cloudsearch" + doc_prefix = ["cloudsearch_"] + brand = "AWS" +} + +service "cloudsearchdomain" { + + sdk { + id = "CloudSearch Domain" + client_version = [1] + } + + names { + provider_name_upper = "CloudSearchDomain" + human_friendly = "CloudSearch Domain" + } + + client { + go_v1_client_typename = "CloudSearchDomain" + } + + resource_prefix { + correct = "aws_cloudsearchdomain_" + } + + provider_package_correct = "cloudsearchdomain" + doc_prefix = ["cloudsearchdomain_"] + brand = "Amazon" + not_implemented = true +} + +service "cloudtrail" { + + sdk { + id = "CloudTrail" + client_version = [2] + } + + names { + provider_name_upper = "CloudTrail" + human_friendly = "CloudTrail" + } + + endpoint_info { + endpoint_api_call = "ListChannels" + } + + resource_prefix { + actual = 
"aws_cloudtrail" + correct = "aws_cloudtrail_" + } + + provider_package_correct = "cloudtrail" + doc_prefix = ["cloudtrail"] + brand = "AWS" +} + +service "cloudwatch" { + + sdk { + id = "CloudWatch" + client_version = [2] + } + + names { + provider_name_upper = "CloudWatch" + human_friendly = "CloudWatch" + } + + endpoint_info { + endpoint_api_call = "ListDashboards" + } + + resource_prefix { + actual = "aws_cloudwatch_(?!(event_|log_|query_))" + correct = "aws_cloudwatch_" + } + + provider_package_correct = "cloudwatch" + doc_prefix = ["cloudwatch_dashboard", "cloudwatch_metric_", "cloudwatch_composite_"] + brand = "AWS" +} + +service "applicationinsights" { + + cli_v2_command { + aws_cli_v2_command = "application-insights" + aws_cli_v2_command_no_dashes = "applicationinsights" + } + + sdk { + id = "Application Insights" + client_version = [2] + } + + names { + provider_name_upper = "ApplicationInsights" + human_friendly = "CloudWatch Application Insights" + } + + endpoint_info { + endpoint_api_call = "CreateApplication" + } + + resource_prefix { + correct = "aws_applicationinsights_" + } + + provider_package_correct = "applicationinsights" + doc_prefix = ["applicationinsights_"] + brand = "AWS" +} + +service "evidently" { + + go_packages { + v1_package = "cloudwatchevidently" + v2_package = "evidently" + } + + sdk { + id = "Evidently" + client_version = [2] + } + + names { + aliases = ["cloudwatchevidently"] + provider_name_upper = "Evidently" + human_friendly = "CloudWatch Evidently" + } + + endpoint_info { + endpoint_api_call = "ListProjects" + } + + resource_prefix { + correct = "aws_evidently_" + } + + provider_package_correct = "evidently" + doc_prefix = ["evidently_"] + brand = "Amazon" +} + +service "internetmonitor" { + + sdk { + id = "InternetMonitor" + client_version = [2] + } + + names { + provider_name_upper = "InternetMonitor" + human_friendly = "CloudWatch Internet Monitor" + } + + endpoint_info { + endpoint_api_call = "ListMonitors" + } + + 
resource_prefix { + correct = "aws_internetmonitor_" + } + + provider_package_correct = "internetmonitor" + doc_prefix = ["internetmonitor_"] + brand = "AWS" +} + +service "logs" { + + go_packages { + v1_package = "cloudwatchlogs" + v2_package = "cloudwatchlogs" + } + + sdk { + id = "CloudWatch Logs" + client_version = [2] + } + + names { + aliases = ["cloudwatchlog", "cloudwatchlogs"] + provider_name_upper = "Logs" + human_friendly = "CloudWatch Logs" + } + + client { + go_v1_client_typename = "CloudWatchLogs" + } + + endpoint_info { + endpoint_api_call = "ListAnomalies" + } + + resource_prefix { + actual = "aws_cloudwatch_(log_|query_)" + correct = "aws_logs_" + } + + provider_package_correct = "logs" + doc_prefix = ["cloudwatch_log_", "cloudwatch_query_"] + brand = "AWS" +} + +service "networkmonitor" { + + sdk { + id = "NetworkMonitor" + client_version = [2] + } + + names { + provider_name_upper = "NetworkMonitor" + human_friendly = "CloudWatch Network Monitor" + } + + endpoint_info { + endpoint_api_call = "ListMonitors" + } + + resource_prefix { + correct = "aws_networkmonitor_" + } + + provider_package_correct = "networkmonitor" + doc_prefix = ["networkmonitor_"] + brand = "Amazon" +} + +service "rum" { + + go_packages { + v1_package = "cloudwatchrum" + v2_package = "rum" + } + + sdk { + id = "RUM" + client_version = [2] + } + + names { + aliases = ["cloudwatchrum"] + provider_name_upper = "RUM" + human_friendly = "CloudWatch RUM" + } + + client { + go_v1_client_typename = "CloudWatchRUM" + } + + endpoint_info { + endpoint_api_call = "ListAppMonitors" + } + + resource_prefix { + correct = "aws_rum_" + } + + provider_package_correct = "rum" + doc_prefix = ["rum_"] + brand = "AWS" +} + +service "synthetics" { + + sdk { + id = "synthetics" + client_version = [2] + } + + names { + provider_name_upper = "Synthetics" + human_friendly = "CloudWatch Synthetics" + } + + endpoint_info { + endpoint_api_call = "ListGroups" + } + + resource_prefix { + correct = 
"aws_synthetics_" + } + + provider_package_correct = "synthetics" + doc_prefix = ["synthetics_"] + brand = "Amazon" +} + +service "codeartifact" { + + sdk { + id = "codeartifact" + client_version = [2] + } + + names { + provider_name_upper = "CodeArtifact" + human_friendly = "CodeArtifact" + } + + endpoint_info { + endpoint_api_call = "ListDomains" + } + + resource_prefix { + correct = "aws_codeartifact_" + } + + provider_package_correct = "codeartifact" + doc_prefix = ["codeartifact_"] + brand = "AWS" +} + +service "codebuild" { + + sdk { + id = "CodeBuild" + client_version = [2] + } + + names { + provider_name_upper = "CodeBuild" + human_friendly = "CodeBuild" + } + + endpoint_info { + endpoint_api_call = "ListBuildBatches" + } + + resource_prefix { + correct = "aws_codebuild_" + } + + provider_package_correct = "codebuild" + doc_prefix = ["codebuild_"] + brand = "AWS" +} + +service "codecommit" { + + sdk { + id = "CodeCommit" + client_version = [2] + } + + names { + provider_name_upper = "CodeCommit" + human_friendly = "CodeCommit" + } + + endpoint_info { + endpoint_api_call = "ListRepositories" + } + + resource_prefix { + correct = "aws_codecommit_" + } + + provider_package_correct = "codecommit" + doc_prefix = ["codecommit_"] + brand = "AWS" +} + +service "deploy" { + + go_packages { + v1_package = "codedeploy" + v2_package = "codedeploy" + } + + sdk { + id = "CodeDeploy" + client_version = [2] + } + + names { + aliases = ["codedeploy"] + provider_name_upper = "Deploy" + human_friendly = "CodeDeploy" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + client { + go_v1_client_typename = "CodeDeploy" + } + + resource_prefix { + actual = "aws_codedeploy_" + correct = "aws_deploy_" + } + + provider_package_correct = "deploy" + doc_prefix = ["codedeploy_"] + brand = "AWS" +} + +service "codeguruprofiler" { + + sdk { + id = "CodeGuruProfiler" + client_version = [2] + } + + names { + provider_name_upper = "CodeGuruProfiler" + human_friendly = 
"CodeGuru Profiler" + } + + endpoint_info { + endpoint_api_call = "ListProfilingGroups" + } + + resource_prefix { + correct = "aws_codeguruprofiler_" + } + + provider_package_correct = "codeguruprofiler" + doc_prefix = ["codeguruprofiler_"] + brand = "AWS" +} + +service "codegurureviewer" { + + cli_v2_command { + aws_cli_v2_command = "codeguru-reviewer" + aws_cli_v2_command_no_dashes = "codegurureviewer" + } + + sdk { + id = "CodeGuru Reviewer" + client_version = [2] + } + + names { + provider_name_upper = "CodeGuruReviewer" + human_friendly = "CodeGuru Reviewer" + } + + endpoint_info { + endpoint_api_call = "ListCodeReviews" + endpoint_api_params = "Type: awstypes.TypePullRequest" + } + + resource_prefix { + correct = "aws_codegurureviewer_" + } + + provider_package_correct = "codegurureviewer" + doc_prefix = ["codegurureviewer_"] + brand = "AWS" +} + +service "codepipeline" { + + sdk { + id = "CodePipeline" + client_version = [2] + } + + names { + provider_name_upper = "CodePipeline" + human_friendly = "CodePipeline" + } + + endpoint_info { + endpoint_api_call = "ListPipelines" + } + + resource_prefix { + actual = "aws_codepipeline" + correct = "aws_codepipeline_" + } + + provider_package_correct = "codepipeline" + doc_prefix = ["codepipeline"] + brand = "AWS" +} + +service "codestar" { + + sdk { + id = "CodeStar" + client_version = [1] + } + + names { + provider_name_upper = "CodeStar" + human_friendly = "CodeStar" + } + + client { + go_v1_client_typename = "CodeStar" + } + + resource_prefix { + correct = "aws_codestar_" + } + + provider_package_correct = "codestar" + doc_prefix = ["codestar_"] + brand = "AWS" + not_implemented = true +} + +service "codestarconnections" { + + cli_v2_command { + aws_cli_v2_command = "codestar-connections" + aws_cli_v2_command_no_dashes = "codestarconnections" + } + + sdk { + id = "CodeStar connections" + client_version = [2] + } + + names { + provider_name_upper = "CodeStarConnections" + human_friendly = "CodeStar Connections" + 
} + + endpoint_info { + endpoint_api_call = "ListConnections" + } + + resource_prefix { + correct = "aws_codestarconnections_" + } + + provider_package_correct = "codestarconnections" + doc_prefix = ["codestarconnections_"] + brand = "AWS" +} + +service "codestarnotifications" { + + cli_v2_command { + aws_cli_v2_command = "codestar-notifications" + aws_cli_v2_command_no_dashes = "codestarnotifications" + } + + sdk { + id = "codestar notifications" + client_version = [2] + } + + names { + provider_name_upper = "CodeStarNotifications" + human_friendly = "CodeStar Notifications" + } + + endpoint_info { + endpoint_api_call = "ListTargets" + } + + resource_prefix { + correct = "aws_codestarnotifications_" + } + + provider_package_correct = "codestarnotifications" + doc_prefix = ["codestarnotifications_"] + brand = "AWS" +} + +service "cognitoidentity" { + + cli_v2_command { + aws_cli_v2_command = "cognito-identity" + aws_cli_v2_command_no_dashes = "cognitoidentity" + } + + sdk { + id = "Cognito Identity" + client_version = [2] + } + + names { + provider_name_upper = "CognitoIdentity" + human_friendly = "Cognito Identity" + } + + endpoint_info { + endpoint_api_call = "ListIdentityPools" + endpoint_api_params = "MaxResults: aws_sdkv2.Int32(1)" + } + + resource_prefix { + actual = "aws_cognito_identity_(?!provider)" + correct = "aws_cognitoidentity_" + } + + provider_package_correct = "cognitoidentity" + doc_prefix = ["cognito_identity_pool"] + brand = "AWS" +} + +service "cognitoidp" { + + cli_v2_command { + aws_cli_v2_command = "cognito-idp" + aws_cli_v2_command_no_dashes = "cognitoidp" + } + + go_packages { + v1_package = "cognitoidentityprovider" + v2_package = "cognitoidentityprovider" + } + + sdk { + id = "Cognito Identity Provider" + client_version = [2] + } + + names { + aliases = ["cognitoidentityprovider"] + provider_name_upper = "CognitoIDP" + human_friendly = "Cognito IDP (Identity Provider)" + } + + client { + go_v1_client_typename = "CognitoIdentityProvider" 
+ } + + endpoint_info { + endpoint_api_call = "ListUserPools" + endpoint_api_params = "MaxResults: aws_sdkv2.Int32(1)" + } + + resource_prefix { + actual = "aws_cognito_(identity_provider|resource|user|risk)" + correct = "aws_cognitoidp_" + } + + provider_package_correct = "cognitoidp" + doc_prefix = ["cognito_identity_provider", "cognito_managed_user", "cognito_resource_", "cognito_user", "cognito_risk"] + brand = "AWS" +} + +service "cognitosync" { + + cli_v2_command { + aws_cli_v2_command = "cognito-sync" + aws_cli_v2_command_no_dashes = "cognitosync" + } + + sdk { + id = "Cognito Sync" + client_version = [1] + } + + names { + provider_name_upper = "CognitoSync" + human_friendly = "Cognito Sync" + } + + client { + go_v1_client_typename = "CognitoSync" + } + + resource_prefix { + correct = "aws_cognitosync_" + } + + provider_package_correct = "cognitosync" + doc_prefix = ["cognitosync_"] + brand = "Amazon" + not_implemented = true +} + +service "comprehend" { + + sdk { + id = "Comprehend" + client_version = [2] + } + + names { + provider_name_upper = "Comprehend" + human_friendly = "Comprehend" + } + + endpoint_info { + endpoint_api_call = "ListDocumentClassifiers" + } + + resource_prefix { + correct = "aws_comprehend_" + } + + provider_package_correct = "comprehend" + doc_prefix = ["comprehend_"] + brand = "AWS" +} + +service "comprehendmedical" { + + sdk { + id = "ComprehendMedical" + client_version = [1] + } + + names { + provider_name_upper = "ComprehendMedical" + human_friendly = "Comprehend Medical" + } + + client { + go_v1_client_typename = "ComprehendMedical" + } + + resource_prefix { + correct = "aws_comprehendmedical_" + } + + provider_package_correct = "comprehendmedical" + doc_prefix = ["comprehendmedical_"] + brand = "Amazon" + not_implemented = true +} + +service "computeoptimizer" { + + cli_v2_command { + aws_cli_v2_command = "compute-optimizer" + aws_cli_v2_command_no_dashes = "computeoptimizer" + } + + sdk { + id = "Compute Optimizer" + 
client_version = [2] + } + + names { + provider_name_upper = "ComputeOptimizer" + human_friendly = "Compute Optimizer" + } + + endpoint_info { + endpoint_api_call = "GetEnrollmentStatus" + } + + resource_prefix { + correct = "aws_computeoptimizer_" + } + + provider_package_correct = "computeoptimizer" + doc_prefix = ["computeoptimizer_"] + brand = "AWS" +} + +service "configservice" { + + sdk { + id = "Config Service" + client_version = [2] + } + + names { + aliases = ["config"] + provider_name_upper = "ConfigService" + human_friendly = "Config" + } + + endpoint_info { + endpoint_api_call = "ListStoredQueries" + } + + resource_prefix { + actual = "aws_config_" + correct = "aws_configservice_" + } + + provider_package_correct = "configservice" + doc_prefix = ["config_"] + brand = "AWS" +} + +service "connect" { + + sdk { + id = "Connect" + client_version = [1] + } + + names { + provider_name_upper = "Connect" + human_friendly = "Connect" + } + + client { + go_v1_client_typename = "Connect" + } + + endpoint_info { + endpoint_api_call = "ListInstances" + } + + resource_prefix { + correct = "aws_connect_" + } + + provider_package_correct = "connect" + doc_prefix = ["connect_"] + brand = "AWS" +} + +service "connectcases" { + + sdk { + id = "ConnectCases" + client_version = [2] + } + + names { + provider_name_upper = "ConnectCases" + human_friendly = "Connect Cases" + } + + client { + go_v1_client_typename = "ConnectCases" + } + + endpoint_info { + endpoint_api_call = "ListDomains" + } + + resource_prefix { + correct = "aws_connectcases_" + } + + provider_package_correct = "connectcases" + doc_prefix = ["connectcases_"] + brand = "AWS" +} + +service "connectcontactlens" { + + cli_v2_command { + aws_cli_v2_command = "connect-contact-lens" + aws_cli_v2_command_no_dashes = "connectcontactlens" + } + + sdk { + id = "Connect Contact Lens" + client_version = [1] + } + + names { + provider_name_upper = "ConnectContactLens" + human_friendly = "Connect Contact Lens" + } + + 
client { + go_v1_client_typename = "ConnectContactLens" + } + + resource_prefix { + correct = "aws_connectcontactlens_" + } + + provider_package_correct = "connectcontactlens" + doc_prefix = ["connectcontactlens_"] + brand = "Amazon" + not_implemented = true +} + +service "customerprofiles" { + + cli_v2_command { + aws_cli_v2_command = "customer-profiles" + aws_cli_v2_command_no_dashes = "customerprofiles" + } + + sdk { + id = "Customer Profiles" + client_version = [2] + } + + names { + provider_name_upper = "CustomerProfiles" + human_friendly = "Connect Customer Profiles" + } + + endpoint_info { + endpoint_api_call = "ListDomains" + } + + resource_prefix { + correct = "aws_customerprofiles_" + } + + provider_package_correct = "customerprofiles" + doc_prefix = ["customerprofiles_"] + brand = "AWS" +} + +service "connectparticipant" { + + sdk { + id = "ConnectParticipant" + client_version = [1] + } + + names { + provider_name_upper = "ConnectParticipant" + human_friendly = "Connect Participant" + } + + client { + go_v1_client_typename = "ConnectParticipant" + } + + resource_prefix { + correct = "aws_connectparticipant_" + } + + provider_package_correct = "connectparticipant" + doc_prefix = ["connectparticipant_"] + brand = "Amazon" + not_implemented = true +} + +service "voiceid" { + + cli_v2_command { + aws_cli_v2_command = "voice-id" + aws_cli_v2_command_no_dashes = "voiceid" + } + + sdk { + id = "Voice ID" + client_version = [1] + } + + names { + provider_name_upper = "VoiceID" + human_friendly = "Connect Voice ID" + } + + client { + go_v1_client_typename = "VoiceID" + } + + resource_prefix { + correct = "aws_voiceid_" + } + + provider_package_correct = "voiceid" + doc_prefix = ["voiceid_"] + brand = "Amazon" + not_implemented = true +} + +service "wisdom" { + + go_packages { + v1_package = "connectwisdomservice" + v2_package = "wisdom" + } + + sdk { + id = "Wisdom" + client_version = [1] + } + + names { + aliases = ["connectwisdomservice"] + provider_name_upper 
= "Wisdom" + human_friendly = "Connect Wisdom" + } + + client { + go_v1_client_typename = "ConnectWisdomService" + } + + resource_prefix { + correct = "aws_wisdom_" + } + + provider_package_correct = "wisdom" + doc_prefix = ["wisdom_"] + brand = "Amazon" + not_implemented = true +} + +service "controltower" { + + sdk { + id = "ControlTower" + client_version = [2] + } + + names { + provider_name_upper = "ControlTower" + human_friendly = "Control Tower" + } + + endpoint_info { + endpoint_api_call = "ListLandingZones" + } + + resource_prefix { + correct = "aws_controltower_" + } + + provider_package_correct = "controltower" + doc_prefix = ["controltower_"] + brand = "AWS" +} + +service "costoptimizationhub" { + + cli_v2_command { + aws_cli_v2_command = "cost-optimization-hub" + aws_cli_v2_command_no_dashes = "costoptimizationhub" + } + + sdk { + id = "Cost Optimization Hub" + client_version = [2] + } + + names { + provider_name_upper = "CostOptimizationHub" + human_friendly = "Cost Optimization Hub" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "GetPreferences" + endpoint_region_override = "us-east-1" + } + + resource_prefix { + correct = "aws_costoptimizationhub_" + } + + provider_package_correct = "costoptimizationhub" + doc_prefix = ["costoptimizationhub_"] + brand = "AWS" +} + +service "cur" { + + go_packages { + v1_package = "costandusagereportservice" + v2_package = "costandusagereportservice" + } + + sdk { + id = "Cost and Usage Report Service" + client_version = [2] + } + + names { + aliases = ["costandusagereportservice"] + provider_name_upper = "CUR" + human_friendly = "Cost and Usage Report" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "DescribeReportDefinitions" + endpoint_region_override = "us-east-1" + } + + resource_prefix { + correct = "aws_cur_" + } + + provider_package_correct = "cur" + doc_prefix = ["cur_"] + brand = "AWS" +} + +service "dataexchange" { + 
+ sdk { + id = "DataExchange" + client_version = [2] + } + + names { + provider_name_upper = "DataExchange" + human_friendly = "Data Exchange" + } + + client { + go_v1_client_typename = "DataExchange" + } + + endpoint_info { + endpoint_api_call = "ListDataSets" + } + + resource_prefix { + correct = "aws_dataexchange_" + } + + provider_package_correct = "dataexchange" + doc_prefix = ["dataexchange_"] + brand = "AWS" +} + +service "datapipeline" { + + sdk { + id = "Data Pipeline" + client_version = [2] + } + + names { + provider_name_upper = "DataPipeline" + human_friendly = "Data Pipeline" + } + + client { + go_v1_client_typename = "DataPipeline" + } + + endpoint_info { + endpoint_api_call = "ListPipelines" + } + + resource_prefix { + correct = "aws_datapipeline_" + } + + provider_package_correct = "datapipeline" + doc_prefix = ["datapipeline_"] + brand = "AWS" +} + +service "datasync" { + + sdk { + id = "DataSync" + client_version = [2] + } + + names { + provider_name_upper = "DataSync" + human_friendly = "DataSync" + } + + endpoint_info { + endpoint_api_call = "ListAgents" + } + + resource_prefix { + correct = "aws_datasync_" + } + + provider_package_correct = "datasync" + doc_prefix = ["datasync_"] + brand = "AWS" +} + +service "datazone" { + + sdk { + id = "DataZone" + client_version = [2] + } + + names { + provider_name_upper = "DataZone" + human_friendly = "DataZone" + } + + endpoint_info { + endpoint_api_call = "ListDomains" + } + + resource_prefix { + correct = "aws_datazone_" + } + + provider_package_correct = "datazone" + doc_prefix = ["datazone_"] + brand = "AWS" +} + +service "detective" { + + sdk { + id = "Detective" + client_version = [2] + } + + names { + provider_name_upper = "Detective" + human_friendly = "Detective" + } + + client { + go_v1_client_typename = "Detective" + } + + endpoint_info { + endpoint_api_call = "ListGraphs" + } + + resource_prefix { + correct = "aws_detective_" + } + + provider_package_correct = "detective" + doc_prefix = 
["detective_"] + brand = "AWS" +} + +service "devicefarm" { + + sdk { + id = "Device Farm" + client_version = [2] + } + + names { + provider_name_upper = "DeviceFarm" + human_friendly = "Device Farm" + } + + endpoint_info { + endpoint_api_call = "ListDeviceInstances" + } + + resource_prefix { + correct = "aws_devicefarm_" + } + + provider_package_correct = "devicefarm" + doc_prefix = ["devicefarm_"] + brand = "AWS" +} + +service "devopsguru" { + + cli_v2_command { + aws_cli_v2_command = "devops-guru" + aws_cli_v2_command_no_dashes = "devopsguru" + } + + sdk { + id = "DevOps Guru" + client_version = [2] + } + + names { + provider_name_upper = "DevOpsGuru" + human_friendly = "DevOps Guru" + } + + endpoint_info { + endpoint_api_call = "DescribeAccountHealth" + } + + resource_prefix { + correct = "aws_devopsguru_" + } + + provider_package_correct = "devopsguru" + doc_prefix = ["devopsguru_"] + brand = "AWS" +} + +service "directconnect" { + + sdk { + id = "Direct Connect" + client_version = [1] + } + + names { + provider_name_upper = "DirectConnect" + human_friendly = "Direct Connect" + } + + client { + go_v1_client_typename = "DirectConnect" + } + + endpoint_info { + endpoint_api_call = "DescribeConnections" + } + + resource_prefix { + actual = "aws_dx_" + correct = "aws_directconnect_" + } + + provider_package_correct = "directconnect" + doc_prefix = ["dx_"] + brand = "AWS" +} + +service "dlm" { + + sdk { + id = "DLM" + client_version = [2] + } + + names { + provider_name_upper = "DLM" + human_friendly = "DLM (Data Lifecycle Manager)" + } + + endpoint_info { + endpoint_api_call = "GetLifecyclePolicies" + } + + resource_prefix { + correct = "aws_dlm_" + } + + provider_package_correct = "dlm" + doc_prefix = ["dlm_"] + brand = "AWS" +} + +service "dms" { + + go_packages { + v1_package = "databasemigrationservice" + v2_package = "databasemigrationservice" + } + + sdk { + id = "Database Migration Service" + client_version = [2] + } + + names { + aliases = 
["databasemigration", "databasemigrationservice"] + provider_name_upper = "DMS" + human_friendly = "DMS (Database Migration)" + } + + client { + go_v1_client_typename = "DatabaseMigrationService" + } + + endpoint_info { + endpoint_api_call = "DescribeCertificates" + } + + resource_prefix { + correct = "aws_dms_" + } + + provider_package_correct = "dms" + doc_prefix = ["dms_"] + brand = "AWS" +} + +service "docdb" { + + sdk { + id = "DocDB" + client_version = [2] + } + + names { + provider_name_upper = "DocDB" + human_friendly = "DocumentDB" + } + + endpoint_info { + endpoint_api_call = "DescribeDBClusters" + } + + resource_prefix { + correct = "aws_docdb_" + } + + provider_package_correct = "docdb" + doc_prefix = ["docdb_"] + brand = "AWS" +} + +service "docdbelastic" { + + cli_v2_command { + aws_cli_v2_command = "docdb-elastic" + aws_cli_v2_command_no_dashes = "docdbelastic" + } + + sdk { + id = "DocDB Elastic" + client_version = [2] + } + + names { + provider_name_upper = "DocDBElastic" + human_friendly = "DocumentDB Elastic" + } + + endpoint_info { + endpoint_api_call = "ListClusters" + } + + resource_prefix { + correct = "aws_docdbelastic_" + } + + provider_package_correct = "docdbelastic" + doc_prefix = ["docdbelastic_"] + brand = "AWS" +} + +service "drs" { + + sdk { + id = "DRS" + client_version = [2] + } + + names { + provider_name_upper = "DRS" + human_friendly = "DRS (Elastic Disaster Recovery)" + } + + endpoint_info { + endpoint_api_call = "DescribeJobs" + } + + resource_prefix { + correct = "aws_drs_" + } + + provider_package_correct = "drs" + doc_prefix = ["drs_"] + brand = "AWS" +} + +service "ds" { + + go_packages { + v1_package = "directoryservice" + v2_package = "directoryservice" + } + + sdk { + id = "Directory Service" + client_version = [2] + } + + names { + aliases = ["directoryservice"] + provider_name_upper = "DS" + human_friendly = "Directory Service" + } + + client { + go_v1_client_typename = "DirectoryService" + } + + endpoint_info { + 
endpoint_api_call = "DescribeDirectories" + } + + resource_prefix { + actual = "aws_directory_service_" + correct = "aws_ds_" + } + + provider_package_correct = "ds" + doc_prefix = ["directory_service_"] + brand = "AWS" +} + +service "dax" { + + sdk { + id = "DAX" + client_version = [2] + } + + names { + provider_name_upper = "DAX" + human_friendly = "DynamoDB Accelerator (DAX)" + } + + endpoint_info { + endpoint_api_call = "DescribeClusters" + } + + resource_prefix { + correct = "aws_dax_" + } + + provider_package_correct = "dax" + doc_prefix = ["dax_"] + brand = "AWS" +} + +service "dynamodbstreams" { + + sdk { + id = "DynamoDB Streams" + client_version = [1] + } + + names { + provider_name_upper = "DynamoDBStreams" + human_friendly = "DynamoDB Streams" + } + + client { + go_v1_client_typename = "DynamoDBStreams" + } + + resource_prefix { + correct = "aws_dynamodbstreams_" + } + + provider_package_correct = "dynamodbstreams" + doc_prefix = ["dynamodbstreams_"] + brand = "Amazon" + not_implemented = true +} + +service "ebs" { + + sdk { + id = "EBS" + client_version = [1] + } + + names { + provider_name_upper = "EBS" + human_friendly = "EBS (Elastic Block Store)" + } + + client { + go_v1_client_typename = "EBS" + } + + resource_prefix { + correct = "aws_ebs_" + } + + provider_package_correct = "ebs" + doc_prefix = ["changewhenimplemented"] + brand = "Amazon" + not_implemented = true +} + +service "imagebuilder" { + + sdk { + id = "imagebuilder" + client_version = [1] + } + + names { + provider_name_upper = "ImageBuilder" + human_friendly = "EC2 Image Builder" + } + + client { + go_v1_client_typename = "Imagebuilder" + } + + endpoint_info { + endpoint_api_call = "ListImages" + } + + resource_prefix { + correct = "aws_imagebuilder_" + } + + provider_package_correct = "imagebuilder" + doc_prefix = ["imagebuilder_"] + brand = "AWS" +} + +service "ec2instanceconnect" { + + cli_v2_command { + aws_cli_v2_command = "ec2-instance-connect" + aws_cli_v2_command_no_dashes = 
"ec2instanceconnect" + } + + sdk { + id = "EC2 Instance Connect" + client_version = [1] + } + + names { + provider_name_upper = "EC2InstanceConnect" + human_friendly = "EC2 Instance Connect" + } + + client { + go_v1_client_typename = "EC2InstanceConnect" + } + + resource_prefix { + correct = "aws_ec2instanceconnect_" + } + + provider_package_correct = "ec2instanceconnect" + doc_prefix = ["ec2instanceconnect_"] + brand = "AWS" + not_implemented = true +} + +service "ecr" { + + sdk { + id = "ECR" + client_version = [2] + } + + names { + provider_name_upper = "ECR" + human_friendly = "ECR (Elastic Container Registry)" + } + + endpoint_info { + endpoint_api_call = "DescribeRepositories" + } + + resource_prefix { + correct = "aws_ecr_" + } + + provider_package_correct = "ecr" + doc_prefix = ["ecr_"] + brand = "AWS" +} + +service "ecrpublic" { + + cli_v2_command { + aws_cli_v2_command = "ecr-public" + aws_cli_v2_command_no_dashes = "ecrpublic" + } + + sdk { + id = "ECR PUBLIC" + client_version = [2] + } + + names { + provider_name_upper = "ECRPublic" + human_friendly = "ECR Public" + } + + endpoint_info { + endpoint_api_call = "DescribeRepositories" + } + + resource_prefix { + correct = "aws_ecrpublic_" + } + + provider_package_correct = "ecrpublic" + doc_prefix = ["ecrpublic_"] + brand = "AWS" +} + +service "ecs" { + + sdk { + id = "ECS" + client_version = [1, 2] + } + + names { + provider_name_upper = "ECS" + human_friendly = "ECS (Elastic Container)" + } + + client { + go_v1_client_typename = "ECS" + } + + endpoint_info { + endpoint_api_call = "ListClusters" + } + + resource_prefix { + correct = "aws_ecs_" + } + + provider_package_correct = "ecs" + doc_prefix = ["ecs_"] + brand = "AWS" +} + +service "efs" { + + sdk { + id = "EFS" + client_version = [2] + } + + names { + provider_name_upper = "EFS" + human_friendly = "EFS (Elastic File System)" + } + + endpoint_info { + endpoint_api_call = "DescribeFileSystems" + } + + resource_prefix { + correct = "aws_efs_" + } + + 
provider_package_correct = "efs" + doc_prefix = ["efs_"] + brand = "AWS" +} + +service "eks" { + + sdk { + id = "EKS" + client_version = [2] + } + + names { + provider_name_upper = "EKS" + human_friendly = "EKS (Elastic Kubernetes)" + } + + endpoint_info { + endpoint_api_call = "ListClusters" + } + + resource_prefix { + correct = "aws_eks_" + } + + provider_package_correct = "eks" + doc_prefix = ["eks_"] + brand = "AWS" +} + +service "elasticbeanstalk" { + + sdk { + id = "Elastic Beanstalk" + client_version = [2] + } + + names { + aliases = ["beanstalk"] + provider_name_upper = "ElasticBeanstalk" + human_friendly = "Elastic Beanstalk" + } + + endpoint_info { + endpoint_api_call = "ListAvailableSolutionStacks" + } + + resource_prefix { + actual = "aws_elastic_beanstalk_" + correct = "aws_elasticbeanstalk_" + } + + provider_package_correct = "elasticbeanstalk" + doc_prefix = ["elastic_beanstalk_"] + brand = "AWS" +} + +service "elasticinference" { + + cli_v2_command { + aws_cli_v2_command = "elastic-inference" + aws_cli_v2_command_no_dashes = "elasticinference" + } + + sdk { + id = "Elastic Inference" + client_version = [1] + } + + names { + provider_name_upper = "ElasticInference" + human_friendly = "Elastic Inference" + } + + client { + go_v1_client_typename = "ElasticInference" + } + + resource_prefix { + correct = "aws_elasticinference_" + } + + provider_package_correct = "elasticinference" + doc_prefix = ["elasticinference_"] + brand = "Amazon" + not_implemented = true +} + +service "elastictranscoder" { + + sdk { + id = "Elastic Transcoder" + client_version = [1] + } + + names { + provider_name_upper = "ElasticTranscoder" + human_friendly = "Elastic Transcoder" + } + + client { + go_v1_client_typename = "ElasticTranscoder" + } + + endpoint_info { + endpoint_api_call = "ListPipelines" + } + + resource_prefix { + correct = "aws_elastictranscoder_" + } + + provider_package_correct = "elastictranscoder" + doc_prefix = ["elastictranscoder_"] + brand = "AWS" +} + 
+service "elasticache" { + + sdk { + id = "ElastiCache" + client_version = [2] + } + + names { + provider_name_upper = "ElastiCache" + human_friendly = "ElastiCache" + } + + endpoint_info { + endpoint_api_call = "DescribeCacheClusters" + } + + resource_prefix { + correct = "aws_elasticache_" + } + + provider_package_correct = "elasticache" + doc_prefix = ["elasticache_"] + brand = "AWS" +} + +service "elasticsearch" { + + cli_v2_command { + aws_cli_v2_command = "es" + aws_cli_v2_command_no_dashes = "es" + } + + go_packages { + v1_package = "elasticsearchservice" + v2_package = "elasticsearchservice" + } + + sdk { + id = "Elasticsearch Service" + client_version = [1] + } + + names { + aliases = ["es", "elasticsearchservice"] + provider_name_upper = "Elasticsearch" + human_friendly = "Elasticsearch" + } + + client { + go_v1_client_typename = "ElasticsearchService" + } + + endpoint_info { + endpoint_api_call = "ListDomainNames" + } + + resource_prefix { + actual = "aws_elasticsearch_" + correct = "aws_es_" + } + + provider_package_correct = "es" + doc_prefix = ["elasticsearch_"] + brand = "AWS" +} + +service "elbv2" { + + go_packages { + v1_package = "elbv2" + v2_package = "elasticloadbalancingv2" + } + + sdk { + id = "Elastic Load Balancing v2" + client_version = [2] + } + + names { + aliases = ["elasticloadbalancingv2"] + provider_name_upper = "ELBV2" + human_friendly = "ELB (Elastic Load Balancing)" + } + + client { + go_v1_client_typename = "ELBV2" + } + + endpoint_info { + endpoint_api_call = "DescribeLoadBalancers" + } + + resource_prefix { + actual = "aws_a?lb(\\b|_listener|_target_group|s|_trust_store)" + correct = "aws_elbv2_" + } + + provider_package_correct = "elbv2" + doc_prefix = ["lbs?\\.", "lb_listener", "lb_target_group", "lb_hosted", "lb_trust_store"] + brand = "" +} +service "elb" { + + go_packages { + v1_package = "elb" + v2_package = "elasticloadbalancing" + } + + sdk { + id = "Elastic Load Balancing" + client_version = [2] + } + + names { + 
aliases = ["elasticloadbalancing"] + provider_name_upper = "ELB" + human_friendly = "ELB Classic" + } + + client { + go_v1_client_typename = "ELB" + } + + endpoint_info { + endpoint_api_call = "DescribeLoadBalancers" + } + + resource_prefix { + actual = "aws_(app_cookie_stickiness_policy|elb|lb_cookie_stickiness_policy|lb_ssl_negotiation_policy|load_balancer_|proxy_protocol_policy)" + correct = "aws_elb_" + } + + provider_package_correct = "elb" + doc_prefix = ["app_cookie_stickiness_policy", "elb", "lb_cookie_stickiness_policy", "lb_ssl_negotiation_policy", "load_balancer", "proxy_protocol_policy"] + brand = "" +} +service "mediaconnect" { + + sdk { + id = "MediaConnect" + client_version = [2] + } + + names { + provider_name_upper = "MediaConnect" + human_friendly = "Elemental MediaConnect" + } + + endpoint_info { + endpoint_api_call = "ListBridges" + } + + resource_prefix { + correct = "aws_mediaconnect_" + } + + provider_package_correct = "mediaconnect" + doc_prefix = ["mediaconnect_"] + brand = "AWS" +} + +service "mediaconvert" { + + sdk { + id = "MediaConvert" + client_version = [2] + } + + names { + provider_name_upper = "MediaConvert" + human_friendly = "Elemental MediaConvert" + } + endpoint_info { + endpoint_api_call = "ListJobs" + } + + resource_prefix { + actual = "aws_media_convert_" + correct = "aws_mediaconvert_" + } + + provider_package_correct = "mediaconvert" + doc_prefix = ["media_convert_"] + brand = "AWS" +} + +service "medialive" { + + sdk { + id = "MediaLive" + client_version = [2] + } + + names { + provider_name_upper = "MediaLive" + human_friendly = "Elemental MediaLive" + } + + endpoint_info { + endpoint_api_call = "ListOfferings" + } + + resource_prefix { + correct = "aws_medialive_" + } + + provider_package_correct = "medialive" + doc_prefix = ["medialive_"] + brand = "AWS" +} + +service "mediapackage" { + + sdk { + id = "MediaPackage" + client_version = [2] + } + + names { + provider_name_upper = "MediaPackage" + human_friendly = 
"Elemental MediaPackage" + } + + endpoint_info { + endpoint_api_call = "ListChannels" + } + + resource_prefix { + actual = "aws_media_package_" + correct = "aws_mediapackage_" + } + + provider_package_correct = "mediapackage" + doc_prefix = ["media_package_"] + brand = "AWS" +} + +service "mediapackagevod" { + + cli_v2_command { + aws_cli_v2_command = "mediapackage-vod" + aws_cli_v2_command_no_dashes = "mediapackagevod" + } + + sdk { + id = "MediaPackage Vod" + client_version = [1] + } + + names { + provider_name_upper = "MediaPackageVOD" + human_friendly = "Elemental MediaPackage VOD" + } + + client { + go_v1_client_typename = "MediaPackageVod" + } + + resource_prefix { + correct = "aws_mediapackagevod_" + } + + provider_package_correct = "mediapackagevod" + doc_prefix = ["mediapackagevod_"] + brand = "AWS" + not_implemented = true +} + +service "mediastore" { + + sdk { + id = "MediaStore" + client_version = [2] + } + + names { + provider_name_upper = "MediaStore" + human_friendly = "Elemental MediaStore" + } + + endpoint_info { + endpoint_api_call = "ListContainers" + } + + resource_prefix { + actual = "aws_media_store_" + correct = "aws_mediastore_" + } + + provider_package_correct = "mediastore" + doc_prefix = ["media_store_"] + brand = "AWS" +} + +service "mediastoredata" { + + cli_v2_command { + aws_cli_v2_command = "mediastore-data" + aws_cli_v2_command_no_dashes = "mediastoredata" + } + + sdk { + id = "MediaStore Data" + client_version = [1] + } + + names { + provider_name_upper = "MediaStoreData" + human_friendly = "Elemental MediaStore Data" + } + + client { + go_v1_client_typename = "MediaStoreData" + } + + resource_prefix { + correct = "aws_mediastoredata_" + } + + provider_package_correct = "mediastoredata" + doc_prefix = ["mediastoredata_"] + brand = "AWS" + not_implemented = true +} + +service "mediatailor" { + + sdk { + id = "MediaTailor" + client_version = [1] + } + + names { + provider_name_upper = "MediaTailor" + human_friendly = "Elemental 
MediaTailor" + } + + client { + go_v1_client_typename = "MediaTailor" + } + + resource_prefix { + correct = "aws_mediatailor_" + } + + provider_package_correct = "mediatailor" + doc_prefix = ["media_tailor_"] + brand = "AWS" + not_implemented = true +} + +service "emr" { + + sdk { + id = "EMR" + client_version = [1, 2] + } + + names { + provider_name_upper = "EMR" + human_friendly = "EMR" + } + + client { + go_v1_client_typename = "EMR" + } + + endpoint_info { + endpoint_api_call = "ListClusters" + } + + resource_prefix { + correct = "aws_emr_" + } + + provider_package_correct = "emr" + doc_prefix = ["emr_"] + brand = "AWS" +} + +service "emrcontainers" { + + cli_v2_command { + aws_cli_v2_command = "emr-containers" + aws_cli_v2_command_no_dashes = "emrcontainers" + } + + sdk { + id = "EMR containers" + client_version = [1] + } + + names { + provider_name_upper = "EMRContainers" + human_friendly = "EMR Containers" + } + + client { + go_v1_client_typename = "EMRContainers" + } + + endpoint_info { + endpoint_api_call = "ListVirtualClusters" + } + + resource_prefix { + correct = "aws_emrcontainers_" + } + + provider_package_correct = "emrcontainers" + doc_prefix = ["emrcontainers_"] + brand = "AWS" +} + +service "emrserverless" { + + cli_v2_command { + aws_cli_v2_command = "emr-serverless" + aws_cli_v2_command_no_dashes = "emrserverless" + } + + sdk { + id = "EMR Serverless" + client_version = [2] + } + + names { + provider_name_upper = "EMRServerless" + human_friendly = "EMR Serverless" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + correct = "aws_emrserverless_" + } + + provider_package_correct = "emrserverless" + doc_prefix = ["emrserverless_"] + brand = "AWS" +} + +service "events" { + + go_packages { + v1_package = "eventbridge" + v2_package = "eventbridge" + } + + sdk { + id = "EventBridge" + client_version = [2] + } + + names { + aliases = ["eventbridge", "cloudwatchevents"] + provider_name_upper = "Events" + 
human_friendly = "EventBridge" + } + + endpoint_info { + endpoint_api_call = "ListEventBuses" + } + + resource_prefix { + actual = "aws_cloudwatch_event_" + correct = "aws_events_" + } + + provider_package_correct = "events" + doc_prefix = ["cloudwatch_event_"] + brand = "AWS" +} + +service "schemas" { + + sdk { + id = "schemas" + client_version = [2] + } + + names { + provider_name_upper = "Schemas" + human_friendly = "EventBridge Schemas" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListRegistries" + } + + resource_prefix { + correct = "aws_schemas_" + } + + provider_package_correct = "schemas" + doc_prefix = ["schemas_"] + brand = "AWS" +} + +service "fis" { + + sdk { + id = "fis" + client_version = [2] + } + + names { + provider_name_upper = "FIS" + human_friendly = "FIS (Fault Injection Simulator)" + } + + endpoint_info { + endpoint_api_call = "ListExperiments" + } + + resource_prefix { + correct = "aws_fis_" + } + + provider_package_correct = "fis" + doc_prefix = ["fis_"] + brand = "AWS" +} + +service "finspace" { + + sdk { + id = "finspace" + client_version = [2] + } + + names { + provider_name_upper = "FinSpace" + human_friendly = "FinSpace" + } + + endpoint_info { + endpoint_api_call = "ListEnvironments" + } + + resource_prefix { + correct = "aws_finspace_" + } + + provider_package_correct = "finspace" + doc_prefix = ["finspace_"] + brand = "AWS" +} + +service "finspacedata" { + + cli_v2_command { + aws_cli_v2_command = "finspace-data" + aws_cli_v2_command_no_dashes = "finspacedata" + } + + sdk { + id = "finspace data" + client_version = [1] + } + + names { + provider_name_upper = "FinSpaceData" + human_friendly = "FinSpace Data" + } + + client { + go_v1_client_typename = "FinSpaceData" + } + + resource_prefix { + correct = "aws_finspacedata_" + } + + provider_package_correct = "finspacedata" + doc_prefix = ["finspacedata_"] + brand = "Amazon" + not_implemented = true +} + +service "fms" { + + sdk { + id = 
"FMS" + client_version = [2] + } + + names { + provider_name_upper = "FMS" + human_friendly = "FMS (Firewall Manager)" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListAppsLists" + endpoint_api_params = "MaxResults: aws_sdkv2.Int32(1)" + } + + resource_prefix { + correct = "aws_fms_" + } + + provider_package_correct = "fms" + doc_prefix = ["fms_"] + brand = "AWS" +} + +service "forecast" { + + go_packages { + v1_package = "forecastservice" + v2_package = "forecast" + } + + sdk { + id = "forecast" + client_version = [1] + } + + names { + aliases = ["forecastservice"] + provider_name_upper = "Forecast" + human_friendly = "Forecast" + } + + client { + go_v1_client_typename = "ForecastService" + } + + resource_prefix { + correct = "aws_forecast_" + } + + provider_package_correct = "forecast" + doc_prefix = ["forecast_"] + brand = "Amazon" + not_implemented = true +} + +service "forecastquery" { + + go_packages { + v1_package = "forecastqueryservice" + v2_package = "forecastquery" + } + + sdk { + id = "forecastquery" + client_version = [1] + } + + names { + aliases = ["forecastqueryservice"] + provider_name_upper = "ForecastQuery" + human_friendly = "Forecast Query" + } + + client { + go_v1_client_typename = "ForecastQueryService" + } + + resource_prefix { + correct = "aws_forecastquery_" + } + + provider_package_correct = "forecastquery" + doc_prefix = ["forecastquery_"] + brand = "Amazon" + not_implemented = true +} + +service "frauddetector" { + + sdk { + id = "FraudDetector" + client_version = [1] + } + + names { + provider_name_upper = "FraudDetector" + human_friendly = "Fraud Detector" + } + + client { + go_v1_client_typename = "FraudDetector" + } + + resource_prefix { + correct = "aws_frauddetector_" + } + + provider_package_correct = "frauddetector" + doc_prefix = ["frauddetector_"] + brand = "Amazon" + not_implemented = true +} + +service "fsx" { + + sdk { + id = "FSx" + client_version = [1] + } + + names { + 
provider_name_upper = "FSx" + human_friendly = "FSx" + } + + client { + go_v1_client_typename = "FSx" + } + + endpoint_info { + endpoint_api_call = "DescribeFileSystems" + } + + resource_prefix { + correct = "aws_fsx_" + } + + provider_package_correct = "fsx" + doc_prefix = ["fsx_"] + brand = "AWS" +} + +service "gamelift" { + + sdk { + id = "GameLift" + client_version = [1] + } + + names { + provider_name_upper = "GameLift" + human_friendly = "GameLift" + } + + client { + go_v1_client_typename = "GameLift" + } + + endpoint_info { + endpoint_api_call = "ListGameServerGroups" + } + + resource_prefix { + correct = "aws_gamelift_" + } + + provider_package_correct = "gamelift" + doc_prefix = ["gamelift_"] + brand = "AWS" +} + +service "globalaccelerator" { + + sdk { + id = "Global Accelerator" + client_version = [2] + } + + names { + provider_name_upper = "GlobalAccelerator" + human_friendly = "Global Accelerator" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListAccelerators" + endpoint_region_override = "us-west-2" + } + + resource_prefix { + correct = "aws_globalaccelerator_" + } + + provider_package_correct = "globalaccelerator" + doc_prefix = ["globalaccelerator_"] + brand = "AWS" +} + +service "glue" { + + sdk { + id = "Glue" + client_version = [1] + } + + names { + provider_name_upper = "Glue" + human_friendly = "Glue" + } + + client { + go_v1_client_typename = "Glue" + } + + endpoint_info { + endpoint_api_call = "ListRegistries" + } + + resource_prefix { + correct = "aws_glue_" + } + + provider_package_correct = "glue" + doc_prefix = ["glue_"] + brand = "AWS" +} + +service "databrew" { + + sdk { + id = "DataBrew" + client_version = [2] + } + + names { + aliases = ["gluedatabrew"] + provider_name_upper = "DataBrew" + human_friendly = "Glue DataBrew" + } + + endpoint_info { + endpoint_api_call = "ListProjects" + } + + resource_prefix { + correct = "aws_databrew_" + } + + provider_package_correct = "databrew" + 
doc_prefix = ["databrew_"] + brand = "AWS" +} + +service "groundstation" { + + sdk { + id = "GroundStation" + client_version = [2] + } + + names { + provider_name_upper = "GroundStation" + human_friendly = "Ground Station" + } + + endpoint_info { + endpoint_api_call = "ListConfigs" + } + + resource_prefix { + correct = "aws_groundstation_" + } + + provider_package_correct = "groundstation" + doc_prefix = ["groundstation_"] + brand = "AWS" +} + +service "guardduty" { + + sdk { + id = "GuardDuty" + client_version = [1, 2] + } + + names { + provider_name_upper = "GuardDuty" + human_friendly = "GuardDuty" + } + + client { + go_v1_client_typename = "GuardDuty" + } + + endpoint_info { + endpoint_api_call = "ListDetectors" + } + + resource_prefix { + correct = "aws_guardduty_" + } + + provider_package_correct = "guardduty" + doc_prefix = ["guardduty_"] + brand = "AWS" +} + +service "health" { + + sdk { + id = "Health" + client_version = [1] + } + + names { + provider_name_upper = "Health" + human_friendly = "Health" + } + + client { + go_v1_client_typename = "Health" + } + + resource_prefix { + correct = "aws_health_" + } + + provider_package_correct = "health" + doc_prefix = ["health_"] + brand = "AWS" + not_implemented = true +} + +service "healthlake" { + + sdk { + id = "HealthLake" + client_version = [2] + } + + names { + provider_name_upper = "HealthLake" + human_friendly = "HealthLake" + } + + endpoint_info { + endpoint_api_call = "ListFHIRDatastores" + } + + resource_prefix { + correct = "aws_healthlake_" + } + + provider_package_correct = "healthlake" + doc_prefix = ["healthlake_"] + brand = "AWS" +} + +service "honeycode" { + + sdk { + id = "Honeycode" + client_version = [1] + } + + names { + provider_name_upper = "Honeycode" + human_friendly = "Honeycode" + } + + client { + go_v1_client_typename = "Honeycode" + } + + resource_prefix { + correct = "aws_honeycode_" + } + + provider_package_correct = "honeycode" + doc_prefix = ["honeycode_"] + brand = "Amazon" + 
not_implemented = true +} + +service "iam" { + + sdk { + id = "IAM" + client_version = [2] + } + + names { + provider_name_upper = "IAM" + human_friendly = "IAM (Identity & Access Management)" + } + + client { + go_v1_client_typename = "IAM" + } + + env_var { + deprecated_env_var = "AWS_IAM_ENDPOINT" + tf_aws_env_var = "TF_AWS_IAM_ENDPOINT" + } + endpoint_info { + endpoint_api_call = "ListRoles" + } + + resource_prefix { + correct = "aws_iam_" + } + + provider_package_correct = "iam" + doc_prefix = ["iam_"] + brand = "AWS" +} + +service "inspector" { + + sdk { + id = "Inspector" + client_version = [1] + } + + names { + provider_name_upper = "Inspector" + human_friendly = "Inspector Classic" + } + + client { + go_v1_client_typename = "Inspector" + } + + endpoint_info { + endpoint_api_call = "ListRulesPackages" + } + + resource_prefix { + correct = "aws_inspector_" + } + + provider_package_correct = "inspector" + doc_prefix = ["inspector_"] + brand = "AWS" +} + +service "inspector2" { + + sdk { + id = "Inspector2" + client_version = [2] + } + + names { + aliases = ["inspectorv2"] + provider_name_upper = "Inspector2" + human_friendly = "Inspector" + } + endpoint_info { + endpoint_api_call = "ListAccountPermissions" + } + + resource_prefix { + correct = "aws_inspector2_" + } + + provider_package_correct = "inspector2" + doc_prefix = ["inspector2_"] + brand = "AWS" +} + +service "iot1clickdevices" { + + cli_v2_command { + aws_cli_v2_command = "iot1click-devices" + aws_cli_v2_command_no_dashes = "iot1clickdevices" + } + + go_packages { + v1_package = "iot1clickdevicesservice" + v2_package = "iot1clickdevicesservice" + } + + sdk { + id = "IoT 1Click Devices Service" + client_version = [1] + } + + names { + aliases = ["iot1clickdevicesservice"] + provider_name_upper = "IoT1ClickDevices" + human_friendly = "IoT 1-Click Devices" + } + + client { + go_v1_client_typename = "IoT1ClickDevicesService" + } + + resource_prefix { + correct = "aws_iot1clickdevices_" + } + + 
provider_package_correct = "iot1clickdevices" + doc_prefix = ["iot1clickdevices_"] + brand = "AWS" + not_implemented = true +} + +service "iot1clickprojects" { + + cli_v2_command { + aws_cli_v2_command = "iot1click-projects" + aws_cli_v2_command_no_dashes = "iot1clickprojects" + } + + sdk { + id = "IoT 1Click Projects" + client_version = [1] + } + + names { + provider_name_upper = "IoT1ClickProjects" + human_friendly = "IoT 1-Click Projects" + } + + client { + go_v1_client_typename = "IoT1ClickProjects" + } + + resource_prefix { + correct = "aws_iot1clickprojects_" + } + + provider_package_correct = "iot1clickprojects" + doc_prefix = ["iot1clickprojects_"] + brand = "AWS" + not_implemented = true +} + +service "iotanalytics" { + + sdk { + id = "IoTAnalytics" + client_version = [2] + } + + names { + provider_name_upper = "IoTAnalytics" + human_friendly = "IoT Analytics" + } + + client { + go_v1_client_typename = "IoTAnalytics" + } + + endpoint_info { + endpoint_api_call = "ListChannels" + } + + resource_prefix { + correct = "aws_iotanalytics_" + } + + provider_package_correct = "iotanalytics" + doc_prefix = ["iotanalytics_"] + brand = "AWS" +} + +service "iotdata" { + + cli_v2_command { + aws_cli_v2_command = "iot-data" + aws_cli_v2_command_no_dashes = "iotdata" + } + + go_packages { + v1_package = "iotdataplane" + v2_package = "iotdataplane" + } + + sdk { + id = "IoT Data Plane" + client_version = [1] + } + + names { + aliases = ["iotdataplane"] + provider_name_upper = "IoTData" + human_friendly = "IoT Data Plane" + } + + client { + go_v1_client_typename = "IoTDataPlane" + } + + resource_prefix { + correct = "aws_iotdata_" + } + + provider_package_correct = "iotdata" + doc_prefix = ["iotdata_"] + brand = "AWS" + not_implemented = true +} + +service "iotdeviceadvisor" { + + sdk { + id = "IotDeviceAdvisor" + client_version = [1] + } + + names { + provider_name_upper = "IoTDeviceAdvisor" + human_friendly = "IoT Device Management" + } + + client { + 
go_v1_client_typename = "IoTDeviceAdvisor" + } + + resource_prefix { + correct = "aws_iotdeviceadvisor_" + } + + provider_package_correct = "iotdeviceadvisor" + doc_prefix = ["iotdeviceadvisor_"] + brand = "AWS" + not_implemented = true +} + +service "iotevents" { + + sdk { + id = "IoT Events" + client_version = [2] + } + + names { + provider_name_upper = "IoTEvents" + human_friendly = "IoT Events" + } + + client { + go_v1_client_typename = "IoTEvents" + } + + endpoint_info { + endpoint_api_call = "ListAlarmModels" + } + + resource_prefix { + correct = "aws_iotevents_" + } + + provider_package_correct = "iotevents" + doc_prefix = ["iotevents_"] + brand = "AWS" +} + +service "ioteventsdata" { + + cli_v2_command { + aws_cli_v2_command = "iotevents-data" + aws_cli_v2_command_no_dashes = "ioteventsdata" + } + + sdk { + id = "IoT Events Data" + client_version = [1] + } + + names { + provider_name_upper = "IoTEventsData" + human_friendly = "IoT Events Data" + } + + client { + go_v1_client_typename = "IoTEventsData" + } + + resource_prefix { + correct = "aws_ioteventsdata_" + } + + provider_package_correct = "ioteventsdata" + doc_prefix = ["ioteventsdata_"] + brand = "AWS" + not_implemented = true +} + +service "iotfleethub" { + + sdk { + id = "IoTFleetHub" + client_version = [1] + } + + names { + provider_name_upper = "IoTFleetHub" + human_friendly = "IoT Fleet Hub" + } + + client { + go_v1_client_typename = "IoTFleetHub" + } + + resource_prefix { + correct = "aws_iotfleethub_" + } + + provider_package_correct = "iotfleethub" + doc_prefix = ["iotfleethub_"] + brand = "AWS" + not_implemented = true +} + +service "greengrass" { + + sdk { + id = "Greengrass" + client_version = [2] + } + + names { + provider_name_upper = "Greengrass" + human_friendly = "IoT Greengrass" + } + + client { + go_v1_client_typename = "Greengrass" + } + + endpoint_info { + endpoint_api_call = "ListGroups" + } + + resource_prefix { + correct = "aws_greengrass_" + } + + provider_package_correct = 
"greengrass" + doc_prefix = ["greengrass_"] + brand = "AWS" +} + +service "greengrassv2" { + + sdk { + id = "GreengrassV2" + client_version = [1] + } + + names { + provider_name_upper = "GreengrassV2" + human_friendly = "IoT Greengrass V2" + } + + client { + go_v1_client_typename = "GreengrassV2" + } + + resource_prefix { + correct = "aws_greengrassv2_" + } + + provider_package_correct = "greengrassv2" + doc_prefix = ["greengrassv2_"] + brand = "AWS" + not_implemented = true +} + +service "iotjobsdata" { + + cli_v2_command { + aws_cli_v2_command = "iot-jobs-data" + aws_cli_v2_command_no_dashes = "iotjobsdata" + } + + go_packages { + v1_package = "iotjobsdataplane" + v2_package = "iotjobsdataplane" + } + + sdk { + id = "IoT Jobs Data Plane" + client_version = [1] + } + + names { + aliases = ["iotjobsdataplane"] + provider_name_upper = "IoTJobsData" + human_friendly = "IoT Jobs Data Plane" + } + + client { + go_v1_client_typename = "IoTJobsDataPlane" + } + + resource_prefix { + correct = "aws_iotjobsdata_" + } + + provider_package_correct = "iotjobsdata" + doc_prefix = ["iotjobsdata_"] + brand = "AWS" + not_implemented = true +} + +service "iotsecuretunneling" { + + sdk { + id = "IoTSecureTunneling" + client_version = [1] + } + + names { + provider_name_upper = "IoTSecureTunneling" + human_friendly = "IoT Secure Tunneling" + } + + client { + go_v1_client_typename = "IoTSecureTunneling" + } + + resource_prefix { + correct = "aws_iotsecuretunneling_" + } + + provider_package_correct = "iotsecuretunneling" + doc_prefix = ["iotsecuretunneling_"] + brand = "AWS" + not_implemented = true +} + +service "iotsitewise" { + + sdk { + id = "IoTSiteWise" + client_version = [1] + } + + names { + provider_name_upper = "IoTSiteWise" + human_friendly = "IoT SiteWise" + } + + client { + go_v1_client_typename = "IoTSiteWise" + } + + resource_prefix { + correct = "aws_iotsitewise_" + } + + provider_package_correct = "iotsitewise" + doc_prefix = ["iotsitewise_"] + brand = "AWS" + 
not_implemented = true +} + +service "iotthingsgraph" { + + sdk { + id = "IoTThingsGraph" + client_version = [1] + } + + names { + provider_name_upper = "IoTThingsGraph" + human_friendly = "IoT Things Graph" + } + + client { + go_v1_client_typename = "IoTThingsGraph" + } + + resource_prefix { + correct = "aws_iotthingsgraph_" + } + + provider_package_correct = "iotthingsgraph" + doc_prefix = ["iotthingsgraph_"] + brand = "AWS" + not_implemented = true +} + +service "iottwinmaker" { + + sdk { + id = "IoTTwinMaker" + client_version = [1] + } + + names { + provider_name_upper = "IoTTwinMaker" + human_friendly = "IoT TwinMaker" + } + + client { + go_v1_client_typename = "IoTTwinMaker" + } + + resource_prefix { + correct = "aws_iottwinmaker_" + } + + provider_package_correct = "iottwinmaker" + doc_prefix = ["iottwinmaker_"] + brand = "AWS" + not_implemented = true +} + +service "iotwireless" { + + sdk { + id = "IoT Wireless" + client_version = [1] + } + + names { + provider_name_upper = "IoTWireless" + human_friendly = "IoT Wireless" + } + + client { + go_v1_client_typename = "IoTWireless" + } + + resource_prefix { + correct = "aws_iotwireless_" + } + + provider_package_correct = "iotwireless" + doc_prefix = ["iotwireless_"] + brand = "AWS" + not_implemented = true +} + +service "ivs" { + + sdk { + id = "ivs" + client_version = [1] + } + + names { + provider_name_upper = "IVS" + human_friendly = "IVS (Interactive Video)" + } + + client { + go_v1_client_typename = "IVS" + } + + endpoint_info { + endpoint_api_call = "ListChannels" + } + + resource_prefix { + correct = "aws_ivs_" + } + + provider_package_correct = "ivs" + doc_prefix = ["ivs_"] + brand = "AWS" +} + +service "ivschat" { + + sdk { + id = "ivschat" + client_version = [2] + } + + names { + provider_name_upper = "IVSChat" + human_friendly = "IVS (Interactive Video) Chat" + } + + endpoint_info { + endpoint_api_call = "ListRooms" + } + + resource_prefix { + correct = "aws_ivschat_" + } + + provider_package_correct 
= "ivschat" + doc_prefix = ["ivschat_"] + brand = "AWS" +} + +service "kendra" { + + sdk { + id = "kendra" + client_version = [2] + } + + names { + provider_name_upper = "Kendra" + human_friendly = "Kendra" + } + + client { + go_v1_client_typename = "Kendra" + } + + endpoint_info { + endpoint_api_call = "ListIndices" + } + + resource_prefix { + correct = "aws_kendra_" + } + + provider_package_correct = "kendra" + doc_prefix = ["kendra_"] + brand = "AWS" +} + +service "keyspaces" { + + sdk { + id = "Keyspaces" + client_version = [2] + } + + names { + provider_name_upper = "Keyspaces" + human_friendly = "Keyspaces (for Apache Cassandra)" + } + + endpoint_info { + endpoint_api_call = "ListKeyspaces" + } + + resource_prefix { + correct = "aws_keyspaces_" + } + + provider_package_correct = "keyspaces" + doc_prefix = ["keyspaces_"] + brand = "AWS" +} + +service "kinesis" { + + sdk { + id = "Kinesis" + client_version = [2] + } + + names { + provider_name_upper = "Kinesis" + human_friendly = "Kinesis" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListStreams" + } + + resource_prefix { + actual = "aws_kinesis_stream" + correct = "aws_kinesis_" + } + + provider_package_correct = "kinesis" + doc_prefix = ["kinesis_stream", "kinesis_resource_policy"] + brand = "AWS" +} + +service "kinesisanalytics" { + + sdk { + id = "Kinesis Analytics" + client_version = [1] + } + + names { + provider_name_upper = "KinesisAnalytics" + human_friendly = "Kinesis Analytics" + } + + client { + go_v1_client_typename = "KinesisAnalytics" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + actual = "aws_kinesis_analytics_" + correct = "aws_kinesisanalytics_" + } + + provider_package_correct = "kinesisanalytics" + doc_prefix = ["kinesis_analytics_"] + brand = "AWS" +} + +service "kinesisanalyticsv2" { + + sdk { + id = "Kinesis Analytics V2" + client_version = [1] + } + + names { + provider_name_upper = 
"KinesisAnalyticsV2" + human_friendly = "Kinesis Analytics V2" + } + + client { + go_v1_client_typename = "KinesisAnalyticsV2" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + correct = "aws_kinesisanalyticsv2_" + } + + provider_package_correct = "kinesisanalyticsv2" + doc_prefix = ["kinesisanalyticsv2_"] + brand = "AWS" +} + +service "firehose" { + + sdk { + id = "Firehose" + client_version = [2] + } + + names { + provider_name_upper = "Firehose" + human_friendly = "Kinesis Firehose" + } + + endpoint_info { + endpoint_api_call = "ListDeliveryStreams" + } + + resource_prefix { + actual = "aws_kinesis_firehose_" + correct = "aws_firehose_" + } + + provider_package_correct = "firehose" + doc_prefix = ["kinesis_firehose_"] + brand = "AWS" +} + +service "kinesisvideo" { + + sdk { + id = "Kinesis Video" + client_version = [1] + } + + names { + provider_name_upper = "KinesisVideo" + human_friendly = "Kinesis Video" + } + + client { + go_v1_client_typename = "KinesisVideo" + } + + endpoint_info { + endpoint_api_call = "ListStreams" + } + + resource_prefix { + correct = "aws_kinesisvideo_" + } + + provider_package_correct = "kinesisvideo" + doc_prefix = ["kinesis_video_"] + brand = "AWS" +} + +service "kinesisvideoarchivedmedia" { + + cli_v2_command { + aws_cli_v2_command = "kinesis-video-archived-media" + aws_cli_v2_command_no_dashes = "kinesisvideoarchivedmedia" + } + + sdk { + id = "Kinesis Video Archived Media" + client_version = [1] + } + + names { + provider_name_upper = "KinesisVideoArchivedMedia" + human_friendly = "Kinesis Video Archived Media" + } + + client { + go_v1_client_typename = "KinesisVideoArchivedMedia" + } + + resource_prefix { + correct = "aws_kinesisvideoarchivedmedia_" + } + + provider_package_correct = "kinesisvideoarchivedmedia" + doc_prefix = ["kinesisvideoarchivedmedia_"] + brand = "Amazon" + not_implemented = true +} + +service "kinesisvideomedia" { + + cli_v2_command { + aws_cli_v2_command = 
"kinesis-video-media" + aws_cli_v2_command_no_dashes = "kinesisvideomedia" + } + + sdk { + id = "Kinesis Video Media" + client_version = [1] + } + + names { + provider_name_upper = "KinesisVideoMedia" + human_friendly = "Kinesis Video Media" + } + + client { + go_v1_client_typename = "KinesisVideoMedia" + } + + resource_prefix { + correct = "aws_kinesisvideomedia_" + } + + provider_package_correct = "kinesisvideomedia" + doc_prefix = ["kinesisvideomedia_"] + brand = "Amazon" + not_implemented = true +} + +service "kinesisvideosignaling" { + + cli_v2_command { + aws_cli_v2_command = "kinesis-video-signaling" + aws_cli_v2_command_no_dashes = "kinesisvideosignaling" + } + + go_packages { + v1_package = "kinesisvideosignalingchannels" + v2_package = "kinesisvideosignaling" + } + + sdk { + id = "Kinesis Video Signaling" + client_version = [1] + } + + names { + aliases = ["kinesisvideosignalingchannels"] + provider_name_upper = "KinesisVideoSignaling" + human_friendly = "Kinesis Video Signaling" + } + + client { + go_v1_client_typename = "KinesisVideoSignalingChannels" + } + + resource_prefix { + correct = "aws_kinesisvideosignaling_" + } + + provider_package_correct = "kinesisvideosignaling" + doc_prefix = ["kinesisvideosignaling_"] + brand = "Amazon" + not_implemented = true +} + +service "kms" { + + sdk { + id = "KMS" + client_version = [2] + } + + names { + provider_name_upper = "KMS" + human_friendly = "KMS (Key Management)" + } + + endpoint_info { + endpoint_api_call = "ListKeys" + } + + resource_prefix { + correct = "aws_kms_" + } + + provider_package_correct = "kms" + doc_prefix = ["kms_"] + brand = "AWS" +} + +service "lakeformation" { + + sdk { + id = "LakeFormation" + client_version = [2] + } + + names { + provider_name_upper = "LakeFormation" + human_friendly = "Lake Formation" + } + + endpoint_info { + endpoint_api_call = "ListResources" + } + + resource_prefix { + correct = "aws_lakeformation_" + } + + provider_package_correct = "lakeformation" + doc_prefix 
= ["lakeformation_"] + brand = "AWS" +} + +service "lambda" { + + sdk { + id = "Lambda" + client_version = [2] + } + + names { + provider_name_upper = "Lambda" + human_friendly = "Lambda" + } + + endpoint_info { + endpoint_api_call = "ListFunctions" + } + + resource_prefix { + correct = "aws_lambda_" + } + + provider_package_correct = "lambda" + doc_prefix = ["lambda_"] + brand = "AWS" +} + +service "launchwizard" { + + cli_v2_command { + aws_cli_v2_command = "launch-wizard" + aws_cli_v2_command_no_dashes = "launchwizard" + } + + sdk { + id = "Launch Wizard" + client_version = [2] + } + + names { + provider_name_upper = "LaunchWizard" + human_friendly = "Launch Wizard" + } + + endpoint_info { + endpoint_api_call = "ListWorkloads" + } + + resource_prefix { + correct = "aws_launchwizard_" + } + + provider_package_correct = "launchwizard" + doc_prefix = ["launchwizard_"] + brand = "AWS" +} + +service "lexmodels" { + + cli_v2_command { + aws_cli_v2_command = "lex-models" + aws_cli_v2_command_no_dashes = "lexmodels" + } + + go_packages { + v1_package = "lexmodelbuildingservice" + v2_package = "lexmodelbuildingservice" + } + + sdk { + id = "Lex Model Building Service" + client_version = [1] + } + + names { + aliases = ["lexmodelbuilding", "lexmodelbuildingservice", "lex"] + provider_name_upper = "LexModels" + human_friendly = "Lex Model Building" + } + + client { + go_v1_client_typename = "LexModelBuildingService" + } + + endpoint_info { + endpoint_api_call = "GetBots" + } + + resource_prefix { + actual = "aws_lex_" + correct = "aws_lexmodels_" + } + + provider_package_correct = "lexmodels" + doc_prefix = ["lex_"] + brand = "AWS" +} + +service "lexv2models" { + + cli_v2_command { + aws_cli_v2_command = "lexv2-models" + aws_cli_v2_command_no_dashes = "lexv2models" + } + + go_packages { + v1_package = "lexmodelsv2" + v2_package = "lexmodelsv2" + } + + sdk { + id = "Lex Models V2" + client_version = [2] + } + + names { + aliases = ["lexmodelsv2"] + provider_name_upper = 
"LexV2Models" + human_friendly = "Lex V2 Models" + } + + endpoint_info { + endpoint_api_call = "ListBots" + } + + resource_prefix { + correct = "aws_lexv2models_" + } + + provider_package_correct = "lexv2models" + doc_prefix = ["lexv2models_"] + brand = "AWS" +} + +service "lexruntime" { + + cli_v2_command { + aws_cli_v2_command = "lex-runtime" + aws_cli_v2_command_no_dashes = "lexruntime" + } + + go_packages { + v1_package = "lexruntimeservice" + v2_package = "lexruntimeservice" + } + + sdk { + id = "Lex Runtime Service" + client_version = [1] + } + + names { + aliases = ["lexruntimeservice"] + provider_name_upper = "LexRuntime" + human_friendly = "Lex Runtime" + } + + client { + go_v1_client_typename = "LexRuntimeService" + } + + resource_prefix { + correct = "aws_lexruntime_" + } + + provider_package_correct = "lexruntime" + doc_prefix = ["lexruntime_"] + brand = "Amazon" + not_implemented = true +} + +service "lexruntimev2" { + + cli_v2_command { + aws_cli_v2_command = "lexv2-runtime" + aws_cli_v2_command_no_dashes = "lexv2runtime" + } + + sdk { + id = "Lex Runtime V2" + client_version = [1] + } + + names { + aliases = ["lexv2runtime"] + provider_name_upper = "LexRuntimeV2" + human_friendly = "Lex Runtime V2" + } + + client { + go_v1_client_typename = "LexRuntimeV2" + } + + resource_prefix { + correct = "aws_lexruntimev2_" + } + + provider_package_correct = "lexruntimev2" + doc_prefix = ["lexruntimev2_"] + brand = "Amazon" + not_implemented = true +} + +service "licensemanager" { + + cli_v2_command { + aws_cli_v2_command = "license-manager" + aws_cli_v2_command_no_dashes = "licensemanager" + } + + sdk { + id = "License Manager" + client_version = [1] + } + + names { + provider_name_upper = "LicenseManager" + human_friendly = "License Manager" + } + + client { + go_v1_client_typename = "LicenseManager" + } + + endpoint_info { + endpoint_api_call = "ListLicenseConfigurations" + } + + resource_prefix { + correct = "aws_licensemanager_" + } + + 
provider_package_correct = "licensemanager" + doc_prefix = ["licensemanager_"] + brand = "AWS" +} + +service "lightsail" { + + sdk { + id = "Lightsail" + client_version = [2] + } + + names { + provider_name_upper = "Lightsail" + human_friendly = "Lightsail" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "GetInstances" + } + + resource_prefix { + correct = "aws_lightsail_" + } + + provider_package_correct = "lightsail" + doc_prefix = ["lightsail_"] + brand = "AWS" +} + +service "location" { + + go_packages { + v1_package = "locationservice" + v2_package = "location" + } + + sdk { + id = "Location" + client_version = [1] + } + + names { + aliases = ["locationservice"] + provider_name_upper = "Location" + human_friendly = "Location" + } + + client { + go_v1_client_typename = "LocationService" + } + + endpoint_info { + endpoint_api_call = "ListGeofenceCollections" + } + + resource_prefix { + correct = "aws_location_" + } + + provider_package_correct = "location" + doc_prefix = ["location_"] + brand = "AWS" +} + +service "lookoutequipment" { + + sdk { + id = "LookoutEquipment" + client_version = [1] + } + + names { + provider_name_upper = "LookoutEquipment" + human_friendly = "Lookout for Equipment" + } + + client { + go_v1_client_typename = "LookoutEquipment" + } + + resource_prefix { + correct = "aws_lookoutequipment_" + } + + provider_package_correct = "lookoutequipment" + doc_prefix = ["lookoutequipment_"] + brand = "Amazon" + not_implemented = true +} + +service "lookoutmetrics" { + + sdk { + id = "LookoutMetrics" + client_version = [2] + } + + names { + provider_name_upper = "LookoutMetrics" + human_friendly = "Lookout for Metrics" + } + + endpoint_info { + endpoint_api_call = "ListMetricSets" + } + + resource_prefix { + correct = "aws_lookoutmetrics_" + } + + provider_package_correct = "lookoutmetrics" + doc_prefix = ["lookoutmetrics_"] + brand = "AWS" +} + +service "lookoutvision" { + + go_packages { + v1_package = 
"lookoutforvision" + v2_package = "lookoutvision" + } + + sdk { + id = "LookoutVision" + client_version = [1] + } + + names { + aliases = ["lookoutforvision"] + provider_name_upper = "LookoutVision" + human_friendly = "Lookout for Vision" + } + + client { + go_v1_client_typename = "LookoutForVision" + } + + resource_prefix { + correct = "aws_lookoutvision_" + } + + provider_package_correct = "lookoutvision" + doc_prefix = ["lookoutvision_"] + brand = "Amazon" + not_implemented = true +} + +service "machinelearning" { + + sdk { + id = "Machine Learning" + client_version = [1] + } + + names { + provider_name_upper = "MachineLearning" + human_friendly = "Machine Learning" + } + + client { + go_v1_client_typename = "MachineLearning" + } + + resource_prefix { + correct = "aws_machinelearning_" + } + + provider_package_correct = "machinelearning" + doc_prefix = ["machinelearning_"] + brand = "Amazon" + not_implemented = true +} + +service "macie2" { + + sdk { + id = "Macie2" + client_version = [1] + } + + names { + provider_name_upper = "Macie2" + human_friendly = "Macie" + } + + client { + go_v1_client_typename = "Macie2" + } + + endpoint_info { + endpoint_api_call = "ListFindings" + } + + resource_prefix { + correct = "aws_macie2_" + } + + provider_package_correct = "macie2" + doc_prefix = ["macie2_"] + brand = "AWS" +} + +service "macie" { + + sdk { + id = "Macie" + client_version = [1] + } + + names { + provider_name_upper = "Macie" + human_friendly = "Macie Classic" + } + + client { + go_v1_client_typename = "Macie" + } + + resource_prefix { + correct = "aws_macie_" + } + + provider_package_correct = "macie" + doc_prefix = ["macie_"] + brand = "Amazon" + not_implemented = true +} + +service "m2" { + + sdk { + id = "m2" + client_version = [2] + } + + names { + provider_name_upper = "M2" + human_friendly = "Mainframe Modernization" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + correct = "aws_m2_" + } + + 
provider_package_correct = "m2" + doc_prefix = ["m2_"] + brand = "AWS" +} + +service "managedblockchain" { + + sdk { + id = "ManagedBlockchain" + client_version = [1] + } + + names { + provider_name_upper = "ManagedBlockchain" + human_friendly = "Managed Blockchain" + } + + client { + go_v1_client_typename = "ManagedBlockchain" + } + + resource_prefix { + correct = "aws_managedblockchain_" + } + + provider_package_correct = "managedblockchain" + doc_prefix = ["managedblockchain_"] + brand = "Amazon" + not_implemented = true +} + +service "grafana" { + + go_packages { + v1_package = "managedgrafana" + v2_package = "grafana" + } + + sdk { + id = "grafana" + client_version = [2] + } + + names { + aliases = ["managedgrafana", "amg"] + provider_name_upper = "Grafana" + human_friendly = "Managed Grafana" + } + + client { + go_v1_client_typename = "ManagedGrafana" + } + + endpoint_info { + endpoint_api_call = "ListWorkspaces" + } + + resource_prefix { + correct = "aws_grafana_" + } + + provider_package_correct = "grafana" + doc_prefix = ["grafana_"] + brand = "AWS" +} + +service "kafka" { + + sdk { + id = "Kafka" + client_version = [2] + } + + names { + aliases = ["msk"] + provider_name_upper = "Kafka" + human_friendly = "Managed Streaming for Kafka" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListClusters" + } + + resource_prefix { + actual = "aws_msk_" + correct = "aws_kafka_" + } + + provider_package_correct = "kafka" + doc_prefix = ["msk_"] + brand = "AWS" +} + +service "kafkaconnect" { + + sdk { + id = "KafkaConnect" + client_version = [2] + } + + names { + provider_name_upper = "KafkaConnect" + human_friendly = "Managed Streaming for Kafka Connect" + } + + client { + go_v1_client_typename = "KafkaConnect" + } + + endpoint_info { + endpoint_api_call = "ListConnectors" + } + + resource_prefix { + actual = "aws_mskconnect_" + correct = "aws_kafkaconnect_" + } + + provider_package_correct = "kafkaconnect" + doc_prefix = 
["mskconnect_"] + brand = "AWS" +} + +service "marketplacecatalog" { + + cli_v2_command { + aws_cli_v2_command = "marketplace-catalog" + aws_cli_v2_command_no_dashes = "marketplacecatalog" + } + + sdk { + id = "Marketplace Catalog" + client_version = [1] + } + + names { + provider_name_upper = "MarketplaceCatalog" + human_friendly = "Marketplace Catalog" + } + + client { + go_v1_client_typename = "MarketplaceCatalog" + } + + resource_prefix { + correct = "aws_marketplacecatalog_" + } + + provider_package_correct = "marketplacecatalog" + doc_prefix = ["marketplace_catalog_"] + brand = "AWS" + not_implemented = true +} + +service "marketplacecommerceanalytics" { + + sdk { + id = "Marketplace Commerce Analytics" + client_version = [1] + } + + names { + provider_name_upper = "MarketplaceCommerceAnalytics" + human_friendly = "Marketplace Commerce Analytics" + } + + client { + go_v1_client_typename = "MarketplaceCommerceAnalytics" + } + + resource_prefix { + correct = "aws_marketplacecommerceanalytics_" + } + + provider_package_correct = "marketplacecommerceanalytics" + doc_prefix = ["marketplacecommerceanalytics_"] + brand = "AWS" + not_implemented = true +} + +service "marketplaceentitlement" { + + cli_v2_command { + aws_cli_v2_command = "marketplace-entitlement" + aws_cli_v2_command_no_dashes = "marketplaceentitlement" + } + + go_packages { + v1_package = "marketplaceentitlementservice" + v2_package = "marketplaceentitlementservice" + } + + sdk { + id = "Marketplace Entitlement Service" + client_version = [1] + } + + names { + aliases = ["marketplaceentitlementservice"] + provider_name_upper = "MarketplaceEntitlement" + human_friendly = "Marketplace Entitlement" + } + + client { + go_v1_client_typename = "MarketplaceEntitlementService" + } + + resource_prefix { + correct = "aws_marketplaceentitlement_" + } + + provider_package_correct = "marketplaceentitlement" + doc_prefix = ["marketplaceentitlement_"] + brand = "AWS" + not_implemented = true +} + +service 
"marketplacemetering" { + + cli_v2_command { + aws_cli_v2_command = "meteringmarketplace" + aws_cli_v2_command_no_dashes = "meteringmarketplace" + } + + sdk { + id = "Marketplace Metering" + client_version = [1] + } + + names { + aliases = ["meteringmarketplace"] + provider_name_upper = "MarketplaceMetering" + human_friendly = "Marketplace Metering" + } + + client { + go_v1_client_typename = "MarketplaceMetering" + } + + resource_prefix { + correct = "aws_marketplacemetering_" + } + + provider_package_correct = "marketplacemetering" + doc_prefix = ["marketplacemetering_"] + brand = "AWS" + not_implemented = true +} + +service "memorydb" { + + sdk { + id = "MemoryDB" + client_version = [1] + } + + names { + provider_name_upper = "MemoryDB" + human_friendly = "MemoryDB for Redis" + } + + client { + go_v1_client_typename = "MemoryDB" + } + + endpoint_info { + endpoint_api_call = "DescribeClusters" + } + + resource_prefix { + correct = "aws_memorydb_" + } + + provider_package_correct = "memorydb" + doc_prefix = ["memorydb_"] + brand = "AWS" +} + +service "meta" { + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "Meta" + human_friendly = "Meta Data Sources" + } + + client { + skip_client_generate = true + } + + resource_prefix { + actual = "aws_(arn|billing_service_account|default_tags|ip_ranges|partition|regions?|service)$" + correct = "aws_meta_" + } + + provider_package_correct = "meta" + doc_prefix = ["arn", "ip_ranges", "billing_service_account", "default_tags", "partition", "region", "service\\."] + brand = "" + exclude = true + allowed_subcategory = true + note = "Not an AWS service (metadata)" +} +service "mgh" { + + go_packages { + v1_package = "migrationhub" + v2_package = "migrationhub" + } + + sdk { + id = "Migration Hub" + client_version = [1] + } + + names { + aliases = ["migrationhub"] + provider_name_upper = "MgH" + human_friendly = "MgH (Migration Hub)" + } + + 
client { + go_v1_client_typename = "MigrationHub" + } + + resource_prefix { + correct = "aws_mgh_" + } + + provider_package_correct = "mgh" + doc_prefix = ["mgh_"] + brand = "AWS" + not_implemented = true +} + +service "migrationhubconfig" { + + cli_v2_command { + aws_cli_v2_command = "migrationhub-config" + aws_cli_v2_command_no_dashes = "migrationhubconfig" + } + + sdk { + id = "MigrationHub Config" + client_version = [1] + } + + names { + provider_name_upper = "MigrationHubConfig" + human_friendly = "Migration Hub Config" + } + + client { + go_v1_client_typename = "MigrationHubConfig" + } + + resource_prefix { + correct = "aws_migrationhubconfig_" + } + + provider_package_correct = "migrationhubconfig" + doc_prefix = ["migrationhubconfig_"] + brand = "AWS" + not_implemented = true +} + +service "migrationhubrefactorspaces" { + + cli_v2_command { + aws_cli_v2_command = "migration-hub-refactor-spaces" + aws_cli_v2_command_no_dashes = "migrationhubrefactorspaces" + } + + sdk { + id = "Migration Hub Refactor Spaces" + client_version = [1] + } + + names { + provider_name_upper = "MigrationHubRefactorSpaces" + human_friendly = "Migration Hub Refactor Spaces" + } + + client { + go_v1_client_typename = "MigrationHubRefactorSpaces" + } + + resource_prefix { + correct = "aws_migrationhubrefactorspaces_" + } + + provider_package_correct = "migrationhubrefactorspaces" + doc_prefix = ["migrationhubrefactorspaces_"] + brand = "AWS" + not_implemented = true +} + +service "migrationhubstrategy" { + + go_packages { + v1_package = "migrationhubstrategyrecommendations" + v2_package = "migrationhubstrategy" + } + + sdk { + id = "MigrationHubStrategy" + client_version = [1] + } + + names { + aliases = ["migrationhubstrategyrecommendations"] + provider_name_upper = "MigrationHubStrategy" + human_friendly = "Migration Hub Strategy" + } + + client { + go_v1_client_typename = "MigrationHubStrategyRecommendations" + } + + resource_prefix { + correct = "aws_migrationhubstrategy_" + } + + 
provider_package_correct = "migrationhubstrategy" + doc_prefix = ["migrationhubstrategy_"] + brand = "AWS" + not_implemented = true +} + +service "mobile" { + + sdk { + id = "Mobile" + client_version = [1] + } + + names { + provider_name_upper = "Mobile" + human_friendly = "Mobile" + } + + client { + go_v1_client_typename = "Mobile" + } + + resource_prefix { + correct = "aws_mobile_" + } + + provider_package_correct = "mobile" + doc_prefix = ["mobile_"] + brand = "AWS" + not_implemented = true +} + +service "mq" { + + sdk { + id = "mq" + client_version = [2] + } + + names { + provider_name_upper = "MQ" + human_friendly = "MQ" + } + + endpoint_info { + endpoint_api_call = "ListBrokers" + } + + resource_prefix { + correct = "aws_mq_" + } + + provider_package_correct = "mq" + doc_prefix = ["mq_"] + brand = "AWS" +} + +service "mturk" { + + sdk { + id = "MTurk" + client_version = [1] + } + + names { + provider_name_upper = "MTurk" + human_friendly = "MTurk (Mechanical Turk)" + } + + client { + go_v1_client_typename = "MTurk" + } + + resource_prefix { + correct = "aws_mturk_" + } + + provider_package_correct = "mturk" + doc_prefix = ["mturk_"] + brand = "Amazon" + not_implemented = true +} + +service "mwaa" { + + sdk { + id = "MWAA" + client_version = [2] + } + + names { + provider_name_upper = "MWAA" + human_friendly = "MWAA (Managed Workflows for Apache Airflow)" + } + + endpoint_info { + endpoint_api_call = "ListEnvironments" + } + + resource_prefix { + correct = "aws_mwaa_" + } + + provider_package_correct = "mwaa" + doc_prefix = ["mwaa_"] + brand = "AWS" +} + +service "neptune" { + + sdk { + id = "Neptune" + client_version = [1] + } + + names { + provider_name_upper = "Neptune" + human_friendly = "Neptune" + } + + client { + go_v1_client_typename = "Neptune" + } + + endpoint_info { + endpoint_api_call = "DescribeDBClusters" + } + + resource_prefix { + correct = "aws_neptune_" + } + + provider_package_correct = "neptune" + doc_prefix = ["neptune_"] + brand = "AWS" 
+} + +service "neptunegraph" { + + cli_v2_command { + aws_cli_v2_command = "neptune-graph" + aws_cli_v2_command_no_dashes = "neptunegraph" + } + + go_packages { + v1_package = "" + v2_package = "neptunegraph" + } + + sdk { + id = "Neptune Graph" + client_version = [2] + } + + names { + provider_name_upper = "NeptuneGraph" + human_friendly = "Neptune Analytics" + } + + endpoint_info { + endpoint_api_call = "ListGraphs" + } + + resource_prefix { + correct = "aws_neptunegraph_" + } + + provider_package_correct = "neptunegraph" + doc_prefix = ["neptunegraph_"] + brand = "AWS" +} + +service "networkfirewall" { + + cli_v2_command { + aws_cli_v2_command = "network-firewall" + aws_cli_v2_command_no_dashes = "networkfirewall" + } + + sdk { + id = "Network Firewall" + client_version = [2] + } + + names { + provider_name_upper = "NetworkFirewall" + human_friendly = "Network Firewall" + } + + client { + go_v1_client_typename = "NetworkFirewall" + } + + endpoint_info { + endpoint_api_call = "ListFirewalls" + } + + resource_prefix { + correct = "aws_networkfirewall_" + } + + provider_package_correct = "networkfirewall" + doc_prefix = ["networkfirewall_"] + brand = "AWS" +} + +service "networkmanager" { + + sdk { + id = "NetworkManager" + client_version = [1] + } + + names { + provider_name_upper = "NetworkManager" + human_friendly = "Network Manager" + } + + client { + go_v1_client_typename = "NetworkManager" + } + + endpoint_info { + endpoint_api_call = "ListCoreNetworks" + } + + resource_prefix { + correct = "aws_networkmanager_" + } + + provider_package_correct = "networkmanager" + doc_prefix = ["networkmanager_"] + brand = "AWS" +} + +service "nimble" { + + go_packages { + v1_package = "nimblestudio" + v2_package = "nimble" + } + + sdk { + id = "nimble" + client_version = [1] + } + + names { + aliases = ["nimblestudio"] + provider_name_upper = "Nimble" + human_friendly = "Nimble Studio" + } + + client { + go_v1_client_typename = "NimbleStudio" + } + + resource_prefix { + 
correct = "aws_nimble_" + } + + provider_package_correct = "nimble" + doc_prefix = ["nimble_"] + brand = "Amazon" + not_implemented = true +} + +service "oam" { + + sdk { + id = "OAM" + client_version = [2] + } + + names { + aliases = ["cloudwatchobservabilityaccessmanager"] + provider_name_upper = "ObservabilityAccessManager" + human_friendly = "CloudWatch Observability Access Manager" + } + + endpoint_info { + endpoint_api_call = "ListLinks" + } + + resource_prefix { + correct = "aws_oam_" + } + + provider_package_correct = "oam" + doc_prefix = ["oam_"] + brand = "AWS" +} + +service "opensearch" { + + go_packages { + v1_package = "opensearchservice" + v2_package = "opensearch" + } + + sdk { + id = "OpenSearch" + client_version = [1] + } + + names { + aliases = ["opensearchservice"] + provider_name_upper = "OpenSearch" + human_friendly = "OpenSearch" + } + + client { + go_v1_client_typename = "OpenSearchService" + } + + endpoint_info { + endpoint_api_call = "ListDomainNames" + } + + resource_prefix { + correct = "aws_opensearch_" + } + + provider_package_correct = "opensearch" + doc_prefix = ["opensearch_"] + brand = "AWS" +} + +service "opensearchserverless" { + + sdk { + id = "OpenSearchServerless" + client_version = [2] + } + + names { + provider_name_upper = "OpenSearchServerless" + human_friendly = "OpenSearch Serverless" + } + + endpoint_info { + endpoint_api_call = "ListCollections" + } + + resource_prefix { + correct = "aws_opensearchserverless_" + } + + provider_package_correct = "opensearchserverless" + doc_prefix = ["opensearchserverless_"] + brand = "AWS" +} + +service "osis" { + + sdk { + id = "OSIS" + client_version = [2] + } + + names { + aliases = ["opensearchingestion"] + provider_name_upper = "OpenSearchIngestion" + human_friendly = "OpenSearch Ingestion" + } + + endpoint_info { + endpoint_api_call = "ListPipelines" + } + + resource_prefix { + correct = "aws_osis_" + } + + provider_package_correct = "osis" + doc_prefix = ["osis_"] + brand = "AWS" 
+} + +service "opsworks" { + + sdk { + id = "OpsWorks" + client_version = [1] + } + + names { + provider_name_upper = "OpsWorks" + human_friendly = "OpsWorks" + } + + client { + go_v1_client_typename = "OpsWorks" + } + + endpoint_info { + endpoint_api_call = "DescribeApps" + } + + resource_prefix { + correct = "aws_opsworks_" + } + + provider_package_correct = "opsworks" + doc_prefix = ["opsworks_"] + brand = "AWS" +} + +service "opsworkscm" { + + cli_v2_command { + aws_cli_v2_command = "opsworks-cm" + aws_cli_v2_command_no_dashes = "opsworkscm" + } + + sdk { + id = "OpsWorksCM" + client_version = [1] + } + + names { + provider_name_upper = "OpsWorksCM" + human_friendly = "OpsWorks CM" + } + + client { + go_v1_client_typename = "OpsWorksCM" + } + + resource_prefix { + correct = "aws_opsworkscm_" + } + + provider_package_correct = "opsworkscm" + doc_prefix = ["opsworkscm_"] + brand = "AWS" + not_implemented = true +} + +service "organizations" { + + sdk { + id = "Organizations" + client_version = [2] + } + + names { + provider_name_upper = "Organizations" + human_friendly = "Organizations" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListAccounts" + } + + resource_prefix { + correct = "aws_organizations_" + } + + provider_package_correct = "organizations" + doc_prefix = ["organizations_"] + brand = "AWS" +} + +service "outposts" { + + sdk { + id = "Outposts" + client_version = [1] + } + + names { + provider_name_upper = "Outposts" + human_friendly = "Outposts" + } + + client { + go_v1_client_typename = "Outposts" + } + + endpoint_info { + endpoint_api_call = "ListSites" + } + + resource_prefix { + correct = "aws_outposts_" + } + + provider_package_correct = "outposts" + doc_prefix = ["outposts_"] + brand = "AWS" +} + +service "panorama" { + + sdk { + id = "Panorama" + client_version = [1] + } + + names { + provider_name_upper = "Panorama" + human_friendly = "Panorama" + } + + client { + go_v1_client_typename = 
"Panorama" + } + + resource_prefix { + correct = "aws_panorama_" + } + + provider_package_correct = "panorama" + doc_prefix = ["panorama_"] + brand = "AWS" + not_implemented = true +} + +service "paymentcryptography" { + + cli_v2_command { + aws_cli_v2_command = "payment-cryptography" + aws_cli_v2_command_no_dashes = "paymentcryptography" + } + + sdk { + id = "PaymentCryptography" + client_version = [2] + } + + names { + provider_name_upper = "PaymentCryptography" + human_friendly = "Payment Cryptography Control Plane" + } + + endpoint_info { + endpoint_api_call = "ListKeys" + } + + resource_prefix { + correct = "aws_paymentcryptography_" + } + + provider_package_correct = "paymentcryptography" + doc_prefix = ["paymentcryptography_"] + brand = "AWS" +} + +service "pcaconnectorad" { + + cli_v2_command { + aws_cli_v2_command = "pca-connector-ad" + aws_cli_v2_command_no_dashes = "pcaconnectorad" + } + + sdk { + id = "Pca Connector Ad" + client_version = [2] + } + + names { + provider_name_upper = "PCAConnectorAD" + human_friendly = "Private CA Connector for Active Directory" + } + + endpoint_info { + endpoint_api_call = "ListConnectors" + } + + resource_prefix { + correct = "aws_pcaconnectorad_" + } + + provider_package_correct = "pcaconnectorad" + doc_prefix = ["pcaconnectorad_"] + brand = "AWS" +} + +service "personalize" { + + sdk { + id = "Personalize" + client_version = [1] + } + + names { + provider_name_upper = "Personalize" + human_friendly = "Personalize" + } + + client { + go_v1_client_typename = "Personalize" + } + + resource_prefix { + correct = "aws_personalize_" + } + + provider_package_correct = "personalize" + doc_prefix = ["personalize_"] + brand = "Amazon" + not_implemented = true +} + +service "personalizeevents" { + + cli_v2_command { + aws_cli_v2_command = "personalize-events" + aws_cli_v2_command_no_dashes = "personalizeevents" + } + + sdk { + id = "Personalize Events" + client_version = [1] + } + + names { + provider_name_upper = 
"PersonalizeEvents" + human_friendly = "Personalize Events" + } + + client { + go_v1_client_typename = "PersonalizeEvents" + } + + resource_prefix { + correct = "aws_personalizeevents_" + } + + provider_package_correct = "personalizeevents" + doc_prefix = ["personalizeevents_"] + brand = "Amazon" + not_implemented = true +} + +service "personalizeruntime" { + + cli_v2_command { + aws_cli_v2_command = "personalize-runtime" + aws_cli_v2_command_no_dashes = "personalizeruntime" + } + + sdk { + id = "Personalize Runtime" + client_version = [1] + } + + names { + provider_name_upper = "PersonalizeRuntime" + human_friendly = "Personalize Runtime" + } + + client { + go_v1_client_typename = "PersonalizeRuntime" + } + + resource_prefix { + correct = "aws_personalizeruntime_" + } + + provider_package_correct = "personalizeruntime" + doc_prefix = ["personalizeruntime_"] + brand = "Amazon" + not_implemented = true +} + +service "pinpoint" { + + sdk { + id = "Pinpoint" + client_version = [1] + } + + names { + provider_name_upper = "Pinpoint" + human_friendly = "Pinpoint" + } + + client { + go_v1_client_typename = "Pinpoint" + } + + endpoint_info { + endpoint_api_call = "GetApps" + } + + resource_prefix { + correct = "aws_pinpoint_" + } + + provider_package_correct = "pinpoint" + doc_prefix = ["pinpoint_"] + brand = "AWS" +} + +service "pinpointemail" { + + cli_v2_command { + aws_cli_v2_command = "pinpoint-email" + aws_cli_v2_command_no_dashes = "pinpointemail" + } + + sdk { + id = "Pinpoint Email" + client_version = [1] + } + + names { + provider_name_upper = "PinpointEmail" + human_friendly = "Pinpoint Email" + } + + client { + go_v1_client_typename = "PinpointEmail" + } + + resource_prefix { + correct = "aws_pinpointemail_" + } + + provider_package_correct = "pinpointemail" + doc_prefix = ["pinpointemail_"] + brand = "Amazon" + not_implemented = true +} + +service "pinpointsmsvoice" { + + cli_v2_command { + aws_cli_v2_command = "pinpoint-sms-voice" + 
aws_cli_v2_command_no_dashes = "pinpointsmsvoice" + } + + sdk { + id = "Pinpoint SMS Voice" + client_version = [1] + } + + names { + provider_name_upper = "PinpointSMSVoice" + human_friendly = "Pinpoint SMS and Voice" + } + + client { + go_v1_client_typename = "PinpointSMSVoice" + } + + resource_prefix { + correct = "aws_pinpointsmsvoice_" + } + + provider_package_correct = "pinpointsmsvoice" + doc_prefix = ["pinpointsmsvoice_"] + brand = "Amazon" + not_implemented = true +} + +service "pipes" { + + sdk { + id = "Pipes" + client_version = [2] + } + + names { + provider_name_upper = "Pipes" + human_friendly = "EventBridge Pipes" + } + + endpoint_info { + endpoint_api_call = "ListPipes" + } + + resource_prefix { + correct = "aws_pipes_" + } + + provider_package_correct = "pipes" + doc_prefix = ["pipes_"] + brand = "AWS" +} + +service "polly" { + + sdk { + id = "Polly" + client_version = [2] + } + + names { + provider_name_upper = "Polly" + human_friendly = "Polly" + } + + endpoint_info { + endpoint_api_call = "ListLexicons" + } + + resource_prefix { + correct = "aws_polly_" + } + + provider_package_correct = "polly" + doc_prefix = ["polly_"] + brand = "AWS" +} + +service "pricing" { + + sdk { + id = "Pricing" + client_version = [2] + } + + names { + provider_name_upper = "Pricing" + human_friendly = "Pricing Calculator" + } + + endpoint_info { + endpoint_api_call = "DescribeServices" + } + + resource_prefix { + correct = "aws_pricing_" + } + + provider_package_correct = "pricing" + doc_prefix = ["pricing_"] + brand = "AWS" +} + +service "proton" { + + sdk { + id = "Proton" + client_version = [1] + } + + names { + provider_name_upper = "Proton" + human_friendly = "Proton" + } + + client { + go_v1_client_typename = "Proton" + } + + resource_prefix { + correct = "aws_proton_" + } + + provider_package_correct = "proton" + doc_prefix = ["proton_"] + brand = "AWS" + not_implemented = true +} + +service "qbusiness" { + + sdk { + id = "QBusiness" + client_version = [2] + } + 
+ names { + provider_name_upper = "QBusiness" + human_friendly = "Amazon Q Business" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + correct = "aws_qbusiness_" + } + + provider_package_correct = "qbusiness" + doc_prefix = ["qbusiness_"] + brand = "AWS" +} + +service "qldb" { + + sdk { + id = "QLDB" + client_version = [2] + } + + names { + provider_name_upper = "QLDB" + human_friendly = "QLDB (Quantum Ledger Database)" + } + + endpoint_info { + endpoint_api_call = "ListLedgers" + } + + resource_prefix { + correct = "aws_qldb_" + } + + provider_package_correct = "qldb" + doc_prefix = ["qldb_"] + brand = "AWS" +} + +service "qldbsession" { + + cli_v2_command { + aws_cli_v2_command = "qldb-session" + aws_cli_v2_command_no_dashes = "qldbsession" + } + + sdk { + id = "QLDB Session" + client_version = [1] + } + + names { + provider_name_upper = "QLDBSession" + human_friendly = "QLDB Session" + } + + client { + go_v1_client_typename = "QLDBSession" + } + + resource_prefix { + correct = "aws_qldbsession_" + } + + provider_package_correct = "qldbsession" + doc_prefix = ["qldbsession_"] + brand = "Amazon" + not_implemented = true +} + +service "quicksight" { + + sdk { + id = "QuickSight" + client_version = [1] + } + + names { + provider_name_upper = "QuickSight" + human_friendly = "QuickSight" + } + + client { + go_v1_client_typename = "QuickSight" + } + + endpoint_info { + endpoint_api_call = "ListDashboards" + endpoint_api_params = "AwsAccountId: aws_sdkv1.String(\"123456789012\")" + } + + resource_prefix { + correct = "aws_quicksight_" + } + + provider_package_correct = "quicksight" + doc_prefix = ["quicksight_"] + brand = "AWS" +} + +service "ram" { + + sdk { + id = "RAM" + client_version = [2] + } + + names { + provider_name_upper = "RAM" + human_friendly = "RAM (Resource Access Manager)" + } + + endpoint_info { + endpoint_api_call = "ListPermissions" + } + + resource_prefix { + correct = "aws_ram_" + } + + 
provider_package_correct = "ram" + doc_prefix = ["ram_"] + brand = "AWS" +} + +service "rds" { + + sdk { + id = "RDS" + client_version = [1, 2] + } + + names { + provider_name_upper = "RDS" + human_friendly = "RDS (Relational Database)" + } + + client { + go_v1_client_typename = "RDS" + } + + endpoint_info { + endpoint_api_call = "DescribeDBInstances" + } + + resource_prefix { + actual = "aws_(db_|rds_)" + correct = "aws_rds_" + } + + provider_package_correct = "rds" + doc_prefix = ["rds_", "db_"] + brand = "AWS" +} + +service "rdsdata" { + + cli_v2_command { + aws_cli_v2_command = "rds-data" + aws_cli_v2_command_no_dashes = "rdsdata" + } + + go_packages { + v1_package = "rdsdataservice" + v2_package = "rdsdata" + } + + sdk { + id = "RDS Data" + client_version = [1] + } + + names { + aliases = ["rdsdataservice"] + provider_name_upper = "RDSData" + human_friendly = "RDS Data" + } + + client { + go_v1_client_typename = "RDSDataService" + } + + resource_prefix { + correct = "aws_rdsdata_" + } + + provider_package_correct = "rdsdata" + doc_prefix = ["rdsdata_"] + brand = "Amazon" + not_implemented = true +} + +service "pi" { + + sdk { + id = "PI" + client_version = [1] + } + + names { + provider_name_upper = "PI" + human_friendly = "RDS Performance Insights (PI)" + } + + client { + go_v1_client_typename = "PI" + } + + resource_prefix { + correct = "aws_pi_" + } + + provider_package_correct = "pi" + doc_prefix = ["pi_"] + brand = "Amazon" + not_implemented = true +} + +service "rbin" { + + go_packages { + v1_package = "recyclebin" + v2_package = "rbin" + } + + sdk { + id = "rbin" + client_version = [2] + } + + names { + aliases = ["recyclebin"] + provider_name_upper = "RBin" + human_friendly = "Recycle Bin (RBin)" + } + + endpoint_info { + endpoint_api_call = "ListRules" + endpoint_api_params = "ResourceType: awstypes.ResourceTypeEc2Image" + } + + resource_prefix { + correct = "aws_rbin_" + } + + provider_package_correct = "rbin" + doc_prefix = ["rbin_"] + brand = "AWS" 
+} + +service "redshift" { + + sdk { + id = "Redshift" + client_version = [1, 2] + } + + names { + provider_name_upper = "Redshift" + human_friendly = "Redshift" + } + + client { + go_v1_client_typename = "Redshift" + } + + endpoint_info { + endpoint_api_call = "DescribeClusters" + } + + resource_prefix { + correct = "aws_redshift_" + } + + provider_package_correct = "redshift" + doc_prefix = ["redshift_"] + brand = "AWS" +} + +service "redshiftdata" { + + cli_v2_command { + aws_cli_v2_command = "redshift-data" + aws_cli_v2_command_no_dashes = "redshiftdata" + } + + go_packages { + v1_package = "redshiftdataapiservice" + v2_package = "redshiftdata" + } + + sdk { + id = "Redshift Data" + client_version = [2] + } + + names { + aliases = ["redshiftdataapiservice"] + provider_name_upper = "RedshiftData" + human_friendly = "Redshift Data" + } + + endpoint_info { + endpoint_api_call = "ListDatabases" + endpoint_api_params = "Database: aws_sdkv2.String(\"test\")" + } + + resource_prefix { + correct = "aws_redshiftdata_" + } + + provider_package_correct = "redshiftdata" + doc_prefix = ["redshiftdata_"] + brand = "AWS" +} + +service "redshiftserverless" { + + cli_v2_command { + aws_cli_v2_command = "redshift-serverless" + aws_cli_v2_command_no_dashes = "redshiftserverless" + } + + sdk { + id = "Redshift Serverless" + client_version = [1, 2] + } + + names { + provider_name_upper = "RedshiftServerless" + human_friendly = "Redshift Serverless" + } + + client { + go_v1_client_typename = "RedshiftServerless" + } + + endpoint_info { + endpoint_api_call = "ListNamespaces" + } + + resource_prefix { + correct = "aws_redshiftserverless_" + } + + provider_package_correct = "redshiftserverless" + doc_prefix = ["redshiftserverless_"] + brand = "AWS" +} + +service "rekognition" { + + sdk { + id = "Rekognition" + client_version = [2] + } + + names { + provider_name_upper = "Rekognition" + human_friendly = "Rekognition" + } + + endpoint_info { + endpoint_api_call = "ListCollections" + } + 
+ resource_prefix { + correct = "aws_rekognition_" + } + + provider_package_correct = "rekognition" + doc_prefix = ["rekognition_"] + brand = "AWS" +} + +service "resiliencehub" { + + sdk { + id = "resiliencehub" + client_version = [1] + } + + names { + provider_name_upper = "ResilienceHub" + human_friendly = "Resilience Hub" + } + + client { + go_v1_client_typename = "ResilienceHub" + } + + resource_prefix { + correct = "aws_resiliencehub_" + } + + provider_package_correct = "resiliencehub" + doc_prefix = ["resiliencehub_"] + brand = "AWS" + not_implemented = true +} + +service "resourceexplorer2" { + + cli_v2_command { + aws_cli_v2_command = "resource-explorer-2" + aws_cli_v2_command_no_dashes = "resourceexplorer2" + } + + sdk { + id = "Resource Explorer 2" + client_version = [2] + } + + names { + provider_name_upper = "ResourceExplorer2" + human_friendly = "Resource Explorer" + } + + + endpoint_info { + endpoint_api_call = "ListIndexes" + } + + resource_prefix { + correct = "aws_resourceexplorer2_" + } + + provider_package_correct = "resourceexplorer2" + doc_prefix = ["resourceexplorer2_"] + brand = "AWS" +} + +service "resourcegroups" { + + cli_v2_command { + aws_cli_v2_command = "resource-groups" + aws_cli_v2_command_no_dashes = "resourcegroups" + } + + sdk { + id = "Resource Groups" + client_version = [2] + } + + names { + provider_name_upper = "ResourceGroups" + human_friendly = "Resource Groups" + } + + endpoint_info { + endpoint_api_call = "ListGroups" + } + + resource_prefix { + correct = "aws_resourcegroups_" + } + + provider_package_correct = "resourcegroups" + doc_prefix = ["resourcegroups_"] + brand = "AWS" +} + +service "resourcegroupstaggingapi" { + + sdk { + id = "Resource Groups Tagging API" + client_version = [2] + } + + names { + aliases = ["resourcegroupstagging"] + provider_name_upper = "ResourceGroupsTaggingAPI" + human_friendly = "Resource Groups Tagging" + } + + endpoint_info { + endpoint_api_call = "GetResources" + } + + resource_prefix { 
+ correct = "aws_resourcegroupstaggingapi_" + } + + provider_package_correct = "resourcegroupstaggingapi" + doc_prefix = ["resourcegroupstaggingapi_"] + brand = "AWS" +} + +service "robomaker" { + + sdk { + id = "RoboMaker" + client_version = [1] + } + + names { + provider_name_upper = "RoboMaker" + human_friendly = "RoboMaker" + } + + client { + go_v1_client_typename = "RoboMaker" + } + + resource_prefix { + correct = "aws_robomaker_" + } + + provider_package_correct = "robomaker" + doc_prefix = ["robomaker_"] + brand = "AWS" + not_implemented = true +} + +service "rolesanywhere" { + + sdk { + id = "RolesAnywhere" + client_version = [2] + } + + names { + provider_name_upper = "RolesAnywhere" + human_friendly = "Roles Anywhere" + } + + endpoint_info { + endpoint_api_call = "ListProfiles" + } + + resource_prefix { + correct = "aws_rolesanywhere_" + } + + provider_package_correct = "rolesanywhere" + doc_prefix = ["rolesanywhere_"] + brand = "AWS" +} + +service "route53" { + + sdk { + id = "Route 53" + client_version = [2] + } + + names { + provider_name_upper = "Route53" + human_friendly = "Route 53" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListHostedZones" + endpoint_region_override = "us-east-1" + } + + resource_prefix { + actual = "aws_route53_(?!resolver_)" + correct = "aws_route53_" + } + + provider_package_correct = "route53" + doc_prefix = ["route53_cidr_", "route53_delegation_", "route53_health_", "route53_hosted_", "route53_key_", "route53_query_", "route53_record", "route53_traffic_", "route53_vpc_", "route53_zone"] + brand = "AWS" +} + +service "route53domains" { + + sdk { + id = "Route 53 Domains" + client_version = [2] + } + + names { + provider_name_upper = "Route53Domains" + human_friendly = "Route 53 Domains" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListDomains" + endpoint_region_override = "us-east-1" + } + + resource_prefix { + correct = 
"aws_route53domains_" + } + + provider_package_correct = "route53domains" + doc_prefix = ["route53domains_"] + brand = "AWS" +} + +service "route53profiles" { + + sdk { + id = "Route 53 Profiles" + client_version = [2] + } + + names { + provider_name_upper = "Route53Profiles" + human_friendly = "Route 53 Profiles" + } + + endpoint_info { + endpoint_api_call = "ListProfiles" + } + + resource_prefix { + correct = "aws_route53profiles_" + } + + provider_package_correct = "route53profiles" + doc_prefix = ["route53profiles_"] + brand = "AWS" +} + +service "route53recoverycluster" { + + cli_v2_command { + aws_cli_v2_command = "route53-recovery-cluster" + aws_cli_v2_command_no_dashes = "route53recoverycluster" + } + + sdk { + id = "Route53 Recovery Cluster" + client_version = [1] + } + + names { + provider_name_upper = "Route53RecoveryCluster" + human_friendly = "Route 53 Recovery Cluster" + } + + client { + go_v1_client_typename = "Route53RecoveryCluster" + } + + resource_prefix { + correct = "aws_route53recoverycluster_" + } + + provider_package_correct = "route53recoverycluster" + doc_prefix = ["route53recoverycluster_"] + brand = "Amazon" + not_implemented = true +} + +service "route53recoverycontrolconfig" { + + cli_v2_command { + aws_cli_v2_command = "route53-recovery-control-config" + aws_cli_v2_command_no_dashes = "route53recoverycontrolconfig" + } + + sdk { + id = "Route53 Recovery Control Config" + client_version = [1] + } + + names { + provider_name_upper = "Route53RecoveryControlConfig" + human_friendly = "Route 53 Recovery Control Config" + } + + client { + go_v1_client_typename = "Route53RecoveryControlConfig" + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListClusters" + } + + resource_prefix { + correct = "aws_route53recoverycontrolconfig_" + } + + provider_package_correct = "route53recoverycontrolconfig" + doc_prefix = ["route53recoverycontrolconfig_"] + brand = "AWS" +} + +service "route53recoveryreadiness" { + + 
cli_v2_command { + aws_cli_v2_command = "route53-recovery-readiness" + aws_cli_v2_command_no_dashes = "route53recoveryreadiness" + } + + sdk { + id = "Route53 Recovery Readiness" + client_version = [1] + } + + names { + provider_name_upper = "Route53RecoveryReadiness" + human_friendly = "Route 53 Recovery Readiness" + } + + client { + go_v1_client_typename = "Route53RecoveryReadiness" + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListCells" + } + + resource_prefix { + correct = "aws_route53recoveryreadiness_" + } + + provider_package_correct = "route53recoveryreadiness" + doc_prefix = ["route53recoveryreadiness_"] + brand = "AWS" +} + +service "route53resolver" { + + sdk { + id = "Route53Resolver" + client_version = [1] + } + + names { + provider_name_upper = "Route53Resolver" + human_friendly = "Route 53 Resolver" + } + + client { + go_v1_client_typename = "Route53Resolver" + } + + endpoint_info { + endpoint_api_call = "ListFirewallDomainLists" + } + + resource_prefix { + actual = "aws_route53_resolver_" + correct = "aws_route53resolver_" + } + + provider_package_correct = "route53resolver" + doc_prefix = ["route53_resolver_"] + brand = "AWS" +} + +service "s3" { + + cli_v2_command { + aws_cli_v2_command = "s3api" + aws_cli_v2_command_no_dashes = "s3api" + } + + sdk { + id = "S3" + client_version = [2] + } + + names { + aliases = ["s3api"] + provider_name_upper = "S3" + human_friendly = "S3 (Simple Storage)" + } + + client { + skip_client_generate = true + } + + env_var { + deprecated_env_var = "AWS_S3_ENDPOINT" + tf_aws_env_var = "TF_AWS_S3_ENDPOINT" + } + endpoint_info { + endpoint_api_call = "ListBuckets" + } + + resource_prefix { + actual = "aws_(canonical_user_id|s3_bucket|s3_object|s3_directory_bucket)" + correct = "aws_s3_" + } + + provider_package_correct = "s3" + doc_prefix = ["s3_bucket", "s3_directory_bucket", "s3_object", "canonical_user_id"] + brand = "AWS" +} + +service "s3control" { + + sdk { + id = "S3 Control" + 
client_version = [2] + } + + names { + provider_name_upper = "S3Control" + human_friendly = "S3 Control" + } + + endpoint_info { + endpoint_api_call = "ListJobs" + } + + resource_prefix { + actual = "aws_(s3_account_|s3control_|s3_access_)" + correct = "aws_s3control_" + } + + provider_package_correct = "s3control" + doc_prefix = ["s3control", "s3_account_", "s3_access_"] + brand = "AWS" +} + +service "glacier" { + + sdk { + id = "Glacier" + client_version = [2] + } + + names { + provider_name_upper = "Glacier" + human_friendly = "S3 Glacier" + } + + endpoint_info { + endpoint_api_call = "ListVaults" + } + + resource_prefix { + correct = "aws_glacier_" + } + + provider_package_correct = "glacier" + doc_prefix = ["glacier_"] + brand = "AWS" +} + +service "s3outposts" { + + sdk { + id = "S3Outposts" + client_version = [1] + } + + names { + provider_name_upper = "S3Outposts" + human_friendly = "S3 on Outposts" + } + + client { + go_v1_client_typename = "S3Outposts" + } + + endpoint_info { + endpoint_api_call = "ListEndpoints" + } + + resource_prefix { + correct = "aws_s3outposts_" + } + + provider_package_correct = "s3outposts" + doc_prefix = ["s3outposts_"] + brand = "AWS" +} + +service "sagemaker" { + + sdk { + id = "SageMaker" + client_version = [1] + } + + names { + provider_name_upper = "SageMaker" + human_friendly = "SageMaker" + } + + client { + go_v1_client_typename = "SageMaker" + } + + endpoint_info { + endpoint_api_call = "ListClusters" + } + + resource_prefix { + correct = "aws_sagemaker_" + } + + provider_package_correct = "sagemaker" + doc_prefix = ["sagemaker_"] + brand = "AWS" +} + +service "sagemakera2iruntime" { + + cli_v2_command { + aws_cli_v2_command = "sagemaker-a2i-runtime" + aws_cli_v2_command_no_dashes = "sagemakera2iruntime" + } + + go_packages { + v1_package = "augmentedairuntime" + v2_package = "sagemakera2iruntime" + } + + sdk { + id = "SageMaker A2I Runtime" + client_version = [1] + } + + names { + aliases = ["augmentedairuntime"] + 
provider_name_upper = "SageMakerA2IRuntime" + human_friendly = "SageMaker A2I (Augmented AI)" + } + + client { + go_v1_client_typename = "AugmentedAIRuntime" + } + + resource_prefix { + correct = "aws_sagemakera2iruntime_" + } + + provider_package_correct = "sagemakera2iruntime" + doc_prefix = ["sagemakera2iruntime_"] + brand = "Amazon" + not_implemented = true +} + +service "sagemakeredge" { + + cli_v2_command { + aws_cli_v2_command = "sagemaker-edge" + aws_cli_v2_command_no_dashes = "sagemakeredge" + } + + go_packages { + v1_package = "sagemakeredgemanager" + v2_package = "sagemakeredge" + } + + sdk { + id = "Sagemaker Edge" + client_version = [1] + } + + names { + aliases = ["sagemakeredgemanager"] + provider_name_upper = "SageMakerEdge" + human_friendly = "SageMaker Edge Manager" + } + + client { + go_v1_client_typename = "SagemakerEdgeManager" + } + + resource_prefix { + correct = "aws_sagemakeredge_" + } + + provider_package_correct = "sagemakeredge" + doc_prefix = ["sagemakeredge_"] + brand = "Amazon" + not_implemented = true +} + +service "sagemakerfeaturestoreruntime" { + + cli_v2_command { + aws_cli_v2_command = "sagemaker-featurestore-runtime" + aws_cli_v2_command_no_dashes = "sagemakerfeaturestoreruntime" + } + + sdk { + id = "SageMaker FeatureStore Runtime" + client_version = [1] + } + + names { + provider_name_upper = "SageMakerFeatureStoreRuntime" + human_friendly = "SageMaker Feature Store Runtime" + } + + client { + go_v1_client_typename = "SageMakerFeatureStoreRuntime" + } + + resource_prefix { + correct = "aws_sagemakerfeaturestoreruntime_" + } + + provider_package_correct = "sagemakerfeaturestoreruntime" + doc_prefix = ["sagemakerfeaturestoreruntime_"] + brand = "Amazon" + not_implemented = true +} + +service "sagemakerruntime" { + + cli_v2_command { + aws_cli_v2_command = "sagemaker-runtime" + aws_cli_v2_command_no_dashes = "sagemakerruntime" + } + + sdk { + id = "SageMaker Runtime" + client_version = [1] + } + + names { + provider_name_upper = 
"SageMakerRuntime" + human_friendly = "SageMaker Runtime" + } + + client { + go_v1_client_typename = "SageMakerRuntime" + } + + resource_prefix { + correct = "aws_sagemakerruntime_" + } + + provider_package_correct = "sagemakerruntime" + doc_prefix = ["sagemakerruntime_"] + brand = "Amazon" + not_implemented = true +} + +service "savingsplans" { + + sdk { + id = "savingsplans" + client_version = [1] + } + + names { + provider_name_upper = "SavingsPlans" + human_friendly = "Savings Plans" + } + + client { + go_v1_client_typename = "SavingsPlans" + } + + resource_prefix { + correct = "aws_savingsplans_" + } + + provider_package_correct = "savingsplans" + doc_prefix = ["savingsplans_"] + brand = "AWS" + not_implemented = true +} + +service "simpledb" { + + cli_v2_command { + aws_cli_v2_command = "sdb" + aws_cli_v2_command_no_dashes = "sdb" + } + + go_packages { + v1_package = "simpledb" + v2_package = "" + } + + sdk { + id = "SimpleDB" + client_version = [1] + } + + names { + aliases = ["sdb"] + provider_name_upper = "SimpleDB" + human_friendly = "SDB (SimpleDB)" + } + + client { + go_v1_client_typename = "SimpleDB" + } + + endpoint_info { + endpoint_api_call = "ListDomains" + } + + resource_prefix { + actual = "aws_simpledb_" + correct = "aws_sdb_" + } + + provider_package_correct = "sdb" + doc_prefix = ["simpledb_"] + brand = "AWS" +} + +service "scheduler" { + + sdk { + id = "Scheduler" + client_version = [2] + } + + names { + provider_name_upper = "Scheduler" + human_friendly = "EventBridge Scheduler" + } + + endpoint_info { + endpoint_api_call = "ListSchedules" + } + + resource_prefix { + correct = "aws_scheduler_" + } + + provider_package_correct = "scheduler" + doc_prefix = ["scheduler_"] + brand = "AWS" +} + +service "secretsmanager" { + + sdk { + id = "Secrets Manager" + client_version = [2] + } + + names { + provider_name_upper = "SecretsManager" + human_friendly = "Secrets Manager" + } + + endpoint_info { + endpoint_api_call = "ListSecrets" + } + + 
resource_prefix { + correct = "aws_secretsmanager_" + } + + provider_package_correct = "secretsmanager" + doc_prefix = ["secretsmanager_"] + brand = "AWS" +} + +service "securityhub" { + + sdk { + id = "SecurityHub" + client_version = [2] + } + + names { + provider_name_upper = "SecurityHub" + human_friendly = "Security Hub" + } + + endpoint_info { + endpoint_api_call = "ListAutomationRules" + } + + resource_prefix { + correct = "aws_securityhub_" + } + + provider_package_correct = "securityhub" + doc_prefix = ["securityhub_"] + brand = "AWS" +} + +service "securitylake" { + + sdk { + id = "SecurityLake" + client_version = [2] + } + + names { + provider_name_upper = "SecurityLake" + human_friendly = "Security Lake" + } + + endpoint_info { + endpoint_api_call = "ListDataLakes" + } + + resource_prefix { + correct = "aws_securitylake_" + } + + provider_package_correct = "securitylake" + doc_prefix = ["securitylake_"] + brand = "AWS" +} + +service "serverlessrepo" { + + go_packages { + v1_package = "serverlessapplicationrepository" + v2_package = "serverlessapplicationrepository" + } + + sdk { + id = "ServerlessApplicationRepository" + client_version = [2] + } + + names { + aliases = ["serverlessapprepo", "serverlessapplicationrepository"] + provider_name_upper = "ServerlessRepo" + human_friendly = "Serverless Application Repository" + } + + client { + go_v1_client_typename = "ServerlessApplicationRepository" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + actual = "aws_serverlessapplicationrepository_" + correct = "aws_serverlessrepo_" + } + + provider_package_correct = "serverlessrepo" + doc_prefix = ["serverlessapplicationrepository_"] + brand = "AWS" +} + +service "servicecatalog" { + + sdk { + id = "Service Catalog" + client_version = [1] + } + + names { + provider_name_upper = "ServiceCatalog" + human_friendly = "Service Catalog" + } + + client { + go_v1_client_typename = "ServiceCatalog" + } + + endpoint_info { + 
endpoint_api_call = "ListPortfolios" + } + + resource_prefix { + correct = "aws_servicecatalog_" + } + + provider_package_correct = "servicecatalog" + doc_prefix = ["servicecatalog_"] + brand = "AWS" +} + +service "servicecatalogappregistry" { + + cli_v2_command { + aws_cli_v2_command = "servicecatalog-appregistry" + aws_cli_v2_command_no_dashes = "servicecatalogappregistry" + } + + go_packages { + v1_package = "appregistry" + v2_package = "servicecatalogappregistry" + } + + sdk { + id = "Service Catalog AppRegistry" + client_version = [2] + } + + names { + aliases = ["appregistry"] + provider_name_upper = "ServiceCatalogAppRegistry" + human_friendly = "Service Catalog AppRegistry" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + correct = "aws_servicecatalogappregistry_" + } + + provider_package_correct = "servicecatalogappregistry" + doc_prefix = ["servicecatalogappregistry_"] + brand = "AWS" +} + +service "servicequotas" { + + cli_v2_command { + aws_cli_v2_command = "service-quotas" + aws_cli_v2_command_no_dashes = "servicequotas" + } + + sdk { + id = "Service Quotas" + client_version = [2] + } + + names { + provider_name_upper = "ServiceQuotas" + human_friendly = "Service Quotas" + } + + endpoint_info { + endpoint_api_call = "ListServices" + } + + resource_prefix { + correct = "aws_servicequotas_" + } + + provider_package_correct = "servicequotas" + doc_prefix = ["servicequotas_"] + brand = "" +} +service "ses" { + + sdk { + id = "SES" + client_version = [1] + } + + names { + provider_name_upper = "SES" + human_friendly = "SES (Simple Email)" + } + + client { + go_v1_client_typename = "SES" + } + + endpoint_info { + endpoint_api_call = "ListIdentities" + } + + resource_prefix { + correct = "aws_ses_" + } + + provider_package_correct = "ses" + doc_prefix = ["ses_"] + brand = "AWS" +} + +service "sesv2" { + + sdk { + id = "SESv2" + client_version = [2] + } + + names { + provider_name_upper = "SESV2" + human_friendly = 
"SESv2 (Simple Email V2)" + } + + endpoint_info { + endpoint_api_call = "ListContactLists" + } + + resource_prefix { + correct = "aws_sesv2_" + } + + provider_package_correct = "sesv2" + doc_prefix = ["sesv2_"] + brand = "AWS" +} + +service "sfn" { + + cli_v2_command { + aws_cli_v2_command = "stepfunctions" + aws_cli_v2_command_no_dashes = "stepfunctions" + } + + sdk { + id = "SFN" + client_version = [2] + } + + names { + aliases = ["stepfunctions"] + provider_name_upper = "SFN" + human_friendly = "SFN (Step Functions)" + } + + client { + go_v1_client_typename = "SFN" + } + + endpoint_info { + endpoint_api_call = "ListActivities" + } + + resource_prefix { + correct = "aws_sfn_" + } + + provider_package_correct = "sfn" + doc_prefix = ["sfn_"] + brand = "AWS" +} + +service "shield" { + + sdk { + id = "Shield" + client_version = [2] + } + + names { + provider_name_upper = "Shield" + human_friendly = "Shield" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListProtectionGroups" + endpoint_region_override = "us-east-1" + } + + resource_prefix { + correct = "aws_shield_" + } + + provider_package_correct = "shield" + doc_prefix = ["shield_"] + brand = "AWS" +} + +service "signer" { + + sdk { + id = "signer" + client_version = [2] + } + + names { + provider_name_upper = "Signer" + human_friendly = "Signer" + } + + endpoint_info { + endpoint_api_call = "ListSigningJobs" + } + + resource_prefix { + correct = "aws_signer_" + } + + provider_package_correct = "signer" + doc_prefix = ["signer_"] + brand = "AWS" +} + +service "sms" { + + sdk { + id = "SMS" + client_version = [1] + } + + names { + provider_name_upper = "SMS" + human_friendly = "SMS (Server Migration)" + } + + client { + go_v1_client_typename = "SMS" + } + + resource_prefix { + correct = "aws_sms_" + } + + provider_package_correct = "sms" + doc_prefix = ["sms_"] + brand = "AWS" + not_implemented = true +} + +service "snowdevicemanagement" { + + cli_v2_command { + 
aws_cli_v2_command = "snow-device-management" + aws_cli_v2_command_no_dashes = "snowdevicemanagement" + } + + sdk { + id = "Snow Device Management" + client_version = [1] + } + + names { + provider_name_upper = "SnowDeviceManagement" + human_friendly = "Snow Device Management" + } + + client { + go_v1_client_typename = "SnowDeviceManagement" + } + + resource_prefix { + correct = "aws_snowdevicemanagement_" + } + + provider_package_correct = "snowdevicemanagement" + doc_prefix = ["snowdevicemanagement_"] + brand = "AWS" + not_implemented = true +} + +service "snowball" { + + sdk { + id = "Snowball" + client_version = [1] + } + + names { + provider_name_upper = "Snowball" + human_friendly = "Snow Family" + } + + client { + go_v1_client_typename = "Snowball" + } + + resource_prefix { + correct = "aws_snowball_" + } + + provider_package_correct = "snowball" + doc_prefix = ["snowball_"] + brand = "AWS" + not_implemented = true +} + +service "sns" { + + sdk { + id = "SNS" + client_version = [2] + } + + names { + provider_name_upper = "SNS" + human_friendly = "SNS (Simple Notification)" + } + + endpoint_info { + endpoint_api_call = "ListSubscriptions" + } + + resource_prefix { + correct = "aws_sns_" + } + + provider_package_correct = "sns" + doc_prefix = ["sns_"] + brand = "AWS" +} + +service "sqs" { + + sdk { + id = "SQS" + client_version = [2] + } + + names { + provider_name_upper = "SQS" + human_friendly = "SQS (Simple Queue)" + } + + endpoint_info { + endpoint_api_call = "ListQueues" + } + + resource_prefix { + correct = "aws_sqs_" + } + + provider_package_correct = "sqs" + doc_prefix = ["sqs_"] + brand = "AWS" +} + +service "ssm" { + + sdk { + id = "SSM" + client_version = [2] + } + + names { + provider_name_upper = "SSM" + human_friendly = "SSM (Systems Manager)" + } + + endpoint_info { + endpoint_api_call = "ListDocuments" + } + + resource_prefix { + correct = "aws_ssm_" + } + + provider_package_correct = "ssm" + doc_prefix = ["ssm_"] + brand = "AWS" +} + +service 
"ssmcontacts" { + + cli_v2_command { + aws_cli_v2_command = "ssm-contacts" + aws_cli_v2_command_no_dashes = "ssmcontacts" + } + + sdk { + id = "SSM Contacts" + client_version = [2] + } + + names { + provider_name_upper = "SSMContacts" + human_friendly = "SSM Contacts" + } + + endpoint_info { + endpoint_api_call = "ListContacts" + } + + resource_prefix { + correct = "aws_ssmcontacts_" + } + + provider_package_correct = "ssmcontacts" + doc_prefix = ["ssmcontacts_"] + brand = "AWS" +} + +service "ssmincidents" { + + cli_v2_command { + aws_cli_v2_command = "ssm-incidents" + aws_cli_v2_command_no_dashes = "ssmincidents" + } + + sdk { + id = "SSM Incidents" + client_version = [2] + } + + names { + provider_name_upper = "SSMIncidents" + human_friendly = "SSM Incident Manager Incidents" + } + + endpoint_info { + endpoint_api_call = "ListResponsePlans" + } + + resource_prefix { + correct = "aws_ssmincidents_" + } + + provider_package_correct = "ssmincidents" + doc_prefix = ["ssmincidents_"] + brand = "AWS" +} + +service "ssmsap" { + + cli_v2_command { + aws_cli_v2_command = "ssm-sap" + aws_cli_v2_command_no_dashes = "ssmsap" + } + + sdk { + id = "Ssm Sap" + client_version = [2] + } + + names { + provider_name_upper = "SSMSAP" + human_friendly = "Systems Manager for SAP" + } + + endpoint_info { + endpoint_api_call = "ListApplications" + } + + resource_prefix { + correct = "aws_ssmsap_" + } + + provider_package_correct = "ssmsap" + doc_prefix = ["ssmsap_"] + brand = "AWS" +} + +service "sso" { + + sdk { + id = "SSO" + client_version = [2] + } + + names { + provider_name_upper = "SSO" + human_friendly = "SSO (Single Sign-On)" + } + + endpoint_info { + endpoint_api_call = "ListAccounts" + endpoint_api_params = "AccessToken: aws_sdkv2.String(\"mock-access-token\")" + endpoint_only = true + } + + resource_prefix { + correct = "aws_sso_" + } + + provider_package_correct = "sso" + doc_prefix = ["sso_"] + brand = "AWS" + not_implemented = true +} + +service "ssoadmin" { + + 
cli_v2_command { + aws_cli_v2_command = "sso-admin" + aws_cli_v2_command_no_dashes = "ssoadmin" + } + + sdk { + id = "SSO Admin" + client_version = [2] + } + + names { + provider_name_upper = "SSOAdmin" + human_friendly = "SSO Admin" + } + + client { + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "ListInstances" + } + + resource_prefix { + correct = "aws_ssoadmin_" + } + + provider_package_correct = "ssoadmin" + doc_prefix = ["ssoadmin_"] + brand = "AWS" +} + +service "identitystore" { + + sdk { + id = "identitystore" + client_version = [2] + } + + names { + provider_name_upper = "IdentityStore" + human_friendly = "SSO Identity Store" + } + + endpoint_info { + endpoint_api_call = "ListUsers" + endpoint_api_params = "IdentityStoreId: aws_sdkv2.String(\"d-1234567890\")" + } + + resource_prefix { + correct = "aws_identitystore_" + } + + provider_package_correct = "identitystore" + doc_prefix = ["identitystore_"] + brand = "AWS" +} + +service "ssooidc" { + + cli_v2_command { + aws_cli_v2_command = "sso-oidc" + aws_cli_v2_command_no_dashes = "ssooidc" + } + + sdk { + id = "SSO OIDC" + client_version = [1] + } + + names { + provider_name_upper = "SSOOIDC" + human_friendly = "SSO OIDC" + } + + client { + go_v1_client_typename = "SSOOIDC" + } + + resource_prefix { + correct = "aws_ssooidc_" + } + + provider_package_correct = "ssooidc" + doc_prefix = ["ssooidc_"] + brand = "AWS" + not_implemented = true +} + +service "storagegateway" { + + sdk { + id = "Storage Gateway" + client_version = [1] + } + + names { + provider_name_upper = "StorageGateway" + human_friendly = "Storage Gateway" + } + + client { + go_v1_client_typename = "StorageGateway" + } + + endpoint_info { + endpoint_api_call = "ListGateways" + } + + resource_prefix { + correct = "aws_storagegateway_" + } + + provider_package_correct = "storagegateway" + doc_prefix = ["storagegateway_"] + brand = "AWS" +} + +service "sts" { + + sdk { + id = "STS" + client_version = [2] + } + + names { 
+ provider_name_upper = "STS" + human_friendly = "STS (Security Token)" + } + + client { + skip_client_generate = true + } + + env_var { + deprecated_env_var = "AWS_STS_ENDPOINT" + tf_aws_env_var = "TF_AWS_STS_ENDPOINT" + } + + endpoint_info { + endpoint_api_call = "GetCallerIdentity" + } + + resource_prefix { + actual = "aws_caller_identity" + correct = "aws_sts_" + } + + provider_package_correct = "sts" + doc_prefix = ["caller_identity"] + brand = "AWS" +} + +service "support" { + + sdk { + id = "Support" + client_version = [1] + } + + names { + provider_name_upper = "Support" + human_friendly = "Support" + } + + client { + go_v1_client_typename = "Support" + } + + resource_prefix { + correct = "aws_support_" + } + + provider_package_correct = "support" + doc_prefix = ["support_"] + brand = "AWS" + not_implemented = true +} + +service "swf" { + + sdk { + id = "SWF" + client_version = [2] + } + + names { + provider_name_upper = "SWF" + human_friendly = "SWF (Simple Workflow)" + } + + endpoint_info { + endpoint_api_call = "ListDomains" + endpoint_api_params = "RegistrationStatus: \"REGISTERED\"" + } + + resource_prefix { + correct = "aws_swf_" + } + + provider_package_correct = "swf" + doc_prefix = ["swf_"] + brand = "AWS" +} + +service "textract" { + + sdk { + id = "Textract" + client_version = [1] + } + + names { + provider_name_upper = "Textract" + human_friendly = "Textract" + } + + client { + go_v1_client_typename = "Textract" + } + + resource_prefix { + correct = "aws_textract_" + } + + provider_package_correct = "textract" + doc_prefix = ["textract_"] + brand = "Amazon" + not_implemented = true +} + +service "timestreaminfluxdb" { + + cli_v2_command { + aws_cli_v2_command = "timestream-influxdb" + aws_cli_v2_command_no_dashes = "timestreaminfluxdb" + } + + sdk { + id = "Timestream InfluxDB" + client_version = [2] + } + + names { + provider_name_upper = "TimestreamInfluxDB" + human_friendly = "Timestream for InfluxDB" + } + + endpoint_info { + 
endpoint_api_call = "ListDbInstances" + } + + resource_prefix { + correct = "aws_timestreaminfluxdb_" + } + + provider_package_correct = "timestreaminfluxdb" + doc_prefix = ["timestreaminfluxdb_"] + brand = "AWS" +} + +service "timestreamquery" { + + cli_v2_command { + aws_cli_v2_command = "timestream-query" + aws_cli_v2_command_no_dashes = "timestreamquery" + } + + sdk { + id = "Timestream Query" + client_version = [1] + } + + names { + provider_name_upper = "TimestreamQuery" + human_friendly = "Timestream Query" + } + + client { + go_v1_client_typename = "TimestreamQuery" + } + + resource_prefix { + correct = "aws_timestreamquery_" + } + + provider_package_correct = "timestreamquery" + doc_prefix = ["timestreamquery_"] + brand = "Amazon" + not_implemented = true +} + +service "timestreamwrite" { + + cli_v2_command { + aws_cli_v2_command = "timestream-write" + aws_cli_v2_command_no_dashes = "timestreamwrite" + } + + sdk { + id = "Timestream Write" + client_version = [2] + } + + names { + provider_name_upper = "TimestreamWrite" + human_friendly = "Timestream Write" + } + + endpoint_info { + endpoint_api_call = "ListDatabases" + } + + resource_prefix { + correct = "aws_timestreamwrite_" + } + + provider_package_correct = "timestreamwrite" + doc_prefix = ["timestreamwrite_"] + brand = "AWS" +} + +service "transcribe" { + + go_packages { + v1_package = "transcribeservice" + v2_package = "transcribe" + } + + sdk { + id = "Transcribe" + client_version = [2] + } + + names { + aliases = ["transcribeservice"] + provider_name_upper = "Transcribe" + human_friendly = "Transcribe" + } + + endpoint_info { + endpoint_api_call = "ListLanguageModels" + } + + resource_prefix { + correct = "aws_transcribe_" + } + + provider_package_correct = "transcribe" + doc_prefix = ["transcribe_"] + brand = "Amazon" +} + +service "transcribestreaming" { + + go_packages { + v1_package = "transcribestreamingservice" + v2_package = "transcribestreaming" + } + + sdk { + id = "Transcribe Streaming" + 
client_version = [1] + } + + names { + aliases = ["transcribestreamingservice"] + provider_name_upper = "TranscribeStreaming" + human_friendly = "Transcribe Streaming" + } + + client { + go_v1_client_typename = "TranscribeStreamingService" + } + + resource_prefix { + correct = "aws_transcribestreaming_" + } + + provider_package_correct = "transcribestreaming" + doc_prefix = ["transcribestreaming_"] + brand = "Amazon" + not_implemented = true +} + +service "transfer" { + + sdk { + id = "Transfer" + client_version = [2] + } + + names { + provider_name_upper = "Transfer" + human_friendly = "Transfer Family" + } + + + endpoint_info { + endpoint_api_call = "ListConnectors" + } + + resource_prefix { + correct = "aws_transfer_" + } + + provider_package_correct = "transfer" + doc_prefix = ["transfer_"] + brand = "AWS" +} + +service "translate" { + + sdk { + id = "Translate" + client_version = [1] + } + + names { + provider_name_upper = "Translate" + human_friendly = "Translate" + } + + client { + go_v1_client_typename = "Translate" + } + + resource_prefix { + correct = "aws_translate_" + } + + provider_package_correct = "translate" + doc_prefix = ["translate_"] + brand = "Amazon" + not_implemented = true +} + +service "vpclattice" { + + cli_v2_command { + aws_cli_v2_command = "vpc-lattice" + aws_cli_v2_command_no_dashes = "vpclattice" + } + + sdk { + id = "VPC Lattice" + client_version = [2] + } + + names { + provider_name_upper = "VPCLattice" + human_friendly = "VPC Lattice" + } + + endpoint_info { + endpoint_api_call = "ListServices" + } + + resource_prefix { + correct = "aws_vpclattice_" + } + + provider_package_correct = "vpclattice" + doc_prefix = ["vpclattice_"] + brand = "AWS" +} + +service "wafv2" { + + sdk { + id = "WAFV2" + client_version = [2] + } + + names { + provider_name_upper = "WAFV2" + human_friendly = "WAF" + } + + endpoint_info { + endpoint_api_call = "ListRuleGroups" + endpoint_api_params = "Scope: awstypes.ScopeRegional" + } + + resource_prefix { + 
correct = "aws_wafv2_" + } + + provider_package_correct = "wafv2" + doc_prefix = ["wafv2_"] + brand = "AWS" +} + +service "waf" { + + sdk { + id = "WAF" + client_version = [2] + } + + names { + provider_name_upper = "WAF" + human_friendly = "WAF Classic" + } + + endpoint_info { + endpoint_api_call = "ListRules" + } + + resource_prefix { + correct = "aws_waf_" + } + + provider_package_correct = "waf" + doc_prefix = ["waf_"] + brand = "AWS" +} + +service "wafregional" { + + cli_v2_command { + aws_cli_v2_command = "waf-regional" + aws_cli_v2_command_no_dashes = "wafregional" + } + + sdk { + id = "WAF Regional" + client_version = [2] + } + + names { + provider_name_upper = "WAFRegional" + human_friendly = "WAF Classic Regional" + } + + endpoint_info { + endpoint_api_call = "ListRules" + } + + resource_prefix { + correct = "aws_wafregional_" + } + + provider_package_correct = "wafregional" + doc_prefix = ["wafregional_"] + brand = "AWS" +} + +service "budgets" { + + sdk { + id = "Budgets" + client_version = [2] + } + + names { + provider_name_upper = "Budgets" + human_friendly = "Web Services Budgets" + } + + endpoint_info { + endpoint_api_call = "DescribeBudgets" + endpoint_api_params = "AccountId: aws_sdkv2.String(\"012345678901\")" + } + + resource_prefix { + correct = "aws_budgets_" + } + + provider_package_correct = "budgets" + doc_prefix = ["budgets_"] + brand = "AWS" +} + +service "wellarchitected" { + + sdk { + id = "WellArchitected" + client_version = [2] + } + + names { + provider_name_upper = "WellArchitected" + human_friendly = "Well-Architected Tool" + } + + endpoint_info { + endpoint_api_call = "ListProfiles" + } + + resource_prefix { + correct = "aws_wellarchitected_" + } + + provider_package_correct = "wellarchitected" + doc_prefix = ["wellarchitected_"] + brand = "AWS" +} + +service "workdocs" { + + sdk { + id = "WorkDocs" + client_version = [1] + } + + names { + provider_name_upper = "WorkDocs" + human_friendly = "WorkDocs" + } + + client { + 
go_v1_client_typename = "WorkDocs" + } + + resource_prefix { + correct = "aws_workdocs_" + } + + provider_package_correct = "workdocs" + doc_prefix = ["workdocs_"] + brand = "Amazon" + not_implemented = true +} + +service "worklink" { + + sdk { + id = "WorkLink" + client_version = [1] + } + + names { + provider_name_upper = "WorkLink" + human_friendly = "WorkLink" + } + + client { + go_v1_client_typename = "WorkLink" + } + + endpoint_info { + endpoint_api_call = "ListFleets" + } + + resource_prefix { + correct = "aws_worklink_" + } + + provider_package_correct = "worklink" + doc_prefix = ["worklink_"] + brand = "AWS" +} + +service "workmail" { + + sdk { + id = "WorkMail" + client_version = [1] + } + + names { + provider_name_upper = "WorkMail" + human_friendly = "WorkMail" + } + + client { + go_v1_client_typename = "WorkMail" + } + + resource_prefix { + correct = "aws_workmail_" + } + + provider_package_correct = "workmail" + doc_prefix = ["workmail_"] + brand = "Amazon" + not_implemented = true +} + +service "workmailmessageflow" { + + sdk { + id = "WorkMailMessageFlow" + client_version = [1] + } + + names { + provider_name_upper = "WorkMailMessageFlow" + human_friendly = "WorkMail Message Flow" + } + + client { + go_v1_client_typename = "WorkMailMessageFlow" + } + + resource_prefix { + correct = "aws_workmailmessageflow_" + } + + provider_package_correct = "workmailmessageflow" + doc_prefix = ["workmailmessageflow_"] + brand = "Amazon" + not_implemented = true +} + +service "workspaces" { + + sdk { + id = "WorkSpaces" + client_version = [2] + } + + names { + provider_name_upper = "WorkSpaces" + human_friendly = "WorkSpaces" + } + + endpoint_info { + endpoint_api_call = "DescribeWorkspaces" + } + + resource_prefix { + correct = "aws_workspaces_" + } + + provider_package_correct = "workspaces" + doc_prefix = ["workspaces_"] + brand = "AWS" +} + +service "workspacesweb" { + + cli_v2_command { + aws_cli_v2_command = "workspaces-web" + aws_cli_v2_command_no_dashes = 
"workspacesweb" + } + + sdk { + id = "WorkSpaces Web" + client_version = [2] + } + + names { + provider_name_upper = "WorkSpacesWeb" + human_friendly = "WorkSpaces Web" + } + + endpoint_info { + endpoint_api_call = "ListPortals" + } + + resource_prefix { + correct = "aws_workspacesweb_" + } + + provider_package_correct = "workspacesweb" + doc_prefix = ["workspacesweb_"] + brand = "AWS" +} + +service "xray" { + + sdk { + id = "XRay" + client_version = [2] + } + + names { + provider_name_upper = "XRay" + human_friendly = "X-Ray" + } + + endpoint_info { + endpoint_api_call = "ListResourcePolicies" + } + + resource_prefix { + correct = "aws_xray_" + } + + provider_package_correct = "xray" + doc_prefix = ["xray_"] + brand = "AWS" +} + +service "verifiedpermissions" { + + sdk { + id = "VerifiedPermissions" + client_version = [2] + } + + names { + provider_name_upper = "VerifiedPermissions" + human_friendly = "Verified Permissions" + } + + endpoint_info { + endpoint_api_call = "ListPolicyStores" + } + + resource_prefix { + correct = "aws_verifiedpermissions_" + } + + provider_package_correct = "verifiedpermissions" + doc_prefix = ["verifiedpermissions_"] + brand = "AWS" +} + +service "codecatalyst" { + + sdk { + id = "CodeCatalyst" + client_version = [2] + } + + names { + provider_name_upper = "CodeCatalyst" + human_friendly = "CodeCatalyst" + } + + endpoint_info { + endpoint_api_call = "ListAccessTokens" + } + + resource_prefix { + correct = "aws_codecatalyst_" + } + + provider_package_correct = "codecatalyst" + doc_prefix = ["codecatalyst_"] + brand = "AWS" +} + +service "mediapackagev2" { + + sdk { + id = "MediaPackageV2" + client_version = [2] + } + + names { + provider_name_upper = "MediaPackageV2" + human_friendly = "Elemental MediaPackage Version 2" + } + + endpoint_info { + endpoint_api_call = "ListChannelGroups" + } + + resource_prefix { + actual = "aws_media_packagev2_" + correct = "aws_mediapackagev2_" + } + + provider_package_correct = "mediapackagev2" + 
doc_prefix = ["media_packagev2_"] + brand = "AWS" +} + +service "iot" { + + sdk { + id = "IoT" + client_version = [2] + } + + names { + provider_name_upper = "IoT" + human_friendly = "IoT Core" + } + + client { + go_v1_client_typename = "IoT" + } + + endpoint_info { + endpoint_api_call = "DescribeDefaultAuthorizer" + } + + resource_prefix { + correct = "aws_iot_" + } + + provider_package_correct = "iot" + doc_prefix = ["iot_"] + brand = "AWS" +} + +service "dynamodb" { + + sdk { + id = "DynamoDB" + client_version = [2] + } + + names { + provider_name_upper = "DynamoDB" + human_friendly = "DynamoDB" + } + + client { + skip_client_generate = true + } + + env_var { + deprecated_env_var = "AWS_DYNAMODB_ENDPOINT" + tf_aws_env_var = "TF_AWS_DYNAMODB_ENDPOINT" + } + endpoint_info { + endpoint_api_call = "ListTables" + } + + resource_prefix { + correct = "aws_dynamodb_" + } + + provider_package_correct = "dynamodb" + doc_prefix = ["dynamodb_"] + brand = "AWS" +} + +service "ec2" { + + sdk { + id = "EC2" + client_version = [1, 2] + } + + names { + provider_name_upper = "EC2" + human_friendly = "EC2 (Elastic Compute Cloud)" + } + + client { + go_v1_client_typename = "EC2" + skip_client_generate = true + } + + endpoint_info { + endpoint_api_call = "DescribeVpcs" + } + + resource_prefix { + actual = "aws_(ami|availability_zone|ec2_(availability|capacity|fleet|host|instance|public_ipv4_pool|serial|spot|tag)|eip|instance|key_pair|launch_template|placement_group|spot)" + correct = "aws_ec2_" + } + + sub_service "ec2ebs" { + + cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "EC2EBS" + human_friendly = "EBS (EC2)" + } + + resource_prefix { + actual = "aws_(ebs_|volume_attach|snapshot_create)" + correct = "aws_ec2ebs_" + } + + split_package = "ec2" + file_prefix = "ebs_" + doc_prefix = ["ebs_", 
"volume_attachment", "snapshot_"] + brand = "Amazon" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + sub_service "ec2outposts" { + + cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "EC2Outposts" + human_friendly = "Outposts (EC2)" + } + + resource_prefix { + actual = "aws_ec2_(coip_pool|local_gateway)" + correct = "aws_ec2outposts_" + } + + split_package = "ec2" + file_prefix = "outposts_" + doc_prefix = ["ec2_coip_pool", "ec2_local_gateway"] + brand = "AWS" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + sub_service "transitgateway" { + + cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "TransitGateway" + human_friendly = "Transit Gateway" + } + + resource_prefix { + actual = "aws_ec2_transit_gateway" + correct = "aws_transitgateway_" + } + + split_package = "ec2" + file_prefix = "transitgateway_" + doc_prefix = ["ec2_transit_gateway"] + brand = "AWS" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + sub_service "verifiedaccess" { + + cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "VerifiedAccess" + human_friendly = "Verified Access" + } + + resource_prefix { + actual = "aws_verifiedaccess" + correct = "aws_verifiedaccess_" + } + + split_package = "ec2" + file_prefix = "verifiedaccess_" + doc_prefix = ["verifiedaccess_"] + brand = "AWS" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + sub_service "vpc" { + + cli_v2_command { + 
aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "VPC" + human_friendly = "VPC (Virtual Private Cloud)" + } + + resource_prefix { + actual = "aws_((default_)?(network_acl|route_table|security_group|subnet|vpc(?!_ipam))|ec2_(managed|network|subnet|traffic)|egress_only_internet|flow_log|internet_gateway|main_route_table_association|nat_gateway|network_interface|prefix_list|route\\b)" + correct = "aws_vpc_" + } + + split_package = "ec2" + file_prefix = "vpc_" + doc_prefix = ["default_network_", "default_route_", "default_security_", "default_subnet", "default_vpc", "ec2_managed_", "ec2_network_", "ec2_subnet_", "ec2_traffic_", "egress_only_", "flow_log", "internet_gateway", "main_route_", "nat_", "network_", "prefix_list", "route_", "route\\.", "security_group", "subnet", "vpc_dhcp_", "vpc_endpoint", "vpc_ipv", "vpc_network_performance", "vpc_peering_", "vpc_security_group_", "vpc\\.", "vpcs\\."] + brand = "Amazon" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + sub_service "ipam" { + + cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "IPAM" + human_friendly = "VPC IPAM (IP Address Manager)" + } + + resource_prefix { + actual = "aws_vpc_ipam" + correct = "aws_ipam_" + } + split_package = "ec2" + file_prefix = "ipam_" + doc_prefix = ["vpc_ipam"] + brand = "Amazon" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + sub_service "vpnclient" { + + cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "ClientVPN" + 
human_friendly = "VPN (Client)" + } + + resource_prefix { + actual = "aws_ec2_client_vpn" + correct = "aws_vpnclient_" + } + split_package = "ec2" + file_prefix = "vpnclient_" + doc_prefix = ["ec2_client_vpn_"] + brand = "AWS" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + sub_service "vpnsite" { + + cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + + go_packages { + v1_package = "" + v2_package = "" + } + + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "SiteVPN" + human_friendly = "VPN (Site-to-Site)" + } + + resource_prefix { + actual = "aws_(customer_gateway|vpn_)" + correct = "aws_vpnsite_" + } + + split_package = "ec2" + file_prefix = "vpnsite_" + doc_prefix = ["customer_gateway", "vpn_"] + brand = "AWS" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + sub_service "wavelength" { + +cli_v2_command { + aws_cli_v2_command = "" + aws_cli_v2_command_no_dashes = "" + } + +go_packages { + v1_package = "" + v2_package = "" + } + sdk { + id = "" + client_version = null + } + + names { + provider_name_upper = "Wavelength" + human_friendly = "Wavelength" + } + + resource_prefix { + actual = "aws_ec2_carrier_gateway" + correct = "aws_wavelength_" + } + + split_package = "ec2" + file_prefix = "wavelength_" + doc_prefix = ["ec2_carrier_"] + brand = "AWS" + exclude = true + allowed_subcategory = true + note = "Part of EC2" + } + + provider_package_correct = "ec2" + split_package = "ec2" + file_prefix = "ec2_" + doc_prefix = ["ami", "availability_zone", "ec2_availability_", "ec2_capacity_", "ec2_fleet", "ec2_host", "ec2_image_", "ec2_instance_", "ec2_public_ipv4_pool", "ec2_serial_", "ec2_spot_", "ec2_tag", "eip", "instance", "key_pair", "launch_template", "placement_group", "spot_"] + brand = "Amazon" +} diff --git a/names/data/read.go b/names/data/read.go index d8262e810e1..172a2aa2094 100644 --- a/names/data/read.go +++ b/names/data/read.go @@ 
-4,12 +4,12 @@ package data import ( - "bytes" _ "embed" - "encoding/csv" - "errors" - "io" + "log" "strings" + + "github.com/hashicorp/hcl/v2/gohcl" + "github.com/hashicorp/hcl/v2/hclparse" ) type ServiceRecord []string @@ -22,6 +22,14 @@ func (sr ServiceRecord) AWSCLIV2CommandNoDashes() string { return sr[colAWSCLIV2CommandNoDashes] } +func (sr ServiceRecord) GoPackageName(version int) string { + switch version { + case 1: + return sr.GoV1Package() + } + return sr.GoV2Package() +} + func (sr ServiceRecord) GoV1Package() string { return sr[colGoV1Package] } @@ -61,6 +69,14 @@ func (sr ServiceRecord) ProviderNameUpper() string { return sr[colProviderNameUpper] } +func (sr ServiceRecord) ClientTypeName(version int) (s string) { + switch version { + case 1: + return sr.GoV1ClientTypeName() + } + return "Client" +} + func (sr ServiceRecord) GoV1ClientTypeName() string { return sr[colGoV1ClientTypeName] } @@ -179,32 +195,214 @@ func (sr ServiceRecord) Note() string { return sr[colNote] } -func ReadAllServiceData() (results []ServiceRecord, err error) { - reader := csv.NewReader(bytes.NewReader(namesData)) - // reader.ReuseRecord = true +func parseService(curr Service) ServiceRecord { + record := make(ServiceRecord, colNote+1) + + // provider packages/label + record[colProviderPackageActual] = curr.Label + record[colProviderPackageCorrect] = curr.Label + + // cli_v2_command + if len(curr.ServiceCli) > 0 { + record[colAWSCLIV2Command] = curr.ServiceCli[0].AWSCLIV2Command + record[colAWSCLIV2CommandNoDashes] = curr.ServiceCli[0].AWSCLIV2CommandNoDashes + } else { + record[colAWSCLIV2Command] = curr.Label + record[colAWSCLIV2CommandNoDashes] = curr.Label + } + + // go_packages + if len(curr.ServiceGoPackages) > 0 { + record[colGoV1Package] = curr.ServiceGoPackages[0].V1Package + record[colGoV2Package] = curr.ServiceGoPackages[0].V2Package + } else { + record[colGoV1Package] = curr.Label + record[colGoV2Package] = curr.Label + } + + // sdk + if len(curr.ServiceSDK) > 0 { + 
record[colSDKID] = curr.ServiceSDK[0].ID + for _, i := range curr.ServiceSDK[0].Version { + if i == 1 { + record[colClientSDKV1] = "1" + } + if i == 2 { + record[colClientSDKV2] = "2" + } + } + } - // Skip the header - _, err = reader.Read() - if err != nil { - return + // names + if len(curr.ServiceNames) > 0 { + record[colAliases] = strings.Join(curr.ServiceNames[0].Aliases, ";") + record[colProviderNameUpper] = curr.ServiceNames[0].ProviderNameUpper + record[colHumanFriendly] = curr.ServiceNames[0].HumanFriendly } - for { - r, err := reader.Read() - if errors.Is(err, io.EOF) { - break + // client + if len(curr.ServiceClient) > 0 { + record[colGoV1ClientTypeName] = curr.ServiceClient[0].GoV1ClientTypeName + if curr.ServiceClient[0].SkipClientGenerate { + record[colSkipClientGenerate] = "x" + } else { + record[colSkipClientGenerate] = "" } - if err != nil { - return nil, err + } + + // env_var + if len(curr.ServiceEnvVars) > 0 { + record[colDeprecatedEnvVar] = curr.ServiceEnvVars[0].DeprecatedEnvVar + record[colTFAWSEnvVar] = curr.ServiceEnvVars[0].TFAWSEnvVar + } + + // endpoint_info + if len(curr.ServiceEndpoints) > 0 { + record[colEndpointAPICall] = curr.ServiceEndpoints[0].EndpointAPICall + record[colEndpointAPIParams] = curr.ServiceEndpoints[0].EndpointAPIParams + record[colEndpointOverrideRegion] = curr.ServiceEndpoints[0].EndpointRegionOverride + if curr.ServiceEndpoints[0].EndpointOnly { + record[colEndpointOnly] = "x" + } else { + record[colEndpointOnly] = "" } - results = append(results, ServiceRecord(r)) + } + + // resource_prefix + if len(curr.ServiceResourcePrefix) > 0 { + record[colResourcePrefixActual] = curr.ServiceResourcePrefix[0].ResourcePrefixActual + record[colResourcePrefixCorrect] = curr.ServiceResourcePrefix[0].ResourcePrefixCorrect + } + + // rest + record[colSplitPackageRealPackage] = curr.ServiceSplitPackage + record[colFilePrefix] = curr.FilePrefix + record[colDocPrefix] = strings.Join(curr.DocPrefix, ";") + record[colBrand] = 
curr.Brand + if curr.Exclude { + record[colExclude] = "x" + } else { + record[colExclude] = "" + } + if curr.NotImplemented { + record[colNotImplemented] = "x" + } else { + record[colNotImplemented] = "" + } + if curr.AllowedSubcategory { + record[colAllowedSubcategory] = "x" + } else { + record[colAllowedSubcategory] = "" + } + record[colNote] = curr.Note + if len(curr.ServiceProviderPackageCorrect) > 0 { + record[colProviderPackageCorrect] = curr.ServiceProviderPackageCorrect + } + + return record +} + +func ReadAllServiceData() (results []ServiceRecord, err error) { + var decodedServiceList Services + parser := hclparse.NewParser() + toParse, parseErr := parser.ParseHCL(b, "names_data.hcl") + if parseErr.HasErrors() { + log.Fatal("Parser error : ", parseErr) + } + decodeErr := gohcl.DecodeBody(toParse.Body, nil, &decodedServiceList) + if decodeErr.HasErrors() { + log.Fatal("Decode error", decodeErr) + } + for _, curr := range decodedServiceList.ServiceList { + if len(curr.SubService) > 0 { + for _, sub := range curr.SubService { + results = append(results, parseService(sub)) + } + } + results = append(results, parseService(curr)) } return } -//go:embed names_data.csv -var namesData []byte +type CLIV2Command struct { + AWSCLIV2Command string `hcl:"aws_cli_v2_command,optional"` + AWSCLIV2CommandNoDashes string `hcl:"aws_cli_v2_command_no_dashes,optional"` +} + +type GoPackages struct { + V1Package string `hcl:"v1_package,optional"` + V2Package string `hcl:"v2_package,optional"` +} + +type ResourcePrefix struct { + ResourcePrefixActual string `hcl:"actual,optional"` + ResourcePrefixCorrect string `hcl:"correct,optional"` +} + +type SDK struct { + ID string `hcl:"id,optional"` + Version []int `hcl:"client_version,attr"` +} + +type Names struct { + Aliases []string `hcl:"aliases,optional"` + ProviderNameUpper string `hcl:"provider_name_upper,attr"` + HumanFriendly string `hcl:"human_friendly,attr"` +} + +type ProviderPackage struct { + Actual string 
`hcl:"actual,optional"` + Correct string `hcl:"correct,optional"` +} + +type Client struct { + GoV1ClientTypeName string `hcl:"go_v1_client_typename,optional"` + SkipClientGenerate bool `hcl:"skip_client_generate,optional"` +} + +type EnvVar struct { + DeprecatedEnvVar string `hcl:"deprecated_env_var,optional"` + TFAWSEnvVar string `hcl:"tf_aws_env_var,optional"` +} + +type EndpointInfo struct { + EndpointAPICall string `hcl:"endpoint_api_call,optional"` + EndpointAPIParams string `hcl:"endpoint_api_params,optional"` + EndpointRegionOverride string `hcl:"endpoint_region_override,optional"` + EndpointOnly bool `hcl:"endpoint_only,optional"` +} + +type Service struct { + Label string `hcl:"CLIV2Command,label"` + ServiceCli []CLIV2Command `hcl:"cli_v2_command,block"` + ServiceGoPackages []GoPackages `hcl:"go_packages,block"` + ServiceSDK []SDK `hcl:"sdk,block"` + ServiceNames []Names `hcl:"names,block"` + ServiceClient []Client `hcl:"client,block"` + ServiceEnvVars []EnvVar `hcl:"env_var,block"` + ServiceEndpoints []EndpointInfo `hcl:"endpoint_info,block"` + ServiceResourcePrefix []ResourcePrefix `hcl:"resource_prefix,block"` + + SubService []Service `hcl:"sub_service,block"` + + ServiceProviderPackageCorrect string `hcl:"provider_package_correct,optional"` + ServiceSplitPackage string `hcl:"split_package,optional"` + FilePrefix string `hcl:"file_prefix,optional"` + DocPrefix []string `hcl:"doc_prefix,optional"` + Brand string `hcl:"brand,attr"` + Exclude bool `hcl:"exclude,optional"` + NotImplemented bool `hcl:"not_implemented,optional"` + AllowedSubcategory bool `hcl:"allowed_subcategory,optional"` + Note string `hcl:"note,optional"` +} + +type Services struct { + ServiceList []Service `hcl:"service,block"` +} + +//go:embed names_data.hcl +var b []byte const ( colAWSCLIV2Command = iota diff --git a/names/names.go b/names/names.go index 39923e10b49..a95b328b9e6 100644 --- a/names/names.go +++ b/names/names.go @@ -3,14 +3,14 @@ // Package names provides constants for 
AWS service names that are used as keys // for the endpoints slice in internal/conns/conns.go. The package also exposes -// access to data found in the data/names_data.csv file, which provides additional +// access to data found in the data/names_data.hcl file, which provides additional // service-related name information. // // Consumers of the names package include the conns package // (internal/conn/conns.go), the provider package // (internal/provider/provider.go), generators, and the skaff tool. // -// It is very important that information in the data/names_data.csv be exactly +// It is very important that information in the data/names_data.hcl be exactly // correct because the Terrform AWS Provider relies on the information to // function correctly. package names @@ -18,6 +18,7 @@ package names import ( "fmt" "log" + "slices" "strings" "github.com/hashicorp/terraform-provider-aws/names/data" @@ -25,88 +26,100 @@ import ( // Endpoint constants defined by the AWS SDK v1 but not defined in the AWS SDK v2. 
const ( - AccessAnalyzerEndpointID = "access-analyzer" ACMPCAEndpointID = "acm-pca" AMPEndpointID = "aps" - AppStreamEndpointID = "appstream2" - ApplicationAutoscalingEndpointID = "application-autoscaling" - AppFabricEndpointID = "appfabric" - AppIntegrationsEndpointID = "app-integrations" - AppConfigEndpointID = "appconfig" - AmplifyEndpointID = "amplify" APIGatewayID = "apigateway" APIGatewayV2EndpointID = "apigateway" + AccessAnalyzerEndpointID = "access-analyzer" + AmplifyEndpointID = "amplify" + AppConfigEndpointID = "appconfig" + AppFabricEndpointID = "appfabric" + AppIntegrationsEndpointID = "app-integrations" + AppStreamEndpointID = "appstream2" + AppSyncEndpointID = "appsync" + ApplicationAutoscalingEndpointID = "application-autoscaling" ApplicationInsightsEndpointID = "applicationinsights" AthenaEndpointID = "athena" AuditManagerEndpointID = "auditmanager" AutoScalingPlansEndpointID = "autoscaling-plans" + BCMDataExportsEndpointID = "bcm-data-exports" + BackupEndpointID = "backup" BatchEndpointID = "batch" BedrockAgentEndpointID = "bedrockagent" BedrockEndpointID = "bedrock" - BCMDataExportsEndpointID = "bcm-data-exports" BudgetsEndpointID = "budgets" ChimeSDKMediaPipelinesEndpointID = "media-pipelines-chime" ChimeSDKVoiceEndpointID = "voice-chime" + Cloud9EndpointID = "cloud9" CloudFormationEndpointID = "cloudformation" CloudFrontEndpointID = "cloudfront" CloudSearchEndpointID = "cloudsearch" CloudWatchEndpointID = "monitoring" - Cloud9EndpointID = "cloud9" CodeArtifactEndpointID = "codeartifact" CodeGuruReviewerEndpointID = "codeguru-reviewer" CodeStarConnectionsEndpointID = "codestar-connections" CognitoIdentityEndpointID = "cognito-identity" ComprehendEndpointID = "comprehend" ConfigServiceEndpointID = "config" + DataExchangeEndpointID = "dataexchange" + DataPipelineEndpointID = "datapipeline" + DetectiveEndpointID = "api.detective" DeviceFarmEndpointID = "devicefarm" DevOpsGuruEndpointID = "devops-guru" DLMEndpointID = "dlm" ECREndpointID = "api.ecr" 
+ EFSEndpointID = "elasticfilesystem" EKSEndpointID = "eks" + ELBEndpointID = "elasticloadbalancing" EMREndpointID = "elasticmapreduce" + ElastiCacheEndpointID = "elasticache" EventsEndpointID = "events" EvidentlyEndpointID = "evidently" FMSEndpointID = "fms" + GrafanaEndpointID = "grafana" + IVSChatEndpointID = "ivschat" IdentityStoreEndpointID = "identitystore" Inspector2EndpointID = "inspector2" - IVSChatEndpointID = "ivschat" - KendraEndpointID = "kendra" KMSEndpointID = "kms" + KafkaConnectEndpointID = "kafkaconnect" + KendraEndpointID = "kendra" LambdaEndpointID = "lambda" LexV2ModelsEndpointID = "models-v2-lex" M2EndpointID = "m2" + MQEndpointID = "mq" MediaConvertEndpointID = "mediaconvert" MediaLiveEndpointID = "medialive" - MQEndpointID = "mq" ObservabilityAccessManagerEndpointID = "oam" - OpenSearchServerlessEndpointID = "aoss" OpenSearchIngestionEndpointID = "osis" + OpenSearchServerlessEndpointID = "aoss" PaymentCryptographyEndpointID = "paymentcryptography" PipesEndpointID = "pipes" PollyEndpointID = "polly" QLDBEndpointID = "qldb" - RedshiftServerlessEndpointID = "redshift-serverless" + RUMEndpointID = "rum" RedshiftEndpointID = "redshift" + RedshiftServerlessEndpointID = "redshift-serverless" RekognitionEndpointID = "rekognition" ResourceExplorer2EndpointID = "resource-explorer-2" RolesAnywhereEndpointID = "rolesanywhere" Route53DomainsEndpointID = "route53domains" - SchemasEndpointID = "schemas" + SSMEndpointID = "ssm" + SSMIncidentsEndpointID = "ssm-incidents" + SSOAdminEndpointID = "sso" + STSEndpointID = "sts" SchedulerEndpointID = "scheduler" + SchemasEndpointID = "schemas" ServiceCatalogAppRegistryEndpointID = "servicecatalog-appregistry" ServiceDiscoveryEndpointID = "servicediscovery" ServiceQuotasEndpointID = "servicequotas" ShieldEndpointID = "shield" - SSMEndpointID = "ssm" - SSMIncidentsEndpointID = "ssm-incidents" - SSOAdminEndpointID = "sso" - STSEndpointID = "sts" TranscribeEndpointID = "transcribe" TransferEndpointID = "transfer" - 
VerifiedPermissionsEndpointID = "verifiedpermissions" VPCLatticeEndpointID = "vpc-lattice" + VerifiedPermissionsEndpointID = "verifiedpermissions" WAFEndpointID = "waf" WAFRegionalEndpointID = "waf-regional" + DataZoneEndpointID = "datazone" ) // These should move to aws-sdk-go-base. @@ -169,8 +182,55 @@ const ( // AWS ISOB (US) partition's regions. USISOBEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). + + // AWS ISOF partition's regions. + EUISOEWest1RegionID = "eu-isoe-west-1" // EU ISOE West. ) +var allRegionIDs = []string{ + AFSouth1RegionID, + APEast1RegionID, + APNortheast1RegionID, + APNortheast2RegionID, + APNortheast3RegionID, + APSouth1RegionID, + APSouth2RegionID, + APSoutheast1RegionID, + APSoutheast2RegionID, + APSoutheast3RegionID, + APSoutheast4RegionID, + CACentral1RegionID, + CAWest1RegionID, + EUCentral1RegionID, + EUCentral2RegionID, + EUNorth1RegionID, + EUSouth1RegionID, + EUSouth2RegionID, + EUWest1RegionID, + EUWest2RegionID, + EUWest3RegionID, + ILCentral1RegionID, + MECentral1RegionID, + MESouth1RegionID, + SAEast1RegionID, + USEast1RegionID, + USEast2RegionID, + USWest1RegionID, + USWest2RegionID, + CNNorth1RegionID, + CNNorthwest1RegionID, + USGovEast1RegionID, + USGovWest1RegionID, + USISOEast1RegionID, + USISOWest1RegionID, + USISOBEast1RegionID, + EUISOEWest1RegionID, +} + +func Regions() []string { + return slices.Clone(allRegionIDs) +} + func DNSSuffixForPartition(partition string) string { switch partition { case "": @@ -217,6 +277,8 @@ func PartitionForRegion(region string) string { return ISOPartitionID case USISOBEast1RegionID: return ISOBPartitionID + case EUISOEWest1RegionID: + return ISOEPartitionID case USGovEast1RegionID, USGovWest1RegionID: return USGovCloudPartitionID default: @@ -235,18 +297,15 @@ func ReverseDNS(hostname string) string { return strings.Join(parts, ".") } -// Type ServiceDatum corresponds closely to columns in `data/names_data.csv` and are +// Type ServiceDatum corresponds closely to attributes 
and blocks in `data/names_data.hcl` and are // described in detail in README.md. -type ServiceDatum struct { +type serviceDatum struct { Aliases []string AWSServiceEnvVar string Brand string ClientSDKV1 bool DeprecatedEnvVar string - EndpointOnly bool GoV1ClientTypeName string - GoV1Package string - GoV2Package string HumanFriendly string ProviderNameUpper string SDKID string @@ -254,24 +313,24 @@ type ServiceDatum struct { } // serviceData key is the AWS provider service package -var serviceData map[string]*ServiceDatum +var serviceData map[string]serviceDatum func init() { - serviceData = make(map[string]*ServiceDatum) + serviceData = make(map[string]serviceDatum) - // Data from names_data.csv - if err := readCSVIntoServiceData(); err != nil { - log.Fatalf("reading CSV into service data: %s", err) + // Data from names_data.hcl + if err := readHCLIntoServiceData(); err != nil { + log.Fatalf("reading HCL into service data: %s", err) } } -func readCSVIntoServiceData() error { - // names_data.csv is dynamically embedded so changes, additions should be made +func readHCLIntoServiceData() error { + // names_data.hcl is dynamically embedded so changes, additions should be made // there also d, err := data.ReadAllServiceData() if err != nil { - return fmt.Errorf("reading CSV into service data: %w", err) + return fmt.Errorf("reading HCL into service data: %w", err) } for _, l := range d { @@ -285,15 +344,12 @@ func readCSVIntoServiceData() error { p := l.ProviderPackage() - serviceData[p] = &ServiceDatum{ + sd := serviceDatum{ AWSServiceEnvVar: l.AWSServiceEnvVar(), Brand: l.Brand(), ClientSDKV1: l.ClientSDKV1(), DeprecatedEnvVar: l.DeprecatedEnvVar(), - EndpointOnly: l.EndpointOnly(), GoV1ClientTypeName: l.GoV1ClientTypeName(), - GoV1Package: l.GoV1Package(), - GoV2Package: l.GoV2Package(), HumanFriendly: l.HumanFriendly(), ProviderNameUpper: l.ProviderNameUpper(), SDKID: l.SDKID(), @@ -306,7 +362,9 @@ func readCSVIntoServiceData() error { a = append(a, l.Aliases()...) 
} - serviceData[p].Aliases = a + sd.Aliases = a + + serviceData[p] = sd } return nil @@ -367,27 +425,6 @@ func Endpoints() []Endpoint { return endpoints } -type ServiceNameUpper struct { - ProviderPackage string - ProviderNameUpper string - SDKID string -} - -func ServiceNamesUpper() []ServiceNameUpper { - serviceNames := make([]ServiceNameUpper, 0, len(serviceData)) - - for k, v := range serviceData { - sn := ServiceNameUpper{ - ProviderPackage: k, - ProviderNameUpper: v.ProviderNameUpper, - SDKID: v.SDKID, - } - serviceNames = append(serviceNames, sn) - } - - return serviceNames -} - func ProviderNameUpper(service string) (string, error) { if v, ok := serviceData[service]; ok { return v.ProviderNameUpper, nil @@ -468,44 +505,6 @@ func HumanFriendly(service string) (string, error) { return "", fmt.Errorf("no service data found for %s", service) } -func AWSGoPackage(providerPackage string, version int) (string, error) { - switch version { - case 1: - return AWSGoV1Package(providerPackage) - case 2: - return AWSGoV2Package(providerPackage) - default: - return "", fmt.Errorf("unsupported AWS SDK Go version: %d", version) - } -} - -func AWSGoV1Package(providerPackage string) (string, error) { - if v, ok := serviceData[providerPackage]; ok { - return v.GoV1Package, nil - } - - return "", fmt.Errorf("getting AWS SDK Go v1 package, %s not found", providerPackage) -} - -func AWSGoV2Package(providerPackage string) (string, error) { - if v, ok := serviceData[providerPackage]; ok { - return v.GoV2Package, nil - } - - return "", fmt.Errorf("getting AWS SDK Go v2 package, %s not found", providerPackage) -} - -func AWSGoClientTypeName(providerPackage string, version int) (string, error) { - switch version { - case 1: - return AWSGoV1ClientTypeName(providerPackage) - case 2: - return "Client", nil - default: - return "", fmt.Errorf("unsupported AWS SDK Go version: %d", version) - } -} - func AWSGoV1ClientTypeName(providerPackage string) (string, error) { if v, ok := 
serviceData[providerPackage]; ok { return v.GoV1ClientTypeName, nil diff --git a/names/names_test.go b/names/names_test.go index aff195260b1..069f340a4df 100644 --- a/names/names_test.go +++ b/names/names_test.go @@ -533,69 +533,6 @@ func TestFullHumanFriendly(t *testing.T) { } } -func TestAWSGoV1Package(t *testing.T) { - t.Parallel() - - testCases := []struct { - TestName string - Input string - Expected string - Error bool - }{ - { - TestName: "empty", - Input: "", - Expected: "", - Error: true, - }, - { - TestName: "same as AWS", - Input: CloudTrail, - Expected: CloudTrail, - Error: false, - }, - { - TestName: "different from AWS", - Input: Transcribe, - Expected: "transcribeservice", - Error: false, - }, - { - TestName: "different from AWS 2", - Input: RBin, - Expected: "recyclebin", - Error: false, - }, - { - TestName: "doesnotexist", - Input: "doesnotexist", - Expected: "", - Error: true, - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.TestName, func(t *testing.T) { - t.Parallel() - - got, err := AWSGoV1Package(testCase.Input) - - if err != nil && !testCase.Error { - t.Errorf("got error (%s), expected no error", err) - } - - if err == nil && testCase.Error { - t.Errorf("got (%s) and no error, expected error", got) - } - - if got != testCase.Expected { - t.Errorf("got %s, expected %s", got, testCase.Expected) - } - }) - } -} - func TestAWSGoV1ClientName(t *testing.T) { t.Parallel() diff --git a/skaff/go.mod b/skaff/go.mod index e0f55357ae8..6c28748c502 100644 --- a/skaff/go.mod +++ b/skaff/go.mod @@ -1,17 +1,27 @@ module github.com/hashicorp/terraform-provider-aws/skaff -go 1.22.2 +go 1.22.5 require ( github.com/YakDriver/regexache v0.23.0 github.com/hashicorp/terraform-provider-aws v1.60.1-0.20220322001452-8f7a597d0c24 - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 ) require ( + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + 
github.com/google/go-cmp v0.6.0 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/hcl/v2 v2.21.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/tools v0.23.0 // indirect ) replace github.com/hashicorp/terraform-provider-aws => ../ diff --git a/skaff/go.sum b/skaff/go.sum index 8eb4d31214b..7fd622d213f 100644 --- a/skaff/go.sum +++ b/skaff/go.sum @@ -1,14 +1,40 @@ github.com/YakDriver/regexache v0.23.0 h1:kv3j4XKhbx/vqUilSBgizXDUXHvvH1KdYekdmGwz4C4= github.com/YakDriver/regexache v0.23.0/go.mod h1:K4BZ3MYKAqSFbYWqmbsG+OzYUDyJjnMEr27DJEsVG3U= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
+golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tools/awssdkpatch/go.mod b/tools/awssdkpatch/go.mod index 4abb4919556..10c250babb7 100644 --- a/tools/awssdkpatch/go.mod +++ b/tools/awssdkpatch/go.mod @@ -1,15 +1,22 @@ module github.com/hashicorp/terraform-provider-aws/tools/awssdkpatch -go 1.22.2 +go 1.22.5 require ( github.com/hashicorp/terraform-provider-aws v1.60.1-0.20220322001452-8f7a597d0c24 - golang.org/x/tools v0.22.0 + golang.org/x/tools v0.23.0 ) require ( - golang.org/x/mod v0.18.0 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/hashicorp/hcl/v2 v2.21.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + golang.org/x/mod v0.19.0 // indirect golang.org/x/sync v0.7.0 // indirect + golang.org/x/text v0.16.0 // indirect ) replace github.com/hashicorp/terraform-provider-aws => ../.. 
diff --git a/tools/awssdkpatch/go.sum b/tools/awssdkpatch/go.sum index 16f2f692e0c..19aacce5ee9 100644 --- a/tools/awssdkpatch/go.sum +++ b/tools/awssdkpatch/go.sum @@ -1,6 +1,26 @@ -golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= -golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= 
+golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= diff --git a/tools/literally/go.mod b/tools/literally/go.mod index f63355863cb..b9b1927d27b 100644 --- a/tools/literally/go.mod +++ b/tools/literally/go.mod @@ -1,3 +1,3 @@ module github.com/hashicorp/terraform-provider-aws/tools/literally -go 1.22.2 +go 1.22.5 diff --git a/tools/tfsdk2fw/go.mod b/tools/tfsdk2fw/go.mod index 64e648f3112..65b12ffb4a1 100644 --- a/tools/tfsdk2fw/go.mod +++ b/tools/tfsdk2fw/go.mod @@ -1,6 +1,6 @@ module github.com/hashicorp/terraform-provider-aws/tools/tfsdk2fw -go 1.22.2 +go 1.22.5 require ( github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 @@ -12,215 +12,236 @@ require ( github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect - github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton // indirect github.com/YakDriver/go-version v0.1.0 // indirect github.com/YakDriver/regexache v0.23.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go v1.54.0 // indirect - 
github.com/aws/aws-sdk-go-v2 v1.27.2 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 // indirect - github.com/aws/aws-sdk-go-v2/config v1.27.18 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.18 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 // indirect + github.com/aws/aws-sdk-go v1.54.19 // indirect + github.com/aws/aws-sdk-go-v2 v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/config v1.27.26 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.26 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9 // indirect - github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.30.0 // indirect - github.com/aws/aws-sdk-go-v2/service/account v1.17.1 // indirect - github.com/aws/aws-sdk-go-v2/service/acm v1.26.2 // indirect - github.com/aws/aws-sdk-go-v2/service/acmpca v1.30.3 // indirect - github.com/aws/aws-sdk-go-v2/service/amp v1.25.10 // indirect - github.com/aws/aws-sdk-go-v2/service/amplify v1.21.11 // indirect - github.com/aws/aws-sdk-go-v2/service/apigateway v1.23.12 // indirect - github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.20.10 // indirect - github.com/aws/aws-sdk-go-v2/service/appconfig v1.29.8 // indirect - github.com/aws/aws-sdk-go-v2/service/appfabric v1.7.10 // indirect - github.com/aws/aws-sdk-go-v2/service/appflow v1.41.10 // indirect - 
github.com/aws/aws-sdk-go-v2/service/appintegrations v1.25.10 // indirect - github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.27.10 // indirect - github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.24.10 // indirect - github.com/aws/aws-sdk-go-v2/service/apprunner v1.28.10 // indirect - github.com/aws/aws-sdk-go-v2/service/appstream v1.34.10 // indirect - github.com/aws/aws-sdk-go-v2/service/athena v1.41.2 // indirect - github.com/aws/aws-sdk-go-v2/service/auditmanager v1.33.0 // indirect - github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.11 // indirect - github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.11 // indirect - github.com/aws/aws-sdk-go-v2/service/batch v1.38.1 // indirect - github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.3.10 // indirect - github.com/aws/aws-sdk-go-v2/service/bedrock v1.8.8 // indirect - github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.12.2 // indirect - github.com/aws/aws-sdk-go-v2/service/budgets v1.23.6 // indirect - github.com/aws/aws-sdk-go-v2/service/chatbot v1.2.3 // indirect - github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.15.11 // indirect - github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.15.6 // indirect - github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.12.6 // indirect - github.com/aws/aws-sdk-go-v2/service/cloud9 v1.24.11 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.18.10 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudformation v1.51.3 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudfront v1.36.6 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.4.10 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.21.10 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.22.10 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.40.2 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.6 // indirect - 
github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.35.7 // indirect - github.com/aws/aws-sdk-go-v2/service/codeartifact v1.27.6 // indirect - github.com/aws/aws-sdk-go-v2/service/codebuild v1.37.3 // indirect - github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.13.7 // indirect - github.com/aws/aws-sdk-go-v2/service/codecommit v1.22.10 // indirect - github.com/aws/aws-sdk-go-v2/service/codedeploy v1.25.10 // indirect - github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.20.10 // indirect - github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.25.10 // indirect - github.com/aws/aws-sdk-go-v2/service/codepipeline v1.28.0 // indirect - github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.25.8 // indirect - github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.22.10 // indirect - github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.23.13 // indirect - github.com/aws/aws-sdk-go-v2/service/comprehend v1.31.10 // indirect - github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.34.7 // indirect - github.com/aws/aws-sdk-go-v2/service/configservice v1.46.11 // indirect - github.com/aws/aws-sdk-go-v2/service/connectcases v1.17.6 // indirect - github.com/aws/aws-sdk-go-v2/service/controltower v1.14.3 // indirect - github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.23.10 // indirect - github.com/aws/aws-sdk-go-v2/service/costexplorer v1.38.6 // indirect - github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.10 // indirect - github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.10 // indirect - github.com/aws/aws-sdk-go-v2/service/datasync v1.38.4 // indirect - github.com/aws/aws-sdk-go-v2/service/datazone v1.8.6 // indirect - github.com/aws/aws-sdk-go-v2/service/dax v1.19.10 // indirect - github.com/aws/aws-sdk-go-v2/service/devicefarm v1.22.10 // indirect - github.com/aws/aws-sdk-go-v2/service/devopsguru v1.30.10 // indirect - github.com/aws/aws-sdk-go-v2/service/directoryservice v1.24.10 // indirect - 
github.com/aws/aws-sdk-go-v2/service/dlm v1.24.10 // indirect - github.com/aws/aws-sdk-go-v2/service/docdb v1.34.7 // indirect - github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.9.9 // indirect - github.com/aws/aws-sdk-go-v2/service/drs v1.26.6 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.8 // indirect - github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ecr v1.28.5 // indirect - github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.10 // indirect - github.com/aws/aws-sdk-go-v2/service/ecs v1.42.0 // indirect - github.com/aws/aws-sdk-go-v2/service/eks v1.43.1 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticache v1.38.8 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10 // indirect - github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3 // indirect - github.com/aws/aws-sdk-go-v2/service/emr v1.39.11 // indirect - github.com/aws/aws-sdk-go-v2/service/emrserverless v1.21.2 // indirect - github.com/aws/aws-sdk-go-v2/service/eventbridge v1.31.5 // indirect - github.com/aws/aws-sdk-go-v2/service/evidently v1.19.10 // indirect - github.com/aws/aws-sdk-go-v2/service/finspace v1.24.7 // indirect - github.com/aws/aws-sdk-go-v2/service/firehose v1.29.1 // indirect - github.com/aws/aws-sdk-go-v2/service/fis v1.24.8 // indirect - github.com/aws/aws-sdk-go-v2/service/fms v1.33.7 // indirect - github.com/aws/aws-sdk-go-v2/service/glacier v1.22.10 // indirect - github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.24.1 // indirect - github.com/aws/aws-sdk-go-v2/service/groundstation v1.27.6 // indirect - github.com/aws/aws-sdk-go-v2/service/guardduty v1.43.0 // indirect - github.com/aws/aws-sdk-go-v2/service/healthlake v1.24.6 // indirect - github.com/aws/aws-sdk-go-v2/service/iam v1.32.6 // indirect - github.com/aws/aws-sdk-go-v2/service/identitystore v1.23.12 // indirect - github.com/aws/aws-sdk-go-v2/service/inspector2 v1.26.6 // indirect - 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.14.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ivschat v1.12.11 // indirect - github.com/aws/aws-sdk-go-v2/service/kafka v1.33.2 // indirect - github.com/aws/aws-sdk-go-v2/service/kendra v1.50.7 // indirect - github.com/aws/aws-sdk-go-v2/service/keyspaces v1.10.10 // indirect - github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.10 // indirect - github.com/aws/aws-sdk-go-v2/service/kms v1.32.3 // indirect - github.com/aws/aws-sdk-go-v2/service/lakeformation v1.33.3 // indirect - github.com/aws/aws-sdk-go-v2/service/lambda v1.54.6 // indirect - github.com/aws/aws-sdk-go-v2/service/launchwizard v1.4.2 // indirect - github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.43.10 // indirect - github.com/aws/aws-sdk-go-v2/service/lightsail v1.38.3 // indirect - github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.27.10 // indirect - github.com/aws/aws-sdk-go-v2/service/m2 v1.13.6 // indirect - github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.28.10 // indirect - github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.53.7 // indirect - github.com/aws/aws-sdk-go-v2/service/medialive v1.52.6 // indirect - github.com/aws/aws-sdk-go-v2/service/mediapackage v1.30.11 // indirect - github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.11.6 // indirect - github.com/aws/aws-sdk-go-v2/service/mediastore v1.20.10 // indirect - github.com/aws/aws-sdk-go-v2/service/mq v1.22.11 // indirect - github.com/aws/aws-sdk-go-v2/service/mwaa v1.27.4 // indirect - github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.8.7 // indirect 
- github.com/aws/aws-sdk-go-v2/service/oam v1.11.6 // indirect - github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.11.13 // indirect - github.com/aws/aws-sdk-go-v2/service/organizations v1.27.9 // indirect - github.com/aws/aws-sdk-go-v2/service/osis v1.10.0 // indirect - github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.10.6 // indirect - github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.5.10 // indirect - github.com/aws/aws-sdk-go-v2/service/pipes v1.12.1 // indirect - github.com/aws/aws-sdk-go-v2/service/polly v1.40.5 // indirect - github.com/aws/aws-sdk-go-v2/service/pricing v1.28.7 // indirect - github.com/aws/aws-sdk-go-v2/service/qbusiness v1.6.6 // indirect - github.com/aws/aws-sdk-go-v2/service/qldb v1.21.10 // indirect - github.com/aws/aws-sdk-go-v2/service/ram v1.25.10 // indirect - github.com/aws/aws-sdk-go-v2/service/rbin v1.16.10 // indirect - github.com/aws/aws-sdk-go-v2/service/rds v1.79.6 // indirect - github.com/aws/aws-sdk-go-v2/service/redshift v1.44.7 // indirect - github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.25.10 // indirect - github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.18.8 // indirect - github.com/aws/aws-sdk-go-v2/service/rekognition v1.40.6 // indirect - github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.10.11 // indirect - github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.22.6 // indirect - github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.21.10 // indirect - github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.11.6 // indirect - github.com/aws/aws-sdk-go-v2/service/route53 v1.40.10 // indirect - github.com/aws/aws-sdk-go-v2/service/route53domains v1.23.10 // indirect - github.com/aws/aws-sdk-go-v2/service/route53profiles v1.0.7 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1 // indirect - github.com/aws/aws-sdk-go-v2/service/s3control v1.44.13 // indirect - github.com/aws/aws-sdk-go-v2/service/scheduler v1.8.10 // indirect - 
github.com/aws/aws-sdk-go-v2/service/schemas v1.24.10 // indirect - github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.30.0 // indirect - github.com/aws/aws-sdk-go-v2/service/securityhub v1.49.2 // indirect - github.com/aws/aws-sdk-go-v2/service/securitylake v1.14.0 // indirect - github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.26.10 // indirect - github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.12 // indirect - github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.10 // indirect - github.com/aws/aws-sdk-go-v2/service/sesv2 v1.30.0 // indirect - github.com/aws/aws-sdk-go-v2/service/shield v1.25.10 // indirect - github.com/aws/aws-sdk-go-v2/service/signer v1.22.13 // indirect - github.com/aws/aws-sdk-go-v2/service/sns v1.29.11 // indirect - github.com/aws/aws-sdk-go-v2/service/sqs v1.32.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ssm v1.50.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.22.10 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.30.10 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmsap v1.13.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 // indirect - github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.25.11 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 // indirect - github.com/aws/aws-sdk-go-v2/service/swf v1.23.2 // indirect - github.com/aws/aws-sdk-go-v2/service/synthetics v1.24.10 // indirect - github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.0.8 // indirect - github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.25.11 // indirect - github.com/aws/aws-sdk-go-v2/service/transcribe v1.37.6 // indirect - github.com/aws/aws-sdk-go-v2/service/transfer v1.48.3 // indirect - github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.15.0 // indirect - github.com/aws/aws-sdk-go-v2/service/vpclattice v1.8.6 // indirect - github.com/aws/aws-sdk-go-v2/service/waf 
v1.20.10 // indirect - github.com/aws/aws-sdk-go-v2/service/wafregional v1.21.10 // indirect - github.com/aws/aws-sdk-go-v2/service/wafv2 v1.49.3 // indirect - github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.30.6 // indirect - github.com/aws/aws-sdk-go-v2/service/workspaces v1.39.6 // indirect - github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.18.6 // indirect - github.com/aws/aws-sdk-go-v2/service/xray v1.25.10 // indirect - github.com/aws/smithy-go v1.20.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 // indirect + github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.32.3 // indirect + github.com/aws/aws-sdk-go-v2/service/account v1.19.3 // indirect + github.com/aws/aws-sdk-go-v2/service/acm v1.28.4 // indirect + github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0 // indirect + github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3 // indirect + github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.3 // indirect + github.com/aws/aws-sdk-go-v2/service/appconfig v1.31.3 // indirect + github.com/aws/aws-sdk-go-v2/service/appfabric v1.9.3 // indirect + github.com/aws/aws-sdk-go-v2/service/appflow v1.43.3 // indirect + github.com/aws/aws-sdk-go-v2/service/appintegrations v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.30.4 // indirect + github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/service/apprunner v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/appstream v1.36.3 // indirect + github.com/aws/aws-sdk-go-v2/service/appsync v1.34.3 // indirect + github.com/aws/aws-sdk-go-v2/service/athena v1.44.3 // indirect + github.com/aws/aws-sdk-go-v2/service/auditmanager v1.35.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/autoscaling v1.43.3 // indirect + github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.22.3 // indirect + github.com/aws/aws-sdk-go-v2/service/backup v1.36.3 // indirect + github.com/aws/aws-sdk-go-v2/service/batch v1.43.0 // indirect + github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.5.3 // indirect + github.com/aws/aws-sdk-go-v2/service/bedrock v1.12.0 // indirect + github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.16.0 // indirect + github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3 // indirect + github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3 // indirect + github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.17.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloud9 v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.20.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudfront v1.38.4 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.25.2 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.42.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.37.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codeartifact v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codebuild v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.15.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codecommit v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codedeploy v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.22.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codepipeline v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.25.5 // indirect + github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.41.4 // indirect + github.com/aws/aws-sdk-go-v2/service/comprehend v1.33.3 // indirect + github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.37.3 // indirect + github.com/aws/aws-sdk-go-v2/service/configservice v1.48.3 // indirect + github.com/aws/aws-sdk-go-v2/service/connectcases v1.19.3 // indirect + github.com/aws/aws-sdk-go-v2/service/controltower v1.16.3 // indirect + github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/costexplorer v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3 // indirect + github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3 // indirect + github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3 // indirect + github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2 // indirect + github.com/aws/aws-sdk-go-v2/service/dax v1.21.3 // indirect + github.com/aws/aws-sdk-go-v2/service/detective v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/devicefarm v1.25.2 // indirect + github.com/aws/aws-sdk-go-v2/service/devopsguru v1.32.3 // indirect + github.com/aws/aws-sdk-go-v2/service/directoryservice v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/dlm v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/docdb v1.36.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/drs v1.28.3 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ecs v1.44.3 // indirect + github.com/aws/aws-sdk-go-v2/service/efs v1.31.3 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.46.2 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticache v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.26.2 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3 // indirect + github.com/aws/aws-sdk-go-v2/service/emr v1.42.2 // indirect + github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3 // indirect + github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 // indirect + github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 // indirect + github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3 // indirect + github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 // indirect + github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.27.0 // indirect + github.com/aws/aws-sdk-go-v2/service/grafana v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/greengrass v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/groundstation v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/guardduty v1.45.3 // indirect + github.com/aws/aws-sdk-go-v2/service/healthlake v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/iam v1.34.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/identitystore v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/inspector2 v1.28.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 // indirect + github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.16.3 // indirect + github.com/aws/aws-sdk-go-v2/service/iot v1.55.3 // indirect + github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 // indirect + github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 // indirect + github.com/aws/aws-sdk-go-v2/service/kendra v1.52.3 // indirect + github.com/aws/aws-sdk-go-v2/service/keyspaces v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 // indirect + github.com/aws/aws-sdk-go-v2/service/lakeformation v1.35.3 // indirect + github.com/aws/aws-sdk-go-v2/service/lambda v1.56.3 // indirect + github.com/aws/aws-sdk-go-v2/service/launchwizard v1.6.3 // indirect + github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.45.3 // indirect + github.com/aws/aws-sdk-go-v2/service/lightsail v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/m2 v1.15.3 // indirect + github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0 // indirect + github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3 // indirect + github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3 // indirect + github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/service/mediastore v1.22.3 // indirect + github.com/aws/aws-sdk-go-v2/service/mq v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4 // indirect + github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3 // indirect + github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3 // indirect + github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3 // indirect + github.com/aws/aws-sdk-go-v2/service/oam v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2 // indirect + github.com/aws/aws-sdk-go-v2/service/osis v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.7.3 // indirect + github.com/aws/aws-sdk-go-v2/service/pipes v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/service/polly v1.42.3 // indirect + github.com/aws/aws-sdk-go-v2/service/pricing v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/qbusiness v1.10.2 // indirect + github.com/aws/aws-sdk-go-v2/service/qldb v1.23.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ram v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3 // indirect + github.com/aws/aws-sdk-go-v2/service/rds v1.81.4 // indirect + github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4 // indirect + github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.20.3 // indirect + github.com/aws/aws-sdk-go-v2/service/rekognition v1.43.2 // indirect + github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.12.3 // indirect + github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.24.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.23.3 // indirect + github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.13.3 // indirect + github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3 // indirect + github.com/aws/aws-sdk-go-v2/service/route53domains v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/route53profiles v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/service/rum v1.19.3 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 // indirect + github.com/aws/aws-sdk-go-v2/service/s3control v1.46.3 // indirect + github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3 // indirect + github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3 // indirect + github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3 // indirect + github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3 // indirect + github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.22.3 // indirect + github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.28.3 // indirect + github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.3 // indirect + github.com/aws/aws-sdk-go-v2/service/servicequotas v1.23.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sesv2 v1.32.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3 // indirect + github.com/aws/aws-sdk-go-v2/service/shield v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/signer v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssm v1.52.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.24.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 // indirect + 
github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/swf v1.25.3 // indirect + github.com/aws/aws-sdk-go-v2/service/synthetics v1.26.3 // indirect + github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3 // indirect + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3 // indirect + github.com/aws/aws-sdk-go-v2/service/transcribe v1.39.3 // indirect + github.com/aws/aws-sdk-go-v2/service/transfer v1.50.3 // indirect + github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.17.3 // indirect + github.com/aws/aws-sdk-go-v2/service/vpclattice v1.10.3 // indirect + github.com/aws/aws-sdk-go-v2/service/waf v1.23.3 // indirect + github.com/aws/aws-sdk-go-v2/service/wafregional v1.23.3 // indirect + github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4 // indirect + github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.32.3 // indirect + github.com/aws/aws-sdk-go-v2/service/workspaces v1.44.2 // indirect + github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.21.3 // indirect + github.com/aws/aws-sdk-go-v2/service/xray v1.27.3 // indirect + github.com/aws/smithy-go v1.20.3 // indirect github.com/beevik/etree v1.4.0 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/cedar-policy/cedar-go v0.0.0-20240318205125-470d1fe984bb // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fatih/color v1.16.0 // indirect + github.com/fatih/color v1.17.0 // indirect github.com/gertd/go-pluralize v0.2.1 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -228,8 +249,8 @@ require ( github.com/google/go-cmp v0.6.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0 // indirect - 
github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.53 // indirect - github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.54 // indirect + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.54 // indirect + github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.55 // indirect github.com/hashicorp/awspolicyequivalence v1.6.0 // indirect github.com/hashicorp/cli v1.1.6 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -241,20 +262,20 @@ require ( github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.7.0 // indirect - github.com/hashicorp/hc-install v0.6.4 // indirect - github.com/hashicorp/hcl/v2 v2.20.1 // indirect + github.com/hashicorp/hc-install v0.7.0 // indirect + github.com/hashicorp/hcl/v2 v2.21.0 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.21.0 // indirect github.com/hashicorp/terraform-json v0.22.1 // indirect - github.com/hashicorp/terraform-plugin-framework v1.9.0 // indirect + github.com/hashicorp/terraform-plugin-framework v1.10.0 // indirect github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0 // indirect github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 // indirect github.com/hashicorp/terraform-plugin-framework-timetypes v0.4.0 // indirect - github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 // indirect + github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 // indirect github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect github.com/hashicorp/terraform-plugin-mux v0.16.0 // indirect - github.com/hashicorp/terraform-plugin-testing v1.8.0 // indirect + github.com/hashicorp/terraform-plugin-testing v1.9.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect 
github.com/hashicorp/yamux v0.1.1 // indirect @@ -282,17 +303,17 @@ require ( github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/zclconf/go-cty v1.14.4 // indirect - go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0 // indirect - go.opentelemetry.io/otel v1.26.0 // indirect - go.opentelemetry.io/otel/metric v1.26.0 // indirect - go.opentelemetry.io/otel/trace v1.26.0 // indirect - golang.org/x/crypto v0.24.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.25.0 // indirect + go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0 // indirect + go.opentelemetry.io/otel v1.27.0 // indirect + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/trace v1.27.0 // indirect + golang.org/x/crypto v0.25.0 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.27.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.21.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + golang.org/x/tools v0.23.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/grpc v1.63.2 // indirect diff --git a/tools/tfsdk2fw/go.sum b/tools/tfsdk2fw/go.sum index 951cdb3110d..15537020e1b 100644 --- a/tools/tfsdk2fw/go.sum +++ b/tools/tfsdk2fw/go.sum @@ -9,8 +9,8 @@ github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= 
-github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= -github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton h1:0RXAi0EJFs81j+MMsqvHNuAUGWzeVfCO9LnHAfoQ8NA= +github.com/ProtonMail/go-crypto v1.1.0-alpha.3-proton/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/YakDriver/go-version v0.1.0 h1:/x+Xg2+l89Mjtxl0VRf2+ue8cnHkw6jfYv49j6f7gZw= github.com/YakDriver/go-version v0.1.0/go.mod h1:LXwFAp1E3KBhS7FHO/FE8r3XCmvKizs/VXXXFWfoSYY= github.com/YakDriver/regexache v0.23.0 h1:kv3j4XKhbx/vqUilSBgizXDUXHvvH1KdYekdmGwz4C4= @@ -22,400 +22,442 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.54.0 h1:tGCQ6YS2TepzKtbl+ddXnLIoV8XvWdxMKtuMxdrsa4U= -github.com/aws/aws-sdk-go v1.54.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.27.2 h1:pLsTXqX93rimAOZG2FIYraDQstZaaGVVN4tNw65v0h8= -github.com/aws/aws-sdk-go-v2 v1.27.2/go.mod h1:ffIFB97e2yNsv4aTSGkqtHnppsIJzw7G7BReUZ3jCXM= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2 h1:x6xsQXGSmW6frevwDA+vi/wqhp1ct18mVXYN08/93to= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.2/go.mod h1:lPprDr1e6cJdyYeGXnRaJoP4Md+cDBvi2eOj00BlGmg= -github.com/aws/aws-sdk-go-v2/config v1.27.18 h1:wFvAnwOKKe7QAyIxziwSKjmer9JBMH1vzIL6W+fYuKk= -github.com/aws/aws-sdk-go-v2/config v1.27.18/go.mod h1:0xz6cgdX55+kmppvPm2IaKzIXOheGJhAufacPJaXZ7c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.18 h1:D/ALDWqK4JdY3OFgA2thcPO1c9aYTT5STS/CvnkqY1c= -github.com/aws/aws-sdk-go-v2/credentials v1.17.18/go.mod 
h1:JuitCWq+F5QGUrmMPsk945rop6bB57jdscu+Glozdnc= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5 h1:dDgptDO9dxeFkXy+tEgVkzSClHZje/6JkPW5aZyEvrQ= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.5/go.mod h1:gjvE2KBUgUQhcv89jqxrIxH9GaKs1JbZzWejj/DaHGA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24 h1:FzNwpVTZDCvm597Ty6mGYvxTolyC1oup0waaKntZI4E= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.16.24/go.mod h1:wM9NElT/Wn6n3CT1eyVcXtfCy8lSVjjQXfdawQbSShc= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9 h1:cy8ahBJuhtM8GTTSyOkfy6WVPV1IE+SS5/wfXUYuulw= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.9/go.mod h1:CZBXGLaJnEZI6EVNcPd7a6B5IC5cA/GkRWtu9fp3S6Y= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9 h1:A4SYk07ef04+vxZToz9LWvAXl9LW0NClpPpMsi31cz0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.9/go.mod h1:5jJcHuwDagxN+ErjQ3PU3ocf6Ylc/p9x+BLO/+X4iXw= +github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= +github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.30.3 h1:jUeBtG0Ih+ZIFH0F4UkmL9w3cSpaMv9tYYDbzILP8dY= +github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3 h1:tW1/Rkad38LA15X4UQtjXZXNKsCgkshC3EbmcUmghTg= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM= +github.com/aws/aws-sdk-go-v2/config v1.27.26 h1:T1kAefbKuNum/AbShMsZEro6eRkeOT8YILfE9wyjAYQ= +github.com/aws/aws-sdk-go-v2/config v1.27.26/go.mod h1:ivWHkAWFrw/nxty5Fku7soTIVdqZaZ7dw+tc5iGW3GA= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26 h1:tsm8g/nJxi8+/7XyJJcP2dLrnK/5rkFp6+i2nhmz5fk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.26/go.mod h1:3vAM49zkIa3q8WT6o9Ve5Z0vdByDMwmdScO0zvThTgI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 
h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7 h1:kNemAUX+bJFBSfPkGVZ8HFOKIadjLoI2Ua1ZKivhGSo= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.7/go.mod h1:71S2C1g/Zjn+ANmyoOqJ586OrPF9uC9iiHt9ZAT+MOw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15 h1:SoNJ4RlFEQEbtDcCEt+QG56MY4fm4W8rYirAmq+/DdU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15 h1:C6WHdGnTDIYETAm5iErQUiVNsclNx9qbJVPIt03B6bI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9 h1:vHyZxoLVOgrI8GqX7OMHLXp4YYoxeEsrjweXKpye+ds= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.9/go.mod h1:z9VXZsWA2BvZNH1dT0ToUYwMu/CR9Skkj/TBX+mceZw= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.30.0 h1:l5NODu13ZXBo3SIuWlSqM8W15UkmGb1CfoT9LMePiGQ= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.30.0/go.mod h1:+J6D4VAx1rypnSo1AI7XIx4v3al7RwEmTT45+hKtsuo= -github.com/aws/aws-sdk-go-v2/service/account v1.17.1 h1:4NJR1yu7rp5FxJqnqpRaSnIiq/EL26EBaGFnLh3TVlM= -github.com/aws/aws-sdk-go-v2/service/account v1.17.1/go.mod h1:RP2gSKo6kGbTkrDVhsK7BDmhobfBc+0O1dVI1VGNR0U= -github.com/aws/aws-sdk-go-v2/service/acm v1.26.2 h1:BAAPzljqPgzr4vJl1aI+qwWArot2Ev7jZy9i69Bysvo= -github.com/aws/aws-sdk-go-v2/service/acm v1.26.2/go.mod h1:UxBKNLjXNINYbDrT7DG7ZHYEK2qOT1m6XJeKY+LitbQ= -github.com/aws/aws-sdk-go-v2/service/acmpca v1.30.3 h1:bQ+4RthpbM3w5+1Z9l/OoANG1J5Nl3oNVCL70dZbEV0= 
-github.com/aws/aws-sdk-go-v2/service/acmpca v1.30.3/go.mod h1:JV4s3XObODdRk5gtgA5uKNSLhCqi5WRvQgnzhkMvJEk= -github.com/aws/aws-sdk-go-v2/service/amp v1.25.10 h1:aGSGpPg6aGoe/v42C2iSQqzDI778m0YpMolinM5SeIE= -github.com/aws/aws-sdk-go-v2/service/amp v1.25.10/go.mod h1:wyfNo2hj/f8yPSdberXMJv60eUG6xGr3cDLVF4jJivY= -github.com/aws/aws-sdk-go-v2/service/amplify v1.21.11 h1:B4BzoxzV8vio6V07yEDEqpVrhd2ciD3b4OkF2QGpgkA= -github.com/aws/aws-sdk-go-v2/service/amplify v1.21.11/go.mod h1:Ev3460rW8/OmH3bJBkMZDgZR48c6wl1d4DxA78h+CWM= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.23.12 h1:B9YQUaFlg5YAEukEogYG5E+C6GHHAMNbS1g82rgxRSg= -github.com/aws/aws-sdk-go-v2/service/apigateway v1.23.12/go.mod h1:zwkGhImFmKYyfIjJb2jBVd+cQ+pq+APQNryk9Tk57Ps= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.20.10 h1:7rAYDeRvzVKJcnNDT/xOX1px9k/scn4Ya4NtonV6PWg= -github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.20.10/go.mod h1:hYMrp35CMcqnG1/+ZuaqOCl8YoGdb0+OfB2o/CbT7AU= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.29.8 h1:VlCuJtG4WFXaYWqqX/FK6L+yaS8hRJNA9Q3c0Vrv018= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.29.8/go.mod h1:n46CP0fdiMHscrLc9E4E/AW90LxtoD8KAs8GBOlh1ZU= -github.com/aws/aws-sdk-go-v2/service/appfabric v1.7.10 h1:ZH680e/x/CCEZuumTWovuPFKvHjxFe6FXOjD7JOYr7Y= -github.com/aws/aws-sdk-go-v2/service/appfabric v1.7.10/go.mod h1:gNOb1nyhDzbyNir5SOA+O502Gwy8HRLCZZiWF856+hw= -github.com/aws/aws-sdk-go-v2/service/appflow v1.41.10 h1:ozylppjAYagJKcnCEQL8pKPT2b4B0IeeOwCYy/ZMTO0= -github.com/aws/aws-sdk-go-v2/service/appflow v1.41.10/go.mod h1:MeLW0NK8MPEUQm7XnZniE5rQRLiGKbu49kHWWdd5lzI= -github.com/aws/aws-sdk-go-v2/service/appintegrations v1.25.10 h1:tCGbQBGGMcgHZmSLcRI4lvU/y3l36z1GHWd8w9Wl7uY= -github.com/aws/aws-sdk-go-v2/service/appintegrations v1.25.10/go.mod h1:DItbH9nkfmNQJKfARIjF8kktLUOv0lQ8oLeCoHX6P9Q= -github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.27.10 h1:ry4D6RPuF6FrVDaFaKgwkLYV5BrJE/rt3m6K6FQYZqw= 
-github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.27.10/go.mod h1:0pzgHdeoNmeBekRPJl+DRXNJD6D9FqTcD+tFkK81NRg= -github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.24.10 h1:5XwvSPLjQleCuojVnUqMqYiD7UHfrc29GOZmpj/bB0c= -github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.24.10/go.mod h1:Xrzju78vcomnDMXDJ9T6qWk6wHJ6HgGUQ4Fh9reSFL0= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.28.10 h1:wGqe+j9Ab0kSbrSTI0AlLbd1xMp8vj916/pAAe2F48I= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.28.10/go.mod h1:QqiGYjaeD3O+DGHeij4FZgMEW+pzqJUCQBbcseLQeJU= -github.com/aws/aws-sdk-go-v2/service/appstream v1.34.10 h1:JhIT4EGxtjpmIC9l9iLWGw0j8FZezbsqADa6XwkD870= -github.com/aws/aws-sdk-go-v2/service/appstream v1.34.10/go.mod h1:stS2ZSwmXOl+IeWEQWFyo8++JVSuKwuJpMF+EJTNLco= -github.com/aws/aws-sdk-go-v2/service/athena v1.41.2 h1:PiTlzkJrnYGHucoQQ8zDvgf/vKDgDps2FVr3GIWIWdE= -github.com/aws/aws-sdk-go-v2/service/athena v1.41.2/go.mod h1:XCkSMZRqquO7222ELibKBj+bDjg9QeS2wkVKcW7z2Mk= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.33.0 h1:vrvI5gUkDC9s2ogMPTgpLaAca3V49TMi5JkopstiOkA= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.33.0/go.mod h1:w6hDogXBS5N3C/OsuPFbmjzBH5B/MHnZkAsO5aerB6k= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.11 h1:n0OjQw2HMbBr1g2M3XzzNTV8srYSoLkYgV48jiLkqbQ= -github.com/aws/aws-sdk-go-v2/service/autoscaling v1.40.11/go.mod h1:qaQkZEptpHa0HhooCCONUjxvYbkgHtDuG/cCDvJt6UE= -github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.11 h1:EphDT9zNEntQAikIWgSm368R6CP403jtG+f7k9xrtLk= -github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.20.11/go.mod h1:9nh1OAv8xttmIE3AJ2hWAROcSdsrPMyHE+4tLW7BO90= -github.com/aws/aws-sdk-go-v2/service/batch v1.38.1 h1:AJUFYzHn6B6vYa3/MHZkdoAx+0QExCKXiO7YQSIsMN0= -github.com/aws/aws-sdk-go-v2/service/batch v1.38.1/go.mod h1:3EYTC8QgdDTgwytlDYvWUvSTgmyQ/4V5rCJlma5ZTvk= -github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.3.10 h1:oBaVBnBvkDh/7gNz7Fs6EbrVdMMfnysCoach9u9B0zQ= 
-github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.3.10/go.mod h1:ukyl81iTQhkgiZydbzFTdh6ddHza0HQO/vffH37X5GQ= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.8.8 h1:xYOVGI6TC1gfli10NShlRsd80pe1Fp/t+LXJNIshihI= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.8.8/go.mod h1:jlgZZlnucnhTwwkt/MLIYT9GRq+hgjkkaLNwWaqp7lk= -github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.12.2 h1:5mpsZ7TDvTw1TpT7DnSQTUDMluVPZdccKzhXGThQdho= -github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.12.2/go.mod h1:sR0KPW2UZmFP1A9xAIO9lQIwh/uzmGy9hTenzuyems0= -github.com/aws/aws-sdk-go-v2/service/budgets v1.23.6 h1:2NdUhw2XHwuT2sK1849T4FEl3dNB6mebOWYaQV/T++4= -github.com/aws/aws-sdk-go-v2/service/budgets v1.23.6/go.mod h1:X69Kb7PDBlJCYyAh1nUS5oEjLplyvIxxTOmEOXVZ7uI= -github.com/aws/aws-sdk-go-v2/service/chatbot v1.2.3 h1:MU/H6Bopqtfu7SOrVy1fZ/eZzX1gKKHqXIZAerKxomU= -github.com/aws/aws-sdk-go-v2/service/chatbot v1.2.3/go.mod h1:Nup6J+0ugC1ddxf04M4e+Tl8KStEJ2m8DIuwIaFwqBc= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.15.11 h1:QWZ/DP2bVhPMKHfeEI6sZ7QOxCXpW36HavOqKCo2wTg= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.15.11/go.mod h1:pQZUK8Lm31nCPFLsDnZUDvmRxw/GGLqF7GtZvEZPB3A= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.15.6 h1:u4fcjpNEk1X0K2x7BvmssdWgavB65KaeU+t3Qi3juUc= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.15.6/go.mod h1:Wh1ryEf52xU0QD97S9+IGGk8Rv8z0zNmMsXyu0ADTmM= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.12.6 h1:NxemArZLwYuKFSSbbD9tIci6qVvCQtJcEZc2jg/Nc08= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.12.6/go.mod h1:eNWgs4jaUQhfmFU6kail21dJ+zookZyxmQReFajmn7w= -github.com/aws/aws-sdk-go-v2/service/cloud9 v1.24.11 h1:tEWBfvLgInrnrNPIN1dHe5T4o6t7tPrh6wMGUaBA1S8= -github.com/aws/aws-sdk-go-v2/service/cloud9 v1.24.11/go.mod h1:HApCCrEvcY5kj+d0S/a7bjcn5XoD6JYKHGrReD+R4E4= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.18.10 h1:D8E8QEHZ/2yt7GEOdlsQMypCNYs6RoQLlV2UBDbBWV8= 
-github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.18.10/go.mod h1:lQc/tta6L/lJIOJEd+coKVFi5qum1oNe/8EXBNtK68I= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.51.3 h1:HlsyxSED4xEtAq7WsFh7oMuBg2OnK+Q2thz0MQR5uAY= -github.com/aws/aws-sdk-go-v2/service/cloudformation v1.51.3/go.mod h1:KiLdmslIONL5WXMrelwfAzisbZ5UckYT9FGtZJASKnk= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.36.6 h1:dYxK3oAOXbryNOs4qnWugEe6oWh50PWLPe/Y1CoJGzU= -github.com/aws/aws-sdk-go-v2/service/cloudfront v1.36.6/go.mod h1:tt1D2vhp2ZJbQ875VVxsXgx8z2OWaD4kgkSNqQd0EOc= -github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.4.10 h1:7aB96DEtCf33kX1i5zXE30UZNStVz6EuRk23e1gGfkY= -github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.4.10/go.mod h1:wt0o+YJBTQocmC/8rixGl9Ovddw5mfz0IghtpvS3sRw= -github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.21.10 h1:Olk2n5NKBCzkRCeQILoQ3B0QSBr46u0WvnjoohW8TXI= -github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.21.10/go.mod h1:x2vWbMhG6oBV4SZ51ew4X0Wm8dig5d4zM5Z9W8HOCEc= -github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.22.10 h1:x2Z2nDm6Egfu9/VIHRWsHj9aeQe/XAc2Ox3uId/4/HU= -github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.22.10/go.mod h1:kZHeNQxC4Kynj/C/FN9L6I7UloX3l0geJrx66diSNq0= -github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.40.2 h1:oUpoMnt8H30Th/P+goSYB57aaIMHgO0ri0Bs/zFDo30= -github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.40.2/go.mod h1:NlPpu+9PsQp311DfPxg6gvE0NW2E4xdVSWZmu6pv1dc= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.6 h1:UVjxYe8VGpwXYcmBcciBHlQrNssdEvntXCPWmnRR15U= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.38.6/go.mod h1:4V6VDA0kZavRn71+sLpVna75oobnlG+gwtnNcBwZhu4= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.35.7 h1:kG3A4w9GMub28Cn9k0M5c0F1wQLbTCHMvsb9FlUXGu0= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.35.7/go.mod h1:Ibm/16D/pKg0k9InRCkG6DATLfHGMRWJ0QVS06ppVjs= -github.com/aws/aws-sdk-go-v2/service/codeartifact v1.27.6 
h1:7h/vvPE3FmutPx1hz9ZiUWbIutlBnYe9cpnOvapV++s= -github.com/aws/aws-sdk-go-v2/service/codeartifact v1.27.6/go.mod h1:QFlahZ+Y+RempIF6zMcl/G9/r026ERriqikzRZjDI10= -github.com/aws/aws-sdk-go-v2/service/codebuild v1.37.3 h1:M9D+qSdebooflTy5FZKjjc0ScIu4rY8wft4pProSOfo= -github.com/aws/aws-sdk-go-v2/service/codebuild v1.37.3/go.mod h1:oLXvRVcYUh9Jct6B4yBtsOrj2FECvBXQcTMnpHZrUl4= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.13.7 h1:1ENRDfamQrcHJLuSRBuNoiSjMpmGHMXY944F/XN4wII= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.13.7/go.mod h1:JWrdgdMeoK2tKjF4HJ6YX9zA+7Ibnrs3UDjhAjAUQSg= -github.com/aws/aws-sdk-go-v2/service/codecommit v1.22.10 h1:vlf+RZWguYZJzbC95Zoddg3elMg3ZmH8nSip9LF4TkY= -github.com/aws/aws-sdk-go-v2/service/codecommit v1.22.10/go.mod h1:jN+rcF5OPMwDpAJ/uK16MAUis/ByjN1YB/fmPISRZ3U= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.25.10 h1:Luq+/0wysA7vYfrgp+z6K1sbSMvAGsM8lyfD+Ps/q3k= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.25.10/go.mod h1:Z/PUeQGN2+03OeszXPaNB1VPyDcPeaYlqVfV/pfpt4s= -github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.20.10 h1:ct/fxqFdGYXzIlX0p8mD046Mq4P0w5gckhZ7agfdQ/w= -github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.20.10/go.mod h1:7vAwI4YVdWl8cB+bmtoxL6UaJ/hsK8L5YJwYYDlN4Vc= -github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.25.10 h1:Za3irwaGoBfCKXJqeB9NjBlVu60a9FPk8aGI7c8KlsA= -github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.25.10/go.mod h1:Z+hqC2q0pc/cjVHEN/OGEQOiyIHHlHqajV6agFDWxAA= -github.com/aws/aws-sdk-go-v2/service/codepipeline v1.28.0 h1:DPb5NN5t7oG01Dskb1qaURIAMA6GG7Y7OuVJDZZnLHI= -github.com/aws/aws-sdk-go-v2/service/codepipeline v1.28.0/go.mod h1:wiyjnfFARpwbUaFukzDE/vFlIsT+18D34fR1jfZhLTk= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.25.8 h1:J6ToNokSFf2TooLPCbu0gE8pxNm2eCx1KPeiPQttI/o= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.25.8/go.mod h1:AbHoQZ/Q3D7EuTv0s9G8Hq2MnLPuKh7CtTBZpQeZJOA= 
-github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.22.10 h1:FRNxZelyamjes/KzIx34Gf3MDJQhrQVRPDeXdl4Vmno= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.22.10/go.mod h1:5O6onn9kBfuiAmKoQFRlwzyLtGL7esOY785J2RtporE= -github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.23.13 h1:pp2Id7OxLkuBt/RwxTljUnrZI/0bGPwvew1qiqRK06k= -github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.23.13/go.mod h1:hYHhbLzJbPEqtn5AFIX3gxUAVxjZiIX/k0qkrtYPMAE= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.31.10 h1:V4TT4lZvrK/+FWiauEzKhzkmcOihlWhLI99ok6DC2s4= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.31.10/go.mod h1:NMZf+QBFmS1wKKZe2usxSi2AQ/CMqauSFAawT8bWb9g= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.34.7 h1:j2o7wDgqlk0o1kYnnJAmfvRA7ZB8CfQv4bUBFe/0vc0= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.34.7/go.mod h1:a/820gyN3ykj8uh/a+W6QZtq7shWsI/BeYHsRtoQyxw= -github.com/aws/aws-sdk-go-v2/service/configservice v1.46.11 h1:oEpDPoRfF7H8kuRT3LiQ7cfXVAgvTSIcxkxxTllNpvQ= -github.com/aws/aws-sdk-go-v2/service/configservice v1.46.11/go.mod h1:9iyVzn5BgTmy78KTlYJPMqP9ZPm6ripPx9DlM0f3PDY= -github.com/aws/aws-sdk-go-v2/service/connectcases v1.17.6 h1:xyRQg7ofUyvUvKTcFIoIkZjDnPyk9attgie20xf1TvA= -github.com/aws/aws-sdk-go-v2/service/connectcases v1.17.6/go.mod h1:bCdstM5DmKcnyJ4WtXtuZ2pGW5Ysgj+jQgjcwI8gyFk= -github.com/aws/aws-sdk-go-v2/service/controltower v1.14.3 h1:5TQoE8Jqa1faLxxF3JEjrLnogd7yuXg/OQh87145qPc= -github.com/aws/aws-sdk-go-v2/service/controltower v1.14.3/go.mod h1:SRnSiyiSHUoo57mdNF8NwLhakUGYbD47FVa5nOi3QM0= -github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.23.10 h1:2v7SRmVjQkKUz/+Iz1o4CydiXYZ3YRnIT2otTXTQzAs= -github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.23.10/go.mod h1:6z5YYMxC98CbbGVnl4ZTSkKQ8doQGgG+vrMxgjepdHw= -github.com/aws/aws-sdk-go-v2/service/costexplorer v1.38.6 h1:QmRZhtv8MJjzwBvtYcNygr2qEy3+efdW9VPNVBdRtyI= -github.com/aws/aws-sdk-go-v2/service/costexplorer 
v1.38.6/go.mod h1:Hw7bdrxR6Whnc1Gm/dL+3O47yvxv6fq691QDuYP4CRk= -github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.10 h1:Dureguz7Rt4oCM6wjJ+8wnHVxzaMxE1yXoyx0dl6L40= -github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.4.10/go.mod h1:xHvn/2S7UoUmuCmhOjFhxUJvVIYcFveju/wDRTJLUGo= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.10 h1:ihZtKZKPLNUFBzvRoZ0kXNdO3scdNwmLrZlNDCkYIj0= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.36.10/go.mod h1:/9TqI2Eb2CsFDP+NT6364fdmhy6/ENIRuOnyVInI8tI= -github.com/aws/aws-sdk-go-v2/service/datasync v1.38.4 h1:XPbgYirjL7lgxww/Giiz5+sxvR+PsrUi7hEd0G2O7Tk= -github.com/aws/aws-sdk-go-v2/service/datasync v1.38.4/go.mod h1:rKicbpvp17KIjesRGNiZTrbKVPkcUvmgsHro0kD2xxw= -github.com/aws/aws-sdk-go-v2/service/datazone v1.8.6 h1:+HLFrID7P2vMu4LDXQ3E5O0r2hlz6CpdSD1Lw/3EBpE= -github.com/aws/aws-sdk-go-v2/service/datazone v1.8.6/go.mod h1:qPKGqWEw4jbUCQbDg05JUtAct5X3N3tEfNF1JUPSqYY= -github.com/aws/aws-sdk-go-v2/service/dax v1.19.10 h1:0HyYc5poHunpMVyLao0aFUPx1T6S7OoD42T5/BanD6I= -github.com/aws/aws-sdk-go-v2/service/dax v1.19.10/go.mod h1:e28ilym+zzgzWaINgcaGR6xhZDk/JD6YzhNlOCLvYwg= -github.com/aws/aws-sdk-go-v2/service/devicefarm v1.22.10 h1:xMDzASghupXMJCyD08fHbGzT0lXYghMvbGGtNcgWc2o= -github.com/aws/aws-sdk-go-v2/service/devicefarm v1.22.10/go.mod h1:2dpVfQeot1pkyC3nlxLa/Re+Cj3+nBkyTmLV/QDQSkE= -github.com/aws/aws-sdk-go-v2/service/devopsguru v1.30.10 h1:uvlI0w0PGHmHMEjvEfUyBWpR9xdabJoPSjX1mps3Z9M= -github.com/aws/aws-sdk-go-v2/service/devopsguru v1.30.10/go.mod h1:jORIT/Q3NE4NFozKMvf5WUH0agl9oyB0w8nundUs5x0= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.24.10 h1:z3dYRIakCsFQtjjR7nUYSHnzBPnSPdUYH22xt3EFUtA= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.24.10/go.mod h1:/QLtpNRcVdFov0Lg8hwzryhLzdoLHS6pdsy9nT36oOo= -github.com/aws/aws-sdk-go-v2/service/dlm v1.24.10 h1:8ON2Utun4Q4FW2K6fI7EunVNiNipDQTZHd7VtwifGyw= -github.com/aws/aws-sdk-go-v2/service/dlm v1.24.10/go.mod 
h1:U24MUfNJt2URjXoFLu2NMPKPDgRUt7ZiAiYZ2jApx8Y= -github.com/aws/aws-sdk-go-v2/service/docdb v1.34.7 h1:1foSApaBUak26Y9xinJKRuf+On2wKQpfCdCeH7BIGpc= -github.com/aws/aws-sdk-go-v2/service/docdb v1.34.7/go.mod h1:2hCT2jx7fl7DyrY0oZjO3OOK7h+/SCvLUWnkU7zUm1A= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.9.9 h1:W4e41cUvIN/2f9sAhmDMdL5uqQo7V8nofT+TxdjjXhE= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.9.9/go.mod h1:xvM7Frdhg94+HGSNoOU3dj9s/YeB/e+AgUgG+E44wqc= -github.com/aws/aws-sdk-go-v2/service/drs v1.26.6 h1:MdrimlaasKFQNc5R4P7KPHs88oI/S8s/DqeW/46qkR4= -github.com/aws/aws-sdk-go-v2/service/drs v1.26.6/go.mod h1:SOC8l4nWwE5t4tvgiXQdPkcMye8creUQA/dOr68pWaY= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.8 h1:yOosUCdI/P+gfBd8uXk6lvZmrp7z2Xs8s1caIDP33lo= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.32.8/go.mod h1:4sYs0Krug9vn4cfDly4ExdbXJRqqZZBVDJNtBHGxCpQ= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.0 h1:kN8Jd9H1LD/zlZEaoLpHJjsaKQjzYA1TgzlCB12BCw8= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.164.0/go.mod h1:gYk1NtyvkH1SxPcndDtfro3lwbiE5t0tW4eRki5YnOQ= -github.com/aws/aws-sdk-go-v2/service/ecr v1.28.5 h1:dvvTFXpWSv9+8lTNPl1EPNZL6BCUV6MgVckEMvXaOgk= -github.com/aws/aws-sdk-go-v2/service/ecr v1.28.5/go.mod h1:Ogt6AOZ/sPBlJZpVFJgOK+jGGREuo8DMjNg+O/7gpjI= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.10 h1:dNXYTooy/H6NSIJ/zZqAVk/Ri4G4mqEWoz3btXhqI7E= -github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.23.10/go.mod h1:6JWi6AO/j/YgTOdu+XM2fRfoZTmferahXDwmravqSwQ= -github.com/aws/aws-sdk-go-v2/service/ecs v1.42.0 h1:FvAROmrc7vjL5I90Ag4JR0/7NRnXYU9OpPOLUSzxnos= -github.com/aws/aws-sdk-go-v2/service/ecs v1.42.0/go.mod h1:qxSuZNUGNmgr4Yt6rK2n8F9w7pWn5eOqo8C+NmF9rmg= -github.com/aws/aws-sdk-go-v2/service/eks v1.43.1 h1:RfpqqfRmDw4RMvNHmPesDBuMeaVDQhWgepAn6tP0aYI= -github.com/aws/aws-sdk-go-v2/service/eks v1.43.1/go.mod h1:oxKaTqwF6pHUbgA6/aOwVEZFK+Okv4tZMdb9m6AHjlg= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.38.8 
h1:y8kZastREinFhp2jcLjh+TeDQY4WpQ5qlB55XoDOj5o= -github.com/aws/aws-sdk-go-v2/service/elasticache v1.38.8/go.mod h1:kg37oVoLxcdwfXXAsboA9cj6IfgFoc0PWwltp9xy/rY= -github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10 h1:6MoPaz2J4C47Gieucud6SFEqhX4yZ9+hKQZzZvLbSy8= -github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.23.10/go.mod h1:uW7bugGF+vIsQdE22S+akMpsB+eZsSjJ6Kv/1lKQT50= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3 h1:Avh8YS+sgb2OKRht0wdNwY8tqtsCzVrmc8dG8Wfy9LI= -github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.31.3/go.mod h1:HbtHaw/hnNPaiqcyYnheILVyn81wOZiX9n2gYF5tPmM= -github.com/aws/aws-sdk-go-v2/service/emr v1.39.11 h1:PLsio+PhcBMUVjRypTYnZUAZ3qPYVWKmIgp3B8ZZxRM= -github.com/aws/aws-sdk-go-v2/service/emr v1.39.11/go.mod h1:c4P6499AxhWdFqbnZ25WX77JfVEWFHWqWj9wITeFqlI= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.21.2 h1:kl5gXTCoi2dEUplPE+p+dpdD/BiOWsp1zKNfd3Onhn4= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.21.2/go.mod h1:Z2lS6azbbFQslXAH586gQoU2Lup1IviscRXROJMeL6k= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.31.5 h1:2Qpq1XOClfrQglKh5SgQMSGMD0KLII9pbAw8FRgK/Fs= -github.com/aws/aws-sdk-go-v2/service/eventbridge v1.31.5/go.mod h1:BNzkR8iCd5MUGeo3oMLx8wo+S4EtAsIX2XnAuSdBX/0= -github.com/aws/aws-sdk-go-v2/service/evidently v1.19.10 h1:kRXBNhlhmAihqmXWQD3WCzlq69G+4kaaymDjDSIWQMU= -github.com/aws/aws-sdk-go-v2/service/evidently v1.19.10/go.mod h1:xmn6CgBAvNyXpku7wbOV5BXF/tN/Q0pKF3n9P/Nf5QA= -github.com/aws/aws-sdk-go-v2/service/finspace v1.24.7 h1:dlGh182hZoJIFxlwNjRTUJUQkKvRLoUOiDyGkc6F7No= -github.com/aws/aws-sdk-go-v2/service/finspace v1.24.7/go.mod h1:XPu6lBGrnwZyH2qn5Twk1x8IVYzRWQvXzQx/uRChk+s= -github.com/aws/aws-sdk-go-v2/service/firehose v1.29.1 h1:EULt+Eb7La2to3yiwC/m3Sn2+qEjaFN7IOQxjFk2290= -github.com/aws/aws-sdk-go-v2/service/firehose v1.29.1/go.mod h1:ahhanMBeTZy6yRPzKVybiothdO77NvOCyZMpEMfj2ow= -github.com/aws/aws-sdk-go-v2/service/fis v1.24.8 
h1:ajYYW5orv4QkEm9Hr2elpJ2OoTIlcLDa7q9nIEMgXGY= -github.com/aws/aws-sdk-go-v2/service/fis v1.24.8/go.mod h1:0GkfIF1n+BIh/xeWbpWoWlD+Mhk7haXQNH11G7BQTGM= -github.com/aws/aws-sdk-go-v2/service/fms v1.33.7 h1:SMf+LPFIiq1tfNo0rhV6YrlgnL7H6w7CSgMuJwqClEQ= -github.com/aws/aws-sdk-go-v2/service/fms v1.33.7/go.mod h1:NxMT3if6WnGIRRqEn74imFVzImksNcVl+NHKvlBvdT0= -github.com/aws/aws-sdk-go-v2/service/glacier v1.22.10 h1:E19vpAzC5QDng2IlfM6aNMBljv1kFx9O7iydbvMUk14= -github.com/aws/aws-sdk-go-v2/service/glacier v1.22.10/go.mod h1:14pqq/Xg2S/hlu9q67ePsGw0OB6SJppEqDJwxLEivvI= -github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.24.1 h1:bDVYY5tSzBnLAcdY/9nZd1gM4O+a8IVk2tUfcS0gJ1A= -github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.24.1/go.mod h1:ct31bulbJED7Z4Vdtr+Jtvt6bPRB5PdeH96NNm4wkOc= -github.com/aws/aws-sdk-go-v2/service/groundstation v1.27.6 h1:2GSPMCtOlEVwltVhqUT1x6CFKpFi/5D2yFhd/PqaSic= -github.com/aws/aws-sdk-go-v2/service/groundstation v1.27.6/go.mod h1:gFFqhE7646BA034Im+oTpkfnefC1AR/E4ZUTs/sV7lc= -github.com/aws/aws-sdk-go-v2/service/guardduty v1.43.0 h1:Jz/FJc/n27a9j1du1JxtBaMb/Wg/dSkWPbrfn2Y7CT4= -github.com/aws/aws-sdk-go-v2/service/guardduty v1.43.0/go.mod h1:tNfynl7aA5gEHA7yJZiEICHYMkITKSc0Z+vic+YpW0M= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.24.6 h1:AlmacWcocqb7vowwTlYtVR9AbYWW4vFExIoD7+kFR4g= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.24.6/go.mod h1:jp0Co1hHoXMEQTzyRICGBHvN8owh1QISx56d79dulFU= -github.com/aws/aws-sdk-go-v2/service/iam v1.32.6 h1:NRlKKQ/BPHPqsuN2Hy6v4WA8/bsRTP0j8/BFPBC5+SU= -github.com/aws/aws-sdk-go-v2/service/iam v1.32.6/go.mod h1:S+s7/UH0UIqRX4GyXvZihMJNR9nqlB0kxO4NKSFeRak= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.23.12 h1:UPOu53s56w1lIOKMaVfvOF4/4Ku3j5ZwKc9gWLkLUEM= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.23.12/go.mod h1:zx7M4pSjEGDxTwwREKVb0apz/2amwWoiewD+PztFvps= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.26.6 h1:uNhB5VBE/O72F3Z7sg86R6CytbceBm32gfjO6PXfILw= 
-github.com/aws/aws-sdk-go-v2/service/inspector2 v1.26.6/go.mod h1:h+gR0kPQnx2Tm5YPrYhb3W8ufqOTM/jlbHS/4WfUQgo= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2 h1:Ji0DY1xUsUr3I8cHps0G+XM3WWU16lP6yG8qu1GAZAs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.2/go.mod h1:5CsjAbs3NlGQyZNFACh+zztPDI7fU6eW9QsxjfnuBKg= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11 h1:4vt9Sspk59EZyHCAEMaktHKiq0C09noRTQorXD/qV+s= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.11/go.mod h1:5jHR79Tv+Ccq6rwYh+W7Nptmw++WiFafMfR42XhwNl8= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.10 h1:+ijk29Q2FlKCinEzG6GE3IcOyBsmPNUmFq/L82pSyhI= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.10/go.mod h1:D9WZXFWtJD76gmV2ZciWcY8BJBFdCblqdfF9OmkrwVU= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11 h1:o4T+fKxA3gTMcluBNZZXE9DNaMkJuUL1O3mffCUjoJo= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.11/go.mod h1:84oZdJ+VjuJKs9v1UTC9NaodRZRseOXCTgku+vQJWR8= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9 h1:TE2i0A9ErH1YfRSvXfCr2SQwfnqsoJT9nPQ9kj0lkxM= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.9/go.mod h1:9TzXX3MehQNGPwCZ3ka4CpwQsoAMWSF48/b+De9rfVM= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.14.6 h1:S+tywpOd723Gqg0xIg5QePGWKQ179kdj8yc0cI0ChI0= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.14.6/go.mod h1:xBJfeB8hPTEVyxGeBrZn9lO11UjFlC6yN8fm+LMuDl0= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.12.11 h1:EBpzcF6XrSgCUWvPSJBPxcRxgU0FbZya2KmHXnrXhOg= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.12.11/go.mod h1:EtWeluyFt+mbORnwybcy/0gmm3psrGPWUOZI4771q0A= -github.com/aws/aws-sdk-go-v2/service/kafka v1.33.2 h1:MP0DahXgJWKGv1/lFkWnO+Koj4fCVxe0Tcap6KlmpYw= -github.com/aws/aws-sdk-go-v2/service/kafka v1.33.2/go.mod h1:hxzW4JuArNI/W5i8scwr0BvYhJXhtntyMNSXnxJ4rcc= 
-github.com/aws/aws-sdk-go-v2/service/kendra v1.50.7 h1:aH+HH9kXs3AFj51H+NT4izexEDYULpoG4L+wKZ9SXAw= -github.com/aws/aws-sdk-go-v2/service/kendra v1.50.7/go.mod h1:VEnRGR182kFe23M6tA7B+3JN8bvtrDNkBLvlnTpKcbM= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.10.10 h1:aytBO6+Ex86UOstDfm4KxTD3sPFxdWcT9ImgbdPht4c= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.10.10/go.mod h1:p8edp/FOKMmGTWOSj4KWtum5Rgv9iE4p7cpdUoz0N+w= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.10 h1:lmp5qBDoJCLsPwKrYNe6zbHnNvW5jzz/xS+H0jkoSYg= -github.com/aws/aws-sdk-go-v2/service/kinesis v1.27.10/go.mod h1:CUWfw8B25XToRN7+sg092F9Ywjvz0PT4veHXBQ2KE0A= -github.com/aws/aws-sdk-go-v2/service/kms v1.32.3 h1:PtuDgLHjTq9JgykpX93EqGHlbNK0ju8xuDMcdD1Uo5I= -github.com/aws/aws-sdk-go-v2/service/kms v1.32.3/go.mod h1:uQiZ8PiSsPZuVC+hYKe/bSDZEhejdQW8GRemyUp0hio= -github.com/aws/aws-sdk-go-v2/service/lakeformation v1.33.3 h1:wieZjsYWmw330AVbgkIbTQXWacUmTZFrVKqnWBef7WU= -github.com/aws/aws-sdk-go-v2/service/lakeformation v1.33.3/go.mod h1:h7rs2zd6iDs8a9zjQ+JZ1hYBStUxUm+8jTNwpfSZY7E= -github.com/aws/aws-sdk-go-v2/service/lambda v1.54.6 h1:UMu5aeSubjM9geSuPCGOgBAZa0JvsXxJBFXmKgUuisM= -github.com/aws/aws-sdk-go-v2/service/lambda v1.54.6/go.mod h1:fWbFM4/v+IgUW+p4TooAXuhmiQyC5qxMV5gUqxDII2g= -github.com/aws/aws-sdk-go-v2/service/launchwizard v1.4.2 h1:OavF0RBMhcuArrkGSGnRsk7BDZAqg3BmDI6E7KgAcVs= -github.com/aws/aws-sdk-go-v2/service/launchwizard v1.4.2/go.mod h1:DIcTjNG5V6jZxpFWwYkG4/k0CbsqPNJlj5koUQnmu+g= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.43.10 h1:uX6vAyjLRTlvnrp+MdU2pJQ8EYMbv561PVRCh6QG++w= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.43.10/go.mod h1:F9+N41US+/MkvlC/NGxptK/MiUfKe1dweqsBl38ev/U= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.38.3 h1:YdA5QgoYa2wNblkWyZfPlLLYsAEKCwLfdMxpWu16wpM= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.38.3/go.mod h1:T0LiPG5vKHZ7DmOq4Cmw0Kku3tMkaR9AknskS2hUXvI= -github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.27.10 
h1:A3MzGDmkAyV2jRVSCHmTjMsuiYrRjrKxQiHsVts1jas= -github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.27.10/go.mod h1:BuKoVF3AykN1SAsKigr+aL8UovHFQJs2M7vylsOj8xY= -github.com/aws/aws-sdk-go-v2/service/m2 v1.13.6 h1:NxZs0J3l2p+PY+lPjHFVeY08lmTrv1vHzSfLNWqMfJc= -github.com/aws/aws-sdk-go-v2/service/m2 v1.13.6/go.mod h1:h0ksPg7Jqgml26JZoUs87A2sqx5/gRLH3hrN7p3ww8g= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.28.10 h1:PF1Q1JOKpyMPAjhBBcxUxOXafaHMZkXjN2Su+yPSj2M= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.28.10/go.mod h1:I2QK9o927+sKJn0yNFn3L0GVnXyZWwguOTdOy69wqRY= -github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.53.7 h1:9GzfkCQV6VLgtCjQQc8Bhz2QJLyae9b3kNN6N9qYVwU= -github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.53.7/go.mod h1:jpXXeDQerb6Md4Yg3LxscyQrqOzL+h1xi5SizhXCY9w= -github.com/aws/aws-sdk-go-v2/service/medialive v1.52.6 h1:VUfwXW95Om8NRrNuPBY8+tUpv2pLeMHoHNds2dPoI9s= -github.com/aws/aws-sdk-go-v2/service/medialive v1.52.6/go.mod h1:+kfONJ/rwJ7Qxizw2VNciswVk19vpXg9ngsEpfARusg= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.30.11 h1:28cpMq1VSS+d1vVYtrXQDzeuz+/P+Dxj2n2c0BrkQ4A= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.30.11/go.mod h1:v3DYFGJr+U/7XqOVLA5IBHXBUoHksVjfCrCEHQg6Usg= -github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.11.6 h1:GXSUO5NtPTuM/YW8v3yVh8h+y0mmbYSBlrL76ybUEUQ= -github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.11.6/go.mod h1:pg6xc5VIhx1ViJIsiQetZqdBKYCNRnsl8kcaWuJptZs= -github.com/aws/aws-sdk-go-v2/service/mediastore v1.20.10 h1:iheOfN0czGrzE96ZtlF9RvFG4sSNfRercZCxENO+BKw= -github.com/aws/aws-sdk-go-v2/service/mediastore v1.20.10/go.mod h1:ivRaBAFCc5B2vHuHJKlYyC6dDk0Q2cZpGO45Mbl2UPc= -github.com/aws/aws-sdk-go-v2/service/mq v1.22.11 h1:dI5u7KbpjslchKz46vHkQlfYFfcVRXQ53tBp3qdlOH8= -github.com/aws/aws-sdk-go-v2/service/mq v1.22.11/go.mod h1:hQ/8Uo+sQySjHie+oGZxYaDMVsAJYYea7fDWtxOW25g= -github.com/aws/aws-sdk-go-v2/service/mwaa v1.27.4 h1:8smXN5gAGZKBjervH0VZiR/dpP9G2nOiSakKNL+A2xY= 
-github.com/aws/aws-sdk-go-v2/service/mwaa v1.27.4/go.mod h1:n5E3bv5OwgyzXa8wN4dBiQ9chq4427i8mIL0DOGQ08U= -github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.8.7 h1:gmQ5UpIRqclaYFHyh+nWlx5NITsvVLR5aOzU9JnVPhU= -github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.8.7/go.mod h1:2ZVLdyzUl10QKonLIE4j8hsu9rePk62iO1HW7ND7cIw= -github.com/aws/aws-sdk-go-v2/service/oam v1.11.6 h1:AWbX6Q0CThDhgn6MIm2XPCnw3uA00yFkOKzfkGjDvwI= -github.com/aws/aws-sdk-go-v2/service/oam v1.11.6/go.mod h1:zh+/YaGPYtYIsy83eib8QYUCLNmTTRNnPKSy++MoVx0= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.11.13 h1:eqytt4h4+NG5eSYjHy/gxQeTYmH6kyB2BiNOqVdLWIU= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.11.13/go.mod h1:9tkQ/yUzFFBjIM7IbMqpsESMwNZkfO9ZtlXXJVfC/h4= -github.com/aws/aws-sdk-go-v2/service/organizations v1.27.9 h1:KNXacqpLvkK4oAMqSNhG2ETQzrVK4mKETAeNeo+dWyk= -github.com/aws/aws-sdk-go-v2/service/organizations v1.27.9/go.mod h1:hcr6lPG6K2l0WiKyu2ag/JrHbiIOUMg3tdNPtpTe+PM= -github.com/aws/aws-sdk-go-v2/service/osis v1.10.0 h1:2unOPcW9Eh/gcOEWdIcU5rmpwTFtqZ0YLaHzApPwiTI= -github.com/aws/aws-sdk-go-v2/service/osis v1.10.0/go.mod h1:1H+iBuiqX8snPxlOViytBuIKDHY2y+ZHzJ/gIqb+JEs= -github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.10.6 h1:dAtMkOI1E/+uOf5Md0TV5DzaOhUWbVsajcbiyjXS8Ng= -github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.10.6/go.mod h1:AlYMkLQ4e0iExjXDf8TPosjt8fjsmYu/2nv9xs9MbDs= -github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.5.10 h1:cn/ly7rE/rpG4XW7GFxs970D+PglbESF9f8vI/oUC+M= -github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.5.10/go.mod h1:3TF4rQ87enheeIx4c/vSFoqQXdoI9eOr6RDPvW0x0mM= -github.com/aws/aws-sdk-go-v2/service/pipes v1.12.1 h1:srOxtOzUntiGKtVCahEL13JYuBHGvuDlBJcHI81dgZA= -github.com/aws/aws-sdk-go-v2/service/pipes v1.12.1/go.mod h1:JHoN0tGkx3ZIYwOvF3WtepKYAMmSfC5o61ijIk6YuKo= -github.com/aws/aws-sdk-go-v2/service/polly v1.40.5 h1:nJowt8m2IcbcLkQnghrnro33nCBaPvGUOxzp2XrGbvE= 
-github.com/aws/aws-sdk-go-v2/service/polly v1.40.5/go.mod h1:NlZSQx5MgRlTRxuTB1UklQbkXSX/Rjk+nEJR2ClTjrM= -github.com/aws/aws-sdk-go-v2/service/pricing v1.28.7 h1:OimFd9B78+2BO35rJpIni3zEt3xXp+l/YuLwgULMNVE= -github.com/aws/aws-sdk-go-v2/service/pricing v1.28.7/go.mod h1:kdbauXuTWNaItPgeKT1uycVDvVlfD9FAEmKjAmAEiWM= -github.com/aws/aws-sdk-go-v2/service/qbusiness v1.6.6 h1:QtIdssfJjPLUGSc1UEl99uDbtW3WvP+bJ3ZmZPtGS6c= -github.com/aws/aws-sdk-go-v2/service/qbusiness v1.6.6/go.mod h1:J5k2cOgnRLFlQOX4Z0bBWhG8nb45vwMemPjegzvTdto= -github.com/aws/aws-sdk-go-v2/service/qldb v1.21.10 h1:JiA51DS5fOSXCbkaVurMcNAHSXTicEWlpy/343xdp1g= -github.com/aws/aws-sdk-go-v2/service/qldb v1.21.10/go.mod h1:KjLu3xgMrrGMgEpMvft7A0zPTn0EXVA5ys7KiF9/E44= -github.com/aws/aws-sdk-go-v2/service/ram v1.25.10 h1:eTSTspyVeFIjVvKEkhrF8xlTkcv2xRVih8H0ZL/wIGU= -github.com/aws/aws-sdk-go-v2/service/ram v1.25.10/go.mod h1:u82AB4OuZSlMIADLmySpervL3v6El3RYqSh3vjjOa2g= -github.com/aws/aws-sdk-go-v2/service/rbin v1.16.10 h1:4CSjB4CbP+WvGn9ow2ZyBSQ/JDpp2RmKkO2wpiFrBno= -github.com/aws/aws-sdk-go-v2/service/rbin v1.16.10/go.mod h1:PQuHOX24ueFRaxXKMVl+tAsbSPd/Ue5VhF4ispb5zdc= -github.com/aws/aws-sdk-go-v2/service/rds v1.79.6 h1:NX0OiCFYFc/p1Ufimr+kJkXCCFZe9FnUoQmG5mMrYfg= -github.com/aws/aws-sdk-go-v2/service/rds v1.79.6/go.mod h1:fZ+i+g1q3unIVP0qfYYyJd80W8aiyQJ6Wsij/HFj9W0= -github.com/aws/aws-sdk-go-v2/service/redshift v1.44.7 h1:sXKgb/ks0eeSHH2arXiUgMGIew7ka8fhplTLJx1Df48= -github.com/aws/aws-sdk-go-v2/service/redshift v1.44.7/go.mod h1:DNoffDrn/ZewuTyFUolU33+1w6vOieC8mhzF2Yi46PY= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.25.10 h1:FZiVA6SGDCxNUjoJ/CizSudFScdPvPQNbLtPgmrlUUk= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.25.10/go.mod h1:jnDZbfq7zPFvAnigSNc6iaOQ2TTAnzzQdNJQgHvg29s= -github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.18.8 h1:NtKuvfbHtkVQi/NDtGiDxmv6rL2ZcA93NXARmHkudDU= -github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.18.8/go.mod 
h1:lGVq1ZIzcwgjxeXlfXkY3DNC582SqYLwTjXZ1KGmAkM= -github.com/aws/aws-sdk-go-v2/service/rekognition v1.40.6 h1:v/UrTB1CHz+CXXPpE0jjGkgHT1sbpsEKs7/XsmrVa4k= -github.com/aws/aws-sdk-go-v2/service/rekognition v1.40.6/go.mod h1:AzDdeMyTSSSlZ+VO1S788x9x3lJVmVdqYlZxZ3rmi2U= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.10.11 h1:Ejmh88QYLOxgyh+kzoQUbLNyUbD4P7SLWmQ8Jx7qmmE= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.10.11/go.mod h1:+iMxqfKvnJVrbiHxDGyf47c7FI8TDukqjoMsLqoLrRw= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.22.6 h1:+oQIusl/699jbxbWeSI9fQ5ACZUxH6eeKxiXHtHjztQ= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.22.6/go.mod h1:hECEgQ2nBryyGTtts2k1m6MUjbaFJpoUd1wmNXpkEaY= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.21.10 h1:Lz66AebKV//iN8kelcsBe0fQekLmCkIzZSq/Yr/S+C4= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.21.10/go.mod h1:6u29rN3TBB89EOtTnEsjywjOmjA4nmUV8elhfLwinaw= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.11.6 h1:CnOWQ/6BKnBPcVTb9P7p6SsbFHsUvJJ2UbcQnZuIG+c= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.11.6/go.mod h1:urcaaPlew4LHXM66eEZeaWJBhCjKWzOCAmE2XInleA8= -github.com/aws/aws-sdk-go-v2/service/route53 v1.40.10 h1:J9uHribwEgHmesH5r0enxsZYyiGBWd2AaExSW2SydqE= -github.com/aws/aws-sdk-go-v2/service/route53 v1.40.10/go.mod h1:tdzmlLwRjsHJjd4XXoSSnubCkVdRa39y4jCp4RACMkY= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.23.10 h1:R56F/k3CQZHwrd3kHQ65Y91KHPBITruyPSX5/JGYe9E= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.23.10/go.mod h1:W3+eDMk01Na1U3aQfwGkkEP1Yfe6WUn8hXzyInvGlcU= -github.com/aws/aws-sdk-go-v2/service/route53profiles v1.0.7 h1:32/NRAG4ka8/hwr1k9ZA2xwarcJeWO6djaIFJ42tuFg= -github.com/aws/aws-sdk-go-v2/service/route53profiles v1.0.7/go.mod h1:H9RRL0qQ+s+XlaZO5s5G3Z8cVZpKEoj313hOyglUwj0= -github.com/aws/aws-sdk-go-v2/service/s3 v1.55.1 h1:UAxBuh0/8sFJk1qOkvOKewP5sWeWaTPDknbQz0ZkDm0= -github.com/aws/aws-sdk-go-v2/service/s3 
v1.55.1/go.mod h1:hWjsYGjVuqCgfoveVcVFPXIWgz0aByzwaxKlN1StKcM= -github.com/aws/aws-sdk-go-v2/service/s3control v1.44.13 h1:HhsZlX5gsL/KfEyHyBO5H0ewgmXoiBpjDPAZ3Ggrj8g= -github.com/aws/aws-sdk-go-v2/service/s3control v1.44.13/go.mod h1:4fXOTqROQgQ4Y6JP0G/vjF//YfG5oHxAwI2TPbgEblU= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.8.10 h1:tXVgXdk69TNCERB3gQofwGWIKBOSQYXLyhpRaiEmk/g= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.8.10/go.mod h1:+1zSuvpsye9jvBPYLg++LFV9wKaLZuKRpciXnJcRqkQ= -github.com/aws/aws-sdk-go-v2/service/schemas v1.24.10 h1:WxqJ2K51dsWHdwYUdi1oTqarDFcUOJUwcQOSTEEjQ8k= -github.com/aws/aws-sdk-go-v2/service/schemas v1.24.10/go.mod h1:Mr4cAhSy1m0p+AVxfTNmzPgkFo/Go8Pm2eIIJ9MlEMs= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.30.0 h1:nqR1mkoDntCpOwdlEfa2pZLiwvQeF4Mi56WzOTyuF/s= -github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.30.0/go.mod h1:M9TqBwpQ7AC6zu1Yji7vijRliqir7hxjuRcnxIk7jCc= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.49.2 h1:ybKzmQRXvLkQ9rb251QPmaC5ZlCK1g8b1MLq7DD5eaE= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.49.2/go.mod h1:6SQ5lQJXJZ4HL8ewgW7kp68UkqQtUE/3UmEvDLpJxKk= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.14.0 h1:VQuxwHBq5iKVKbsPdrfKWijA9V3vUmtTOeuqNiL6IkU= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.14.0/go.mod h1:R23fuxDRRYRzUYthyjMLC+j5J3FOdt8vEruXVmzieEc= -github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.26.10 h1:YmcqlNM/+On+uz1U8mO67xmCBpIDBunL/Jcvxh5HjnQ= -github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.26.10/go.mod h1:qNYkunnIvN0ttbrpYRRZnv2TYUEcAlQmhKXkvT46Rrs= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.12 h1:KW0wxLufq7ngz1ofsZcjqSDoJZo3mBsNIwtrQVB7Z/Y= -github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.29.12/go.mod h1:pMCoQoF+2NgjwpSf4vM+QHp23hnQCje0n5LxrnmMH4Q= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.10 h1:B4VK4LEI/L5dtYq2Omzt4XQ9WwtZX7I+YwmkhcDdEV8= 
-github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.10/go.mod h1:jAMj6BiwJo5rCrR97LdKlo1M494krOfnPJCS6X7etcU= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.30.0 h1:9K/f5C/JsiamFFof/E4kKo7DpkZ1z5sa98hI7XHV3P0= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.30.0/go.mod h1:FAFzNrXuMkCLLVL89dpjJq2yJFbgFkyJC98jSgVHsso= -github.com/aws/aws-sdk-go-v2/service/shield v1.25.10 h1:QTc2t3diE1+J1ESShBMZZetQQCJpr4DC6qGsJfpWrrQ= -github.com/aws/aws-sdk-go-v2/service/shield v1.25.10/go.mod h1:pQgQYgpvef5P1jqHjB5+q/ss21ndQ3QtcVbfzNk/GrU= -github.com/aws/aws-sdk-go-v2/service/signer v1.22.13 h1:c3VQdGTewW+OJq0iw/P5rnFpfio+Dy0u9ulPdc+QW5k= -github.com/aws/aws-sdk-go-v2/service/signer v1.22.13/go.mod h1:tm0X1UQcNg0XaT1wRSR+TJXdgTL6SMu7ZNb9EDkqXjA= -github.com/aws/aws-sdk-go-v2/service/sns v1.29.11 h1:cZN4fMAERLi1Q4ZklHj1ru0oFSQ5Dacad0cY26gu/Fc= -github.com/aws/aws-sdk-go-v2/service/sns v1.29.11/go.mod h1:au0J6BWDeQfeyItMkuqT6fhhyZ3cVARGC9FVEDaz+Fk= -github.com/aws/aws-sdk-go-v2/service/sqs v1.32.6 h1:FrGnU+Ggf+jUFj1O7Pdw5hCk42dmyO9TOTCVL7mDISk= -github.com/aws/aws-sdk-go-v2/service/sqs v1.32.6/go.mod h1:2Ef3ZgVWL7lyz5YZf854YkMboK6qF1NbG/0hc9StZsg= -github.com/aws/aws-sdk-go-v2/service/ssm v1.50.6 h1:E+gbKlOadAI0qV+8uh0JnYmkRJi7k7XvMXcKso0Inyc= -github.com/aws/aws-sdk-go-v2/service/ssm v1.50.6/go.mod h1:vR37XXoCLx2fzr/fUaTQoQ6ZlBK8Ua6VLnxLfxN6vLY= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.22.10 h1:JIw0378UWnueUdaZhOv8MO1zZ6ReIQXpYqSv01TDvio= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.22.10/go.mod h1:JDRWRN6hxzkF/XDtGSmLUYRP88SkdHBr6LFW1/yZiXI= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.30.10 h1:MizOPvyKVTN07X9x2dpd/bpvjEuPUj8NyOD4Njp4T7c= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.30.10/go.mod h1:jgVY27QLdMMdFV+ZlvVmVWiu5HsjnjZO5Hjaqh0soLU= -github.com/aws/aws-sdk-go-v2/service/ssmsap v1.13.5 h1:kiveZFwK8mqJrkaMorymQp6J6l3s/pY5n/i6tabYz3Y= -github.com/aws/aws-sdk-go-v2/service/ssmsap v1.13.5/go.mod 
h1:auz/mQcCWc6ijosjuNXKw1JItUvqj+ERG1iHwJfHcvE= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.11 h1:gEYM2GSpr4YNWc6hCd5nod4+d4kd9vWIAWrmGuLdlMw= -github.com/aws/aws-sdk-go-v2/service/sso v1.20.11/go.mod h1:gVvwPdPNYehHSP9Rs7q27U1EU+3Or2ZpXvzAYJNh63w= -github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.25.11 h1:9R4+nCSXYw+Ea10gD/uDPLEy7jV/m3i7tTN0x4cYPDg= -github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.25.11/go.mod h1:yXOGN/jjKLKLkWjZSKRWrnRAdw+6qWXF7bYXL/fB/d4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5 h1:iXjh3uaH3vsVcnyZX7MqCoCfcyxIrVE9iOQruRaWPrQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.24.5/go.mod h1:5ZXesEuy/QcO0WUnt+4sDkxhdXRHTu2yG0uCSH8B6os= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.12 h1:M/1u4HBpwLuMtjlxuI2y6HoVLzF5e2mfxHCg7ZVMYmk= -github.com/aws/aws-sdk-go-v2/service/sts v1.28.12/go.mod h1:kcfd+eTdEi/40FIbLq4Hif3XMXnl5b/+t/KTfLt9xIk= -github.com/aws/aws-sdk-go-v2/service/swf v1.23.2 h1:/EudBRyXqjvogP4JLFb31Jt8rz4YYy1UgW9KzKR+1xo= -github.com/aws/aws-sdk-go-v2/service/swf v1.23.2/go.mod h1:z92PP2/Cnis08+F2SlpnLT2kpJPpBQcWQ6aNGyGRvQg= -github.com/aws/aws-sdk-go-v2/service/synthetics v1.24.10 h1:PMQAcJQH/84Qma/LKvv4bvg0cdJmkcg4t433HZvV+BE= -github.com/aws/aws-sdk-go-v2/service/synthetics v1.24.10/go.mod h1:ecCYcAmgR/HOcRLfvMsUnSvNiI2rIpwCdoEkxl9tDo8= -github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.0.8 h1:dPKuHz5E8aOZHOt/2l5E9p4kX7WeEw93yKsgZxBvMg4= -github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.0.8/go.mod h1:ZCewKIHsDadZ9jgcCJYtvdRfH2CEMRxRXLPFobkEQec= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.25.11 h1:xdW8/PT8R5Qx/IjkAdMvZomjjOdIWlqsFMCH6mqgjsQ= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.25.11/go.mod h1:QfuI1DCBSBqbqc7hxOB0glVXBJE8NLX81hr9cc9yirQ= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.37.6 h1:KpnJG4jr1OhjkNnRklDEolRJr1CuFFeJBgKoAIXlhYE= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.37.6/go.mod h1:NMzEA79tY7NAOXO+fHz57LaOkK7WylnjJpQxmlhgoUc= 
-github.com/aws/aws-sdk-go-v2/service/transfer v1.48.3 h1:imZ9ImrvPCMGIMtRTLVBO6+mxGNcXw8Mi5WupIEwB9M= -github.com/aws/aws-sdk-go-v2/service/transfer v1.48.3/go.mod h1:RBiHBLIFC7Sye7F6EW16swUjnsETkgjHLBLbEo6lZAM= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.15.0 h1:mUdYHBcfWGNclMsAKSMjCmEgR95z4wzj21JH6bh3f9c= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.15.0/go.mod h1:CI0PDiO2lZqVoaSOLWmmAzDPSixUTzUSqEnlZUdhWq8= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.8.6 h1:Mn3zWbtu2877a9ONYd3WNRY43NentIgSHNUNnFs9vuQ= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.8.6/go.mod h1:0iCBzvgSjFsiQfmgRPHBK+8iZdH9mXVAG7EcfyrX4ZM= -github.com/aws/aws-sdk-go-v2/service/waf v1.20.10 h1:nN1bcxknc8kFrI+YSupMiRCmrzjKfMIucvMtKhLbWFo= -github.com/aws/aws-sdk-go-v2/service/waf v1.20.10/go.mod h1:hriMVzhWjoXy3+71A8Q/T+lGprjWLCH3IgEvcuDIvOM= -github.com/aws/aws-sdk-go-v2/service/wafregional v1.21.10 h1:HiZrToGiVRP1nzh0nTS3cQH1N6o04MrHb8nwsLNuVX8= -github.com/aws/aws-sdk-go-v2/service/wafregional v1.21.10/go.mod h1:+Rlg1RQVNbUbslQRkTSPk1QjGTPx7MCSnaEpN+VZrIY= -github.com/aws/aws-sdk-go-v2/service/wafv2 v1.49.3 h1:wnhDyatF0gn17s098Vd+/aHmgNvk3N7sknESF++wMck= -github.com/aws/aws-sdk-go-v2/service/wafv2 v1.49.3/go.mod h1:4U73NhYe9Eyz81zJgFKyho6Rmw1ZpIYnwhsdlx65mqI= -github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.30.6 h1:8W0gNavRGoSn2kolXQb/wr8MG9D7QrBAg/yjlTkmy04= -github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.30.6/go.mod h1:1P1kcHgiFKRuFfXGUck9vNaMCEmIeigbsBjb86UN2eg= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.39.6 h1:V4AQVudNs3PjsrXiDAX6HITaTLpo9W1r5yuUgzMONis= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.39.6/go.mod h1:BZlMv5EkPEBRCrHxTM6dH8nohuwIQaEHGHcI76a4pjs= -github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.18.6 h1:0IyUHkXxEeIVXWVtPB0+vQMM5sxBOWdPIoqCKwaGiG8= -github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.18.6/go.mod h1:utmfTQCJk0fAsiKFJ0FrGTJXFqyZoj5ZHm9FWT8Nf/0= 
-github.com/aws/aws-sdk-go-v2/service/xray v1.25.10 h1:EaxobHo3hQaj8HaGTdJwM8KRkAspfUQTthTeEXL6THA= -github.com/aws/aws-sdk-go-v2/service/xray v1.25.10/go.mod h1:doojKT3qF2pa1UDEuazJtGxdm2/Og9s9irewwJ+rpXU= -github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q= -github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.32.3 h1:1X7ZNHsaDGwjZcNev1rbwr+NxV/wNbvj/Iw7ibFhD5Q= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.32.3/go.mod h1:0NHJUsvqVpWtSg9rROCJ1AxLmDCHJTdYEhcSs6Oto9I= +github.com/aws/aws-sdk-go-v2/service/account v1.19.3 h1:w/ZZ69+nzIYoussDQvIqyezI6iKGAjiHnVWmG+8Qs1I= +github.com/aws/aws-sdk-go-v2/service/account v1.19.3/go.mod h1:s7hT4ZWjp8GoSr0z8d5ZsJ8k+C2g4AsknLtmQaJgp0c= +github.com/aws/aws-sdk-go-v2/service/acm v1.28.4 h1:wiW1Y6/1lysA0eJZRq0I53YYKuV9MNAzL15z2eZRlEE= +github.com/aws/aws-sdk-go-v2/service/acm v1.28.4/go.mod h1:bzjymHHRhexkSMIvUHMpKydo9U82bmqQ5ru0IzYM8m8= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0 h1:CCaeK/FqBo/fmhSSqY0K8buep/ELBDEWc8IoOjf2piM= +github.com/aws/aws-sdk-go-v2/service/acmpca v1.34.0/go.mod h1:vDUysl9ROGF6GAsl1OgTg6xHDnw391hCc5+IYg2U/GQ= +github.com/aws/aws-sdk-go-v2/service/amp v1.27.3 h1:o1cMErMp45oKZ2ScvBOdVXYhvu6FdUcz0Xn+JpDd408= +github.com/aws/aws-sdk-go-v2/service/amp v1.27.3/go.mod h1:TuSBSV1IedYHHrC4A3bW84WjQXNSzc6XasgvuDRDb4E= +github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3 h1:Plmg9O8/Pt4SKvPtUfSqCfv+SSSllouzlISFcvHK4bM= +github.com/aws/aws-sdk-go-v2/service/amplify v1.23.3/go.mod h1:aP9g/m4SSSWUU+htIGXJIY8qy+pGydwr3gpt3OcjBJE= +github.com/aws/aws-sdk-go-v2/service/apigateway v1.25.3 h1:vAtlXN1IZ+2etHppbmgbPw0ADNVRXS0Dfff/mPRLC3Y= +github.com/aws/aws-sdk-go-v2/service/apigateway 
v1.25.3/go.mod h1:jmTl7BrsxCEUl4HwtL9tCDVfmSmCwatcUQA7QXgtT34= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.3 h1:g99B1JOPkygjlDAjsD0xhvWifAs25Xw9SJ9WwC9Rn20= +github.com/aws/aws-sdk-go-v2/service/apigatewayv2 v1.22.3/go.mod h1:IN1OJRdB0VVSXsx1wlEfaDPpuXwSPkAVjhj7R5iSKsU= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.31.3 h1:nQVKaNJ8VrSKJpGQgzb+HVlrd8ehMuqYXF3Em+UK3P8= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.31.3/go.mod h1:tRgqJ4QiuXQeZ0QNDF6jdr+ImyXz5J4ystLtgUxPsD8= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.9.3 h1:Vz7if7/byANRrsN9Z0VQm1ZUff5iep5uZN16F7Z2A6c= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.9.3/go.mod h1:0ner/kMBkm2TcxNEtLglUXTZ+UDXuXnqDE+9FKUxOtM= +github.com/aws/aws-sdk-go-v2/service/appflow v1.43.3 h1:zD7Md/MQTUfa83LgDmaKGHTLs3+mpl3LncfS5wUypSI= +github.com/aws/aws-sdk-go-v2/service/appflow v1.43.3/go.mod h1:2b2pJQjTVLfBIzXs9TphXy1zJyRvNp34kbBgrnz4ByI= +github.com/aws/aws-sdk-go-v2/service/appintegrations v1.27.3 h1:joEF6jGgq/6aHp4MEHidJyOfqWrwqJpjxHYfcocjNsU= +github.com/aws/aws-sdk-go-v2/service/appintegrations v1.27.3/go.mod h1:7q06vKzUfBAZZrIfii6V8KC/+PmVNzNL2opqo9ivUMk= +github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.30.4 h1:qPr8FF0Jo4AIBcxb8gFmYcOW/zlsQX4iv8WkOGDm/F8= +github.com/aws/aws-sdk-go-v2/service/applicationautoscaling v1.30.4/go.mod h1:gNFF1rFmR0dVaBfehDuil+nuTqwzdJexrcvKaDY2JU8= +github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.26.3 h1:G7hP9np1L0ykj02CFQgkqdZERUmHCXdw8WmR5pW2pHM= +github.com/aws/aws-sdk-go-v2/service/applicationinsights v1.26.3/go.mod h1:NU+zX7v6CGH1X2Lz+lg3EqDjdqOgiCe2MjtobaToi6o= +github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.2.3 h1:TzO+pIk4UFmMTrHRsrqyOO3qUBxV4EYyEOFYjN1I7aI= +github.com/aws/aws-sdk-go-v2/service/applicationsignals v1.2.3/go.mod h1:xN0wvFa9G1ENYN0RbajUQ8VN3LMzyL3rcu2yP08cSMs= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.30.3 h1:x6wptcqKbH2eQw7v43MI25ILW3OtIyYwZ9gifEM0DW8= 
+github.com/aws/aws-sdk-go-v2/service/apprunner v1.30.3/go.mod h1:buTv8bJjlKxqALyK7/2G1206H/YYllu0R/F9Hz0rhv4= +github.com/aws/aws-sdk-go-v2/service/appstream v1.36.3 h1:msS6jU0f3kTgLfUQk7JxazMbfwG5/RbsOwiwXDBO9IU= +github.com/aws/aws-sdk-go-v2/service/appstream v1.36.3/go.mod h1:zgB9SASIAI0KWFuUSlo9pGC37f6DDjh1ZJfZEhQcPhU= +github.com/aws/aws-sdk-go-v2/service/appsync v1.34.3 h1:th1DsTjU1sw61RM9rW5g5c61QP1awuWt+zGBYFSIgb0= +github.com/aws/aws-sdk-go-v2/service/appsync v1.34.3/go.mod h1:1BIEiY+76rNP8PEcv/Iyt7ybml38JqitIbrHfMDEYb8= +github.com/aws/aws-sdk-go-v2/service/athena v1.44.3 h1:T2tJUqFEs8+2944NHspI3dRFELzKH4HfPXdrrIy18WA= +github.com/aws/aws-sdk-go-v2/service/athena v1.44.3/go.mod h1:Vn+X6oPpEMNBFAlGGHHNiNc+Tk10F3dPYLbtbED7fIE= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.35.3 h1:bUG4DuAXPb0inqsuG/kugMUwsJxxc2l7Sw2+jR+lvmI= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.35.3/go.mod h1:2uO8WcgMPuckIGMQd4HpDsUFhE8G6t3MkMNnrqREnl0= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.43.3 h1:y4kBd6IXizNoJ1QnVa1kFFmonxnv6mm6z+q7z0Jkdhg= +github.com/aws/aws-sdk-go-v2/service/autoscaling v1.43.3/go.mod h1:j2WsKJ/NQS+y8JUgpv+BBzyzddNZP2SG60fB5aQBZaA= +github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.22.3 h1:DnZw/gxHCBnqOVi/ML/E3QFYVF3/lIV/j8FhyTS7JWo= +github.com/aws/aws-sdk-go-v2/service/autoscalingplans v1.22.3/go.mod h1:yda4Po8TZKCPFw7B3f4KfoRV308C+6sriJmuuTRSvlU= +github.com/aws/aws-sdk-go-v2/service/backup v1.36.3 h1:8yBWFpIBlL8uOHKFgWykiRnku2wQVQP+hF91/FKFdnc= +github.com/aws/aws-sdk-go-v2/service/backup v1.36.3/go.mod h1:HLROV+NOBQ/hGMGc72X65qRctcEIKvaf6k7PekTLw+k= +github.com/aws/aws-sdk-go-v2/service/batch v1.43.0 h1:LQDwHqwORPQC1cP8iF+gaEbw6gFNVQ88m8qa66ou8d0= +github.com/aws/aws-sdk-go-v2/service/batch v1.43.0/go.mod h1:gzEWhQvhwjniRJbCksLNPR6//8dmfRHJGJMfFcNqOdk= +github.com/aws/aws-sdk-go-v2/service/bcmdataexports v1.5.3 h1:SUgFOQbtQNPqjvN68d8esf9qHWqh45wTZ7205wOz7oo= +github.com/aws/aws-sdk-go-v2/service/bcmdataexports 
v1.5.3/go.mod h1:KS4Up5owaEKw+EUTveQsSf9zsaUiJCSdoxZW1M8dbuE= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.12.0 h1:Ie1I5DsX0N5cQlJw+XwK8x/nZuca9MK7V/3FjumxSNc= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.12.0/go.mod h1:KP4dFAvbA6N2iUkDj61pqd140QyfceyK69PeKPD6860= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.16.0 h1:9DpqAvqAPGhJ4bnqJX8WiDJZUDdmRlotYoh95K8NgVc= +github.com/aws/aws-sdk-go-v2/service/bedrockagent v1.16.0/go.mod h1:RhcOKxIQHAqPTPIEUtEMG9eMnIRruBMY6+cmx4Mh8Dg= +github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3 h1:BfuKcgSyNTzS2N57JSM4uQ/dq1Qw8TQkoOoVvsFXoCw= +github.com/aws/aws-sdk-go-v2/service/budgets v1.25.3/go.mod h1:QJ119U4g137qbYZRXqFxtvyARMT88athXWt9gYcRBjM= +github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3 h1:BFVoEcC9czVq0/KHdNheLtPUGjBvu133EfgIF0hO3SI= +github.com/aws/aws-sdk-go-v2/service/chatbot v1.4.3/go.mod h1:9jB/CYDhmh+LPD3iRNnu4Zj+9A3AMoBQkxPp1j8reSs= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3 h1:NY/98Ry+J3xzQXaH9uy8KXya6JiOnoXjFqGLL7aKHLw= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.18.3/go.mod h1:AC5wH108q+kaTSjuQoKoKCH4fxGKoteUMRPb0wLYzGI= +github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.17.3 h1:e8mAmTy94SOhD/KdTRpocBj6+KOyxjQg7JYN1oBjT08= +github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.17.3/go.mod h1:Snfhyz0+wdVWPaqSLP2Bf3nziCeyP61AzEzwnxEhbWY= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.14.3 h1:GDqMlQfhiyBD3pWTY2JanoTyCmCMdWu8BejrYU1qQXs= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.14.3/go.mod h1:mRQ3DX5oSX/YETFLFjY4JNyerAE1yrumwZgYcmktrAk= +github.com/aws/aws-sdk-go-v2/service/cloud9 v1.26.3 h1:QBP3/69oA+0+j5oNHXL/V8Hj4NTEjYZaOXHPNFhbFv0= +github.com/aws/aws-sdk-go-v2/service/cloud9 v1.26.3/go.mod h1:ehJ9aR1QffkV/66jI90pJ05g2qCOIMuOLsuSkJ93cHc= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.20.3 h1:QdoWu2A7sOU7g38Uj1dH9rCvJcINiAV7B/exER1AOKo= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.20.3/go.mod 
h1:AOsjRDzfgBXF2xsVqwoirlk69ZzSzZIiZdxMyqTih6k= +github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.3 h1:mIpL+FXa+2U6oc85b/15JwJhNUU+c/LHwxM3hpQIxXQ= +github.com/aws/aws-sdk-go-v2/service/cloudformation v1.53.3/go.mod h1:lcQ7+K0Q9x0ozhjBwDfBkuY8qexSP/QXLgp0jj+/NZg= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.38.4 h1:I/sQ9uGOs72/483obb2SPoa9ZEsYGbel6jcTTwD/0zU= +github.com/aws/aws-sdk-go-v2/service/cloudfront v1.38.4/go.mod h1:P6ByphKl2oNQZlv4WsCaLSmRncKEcOnbitYLtJPfqZI= +github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.6.3 h1:ZHv5lcXUXHVAHZEZW3NfBqa4PcaclQPKf7AMiFJ4Oq4= +github.com/aws/aws-sdk-go-v2/service/cloudfrontkeyvaluestore v1.6.3/go.mod h1:Lv6trdyO6NW+ReaFMDUSrEaExuO/EUGOzBYLQ5xkbd8= +github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.25.2 h1:Db/kjlMkNVzqiWpeazMWcLZGGVOIsAL4Ftpl7SC7O1M= +github.com/aws/aws-sdk-go-v2/service/cloudhsmv2 v1.25.2/go.mod h1:BgimFWmGZs2F5QzLQA/X9IKqhHpckuWJ2yR3/GwlOqA= +github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.24.3 h1:Dlop6K06hj/An056A77eq8MEmLmmz7TF35m403ZH2Vo= +github.com/aws/aws-sdk-go-v2/service/cloudsearch v1.24.3/go.mod h1:vBprWws4t1YOJtHb7m4BtfFIJ64tmsN4d+9bkl82994= +github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.42.3 h1:dtFepCqT+Lm3sFxracD6PvVJAMTuIKTRd3yqBpMOomk= +github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.42.3/go.mod h1:p+4/sHQpT3kcfY2LruQuVgVFKd72yLnqJUayHhwfStY= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3 h1:VminN0bFfPQkaJ2MZOJh0d7+sVu0SKdZnO9FfyE1C18= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.40.3/go.mod h1:SxcxnimuI5pVps173h7VcyuFadgOFFfl2aUXUCswoY0= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.37.3 h1:pnvujeesw3tP0iDLKdREjPAzxmPqC8F0bov77VN2wSk= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.37.3/go.mod h1:eJZGfJNuTmvBgiy2O5XIPlHMBi4GUYoJoKZ6U6wCVVk= +github.com/aws/aws-sdk-go-v2/service/codeartifact v1.30.3 h1:9eAjfGKFWduKyCR94Qi/JfORoJLndGydph2dcLtM7gI= +github.com/aws/aws-sdk-go-v2/service/codeartifact v1.30.3/go.mod 
h1:AdirH4VV5v1ik2pOOU0WdEdojBBgzTdECBrOQl0ojOc= +github.com/aws/aws-sdk-go-v2/service/codebuild v1.40.3 h1:v+CiUB5RsmyRpGQ5Tddwn3prS1Y+uCIKVAzZ0Wb3Nyk= +github.com/aws/aws-sdk-go-v2/service/codebuild v1.40.3/go.mod h1:HDiBVjDHX2n7UGFgynZLkVGPXvEnurxlEeaxPF/Ql/0= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.15.3 h1:Bz3QJAdZd1z1rxlllKMl0s5y8kjbryqeMhlX57XJ5q8= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.15.3/go.mod h1:R3dXCLxoYZu1zDODLw810Je3DOydgMUC2MZqyf8Gi9g= +github.com/aws/aws-sdk-go-v2/service/codecommit v1.24.3 h1:fqMQmtdFtZkPgCFKn4S9xp21RSCfdR3mytel6zfAzaQ= +github.com/aws/aws-sdk-go-v2/service/codecommit v1.24.3/go.mod h1:VgBrrInGfpFZyyCfVJ+EhV57+I924PItEJ4/yqT34u8= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.27.3 h1:MSA1lrc/3I1rDQtLKmCe0P3J/jgc39jmN3SZBFVfJxA= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.27.3/go.mod h1:Zqk3aokH+BfnsAfJl10gz9zWU3TC28e5rR5N/U7yYDk= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.22.3 h1:SLX2POpbXZne1+f9RmdqEhof4p5zCpJRIt/ch4R3/bU= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.22.3/go.mod h1:n/bcMFxX+woGslg9MazSiTs5FIPDXozv1F/TvjbIZeA= +github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.27.3 h1:Fx4bbX53SzG2flM5fJHFgJ3fA7WpWohTwc4Q5np0ZYU= +github.com/aws/aws-sdk-go-v2/service/codegurureviewer v1.27.3/go.mod h1:FNcnCnqWpfricoUGPTi5AoMpDp0UF4xOtS7hvdRUz38= +github.com/aws/aws-sdk-go-v2/service/codepipeline v1.30.3 h1:yoSnmI4DWImw7bFpv+9tMqcn0TtGZRLnyyvUE9j7KJw= +github.com/aws/aws-sdk-go-v2/service/codepipeline v1.30.3/go.mod h1:V/08OFKsq9jFlh0zb5WC3AvBXhPgTbMfoVrsWU0gKGg= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.27.3 h1:ZuvuMAG2sgoruSgJ/rxLOZWtK2kkyn225YphvpOvPDc= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.27.3/go.mod h1:lhFfISGURSZzi/OQYyc94YoGXu3FhMp1/3g4lANOktY= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.24.3 h1:dOpVsTQ+KP4cISpU7i+djPuNxlmRuQtrDilqbC9qhDU= 
+github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.24.3/go.mod h1:jzetUSpzLqwmfFc8YWImGPkkrgNrQHR0AeDSPZBVVNY= +github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.25.5 h1:iMKC49JNJGq0MLvdKU7DSuB5uZUg33bIfcasNZjoMh4= +github.com/aws/aws-sdk-go-v2/service/cognitoidentity v1.25.5/go.mod h1:nEqtURWmhc/EXQ1yYIoEtvCqQYgl5yYKxdQU8taJnv0= +github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.41.4 h1:jkvdmVYoVWVrAIjgt9aiR9e7GRK2DnxrMnvKjA5EJd0= +github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider v1.41.4/go.mod h1:aynIysFCBIq18wfN2GrIYAeofOnQKV3LtkjyrQKfaFY= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.33.3 h1:3ZaUAjyN1VEdvH8xVTu87GLDpzp/BDTb5WjqpHU8po8= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.33.3/go.mod h1:IKMf00PVvTyj1E/ey0MGDuI58VHdRiiMtAf/2+c74EE= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.37.3 h1:0T+EzT9/cWUDqMmZ1Hvg7l7ZOso3satQ2T9trD8T6Ro= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.37.3/go.mod h1:Du8rTxK7DvQDcYWZnAH2kJfCxvIwNfKcdb/1MJJzmn4= +github.com/aws/aws-sdk-go-v2/service/configservice v1.48.3 h1:Ir1tfXyCY3XE/ENEb0mRUBn6VoWb1w9SDKYFwO+otJI= +github.com/aws/aws-sdk-go-v2/service/configservice v1.48.3/go.mod h1:Z4sA07QNZ7IWEix3oW3QeiIe21jaCTTOW8ftLgeWI3s= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.19.3 h1:6UpDqIDPvl6j+OpjjMfAWRyAKfNvZdRp6e88/gKubis= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.19.3/go.mod h1:/ni69CCzOeSFB/ySFHzJnWh6MQDJe/rNkvKW8+jfe9k= +github.com/aws/aws-sdk-go-v2/service/controltower v1.16.3 h1:uivw03qvOgsT9OHDdL7FQQ9rjnL4DoML867QemUTaOI= +github.com/aws/aws-sdk-go-v2/service/controltower v1.16.3/go.mod h1:tOyU8KwO9JqZlUXjpX3eXnf0r9iKkK/6sqlaNloJ5IQ= +github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.26.3 h1:t+h4OYWHsU9pQ6W7cDHso8TbM0fDfTjO7IPRsAl7CfY= +github.com/aws/aws-sdk-go-v2/service/costandusagereportservice v1.26.3/go.mod h1:mclb7wohFWSVV4EgpRd12YtX5SLAX++hKSWco/GqH8o= 
+github.com/aws/aws-sdk-go-v2/service/costexplorer v1.40.3 h1:wzusAKyjqSBbOjulrFF2caN+D4ylnI14cTT8xTKm7Sw= +github.com/aws/aws-sdk-go-v2/service/costexplorer v1.40.3/go.mod h1:qgL8c9hUSWedmBiyydYvQgmzKv04NJpgHgblzWMtDOg= +github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3 h1:+AiQwAYmhOXn0m+6B42XBR9UkDhSno0QjQl5XHCPg4k= +github.com/aws/aws-sdk-go-v2/service/costoptimizationhub v1.7.3/go.mod h1:Om/t/NhLjZu7rYMYBI1rWyGqEUfqSn/vk/k1/7pLEC8= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3 h1:Aq+7pnVWk59dS2BMVSOEDWN0yProaw0XhaUsRGbH7MM= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.39.3/go.mod h1:4duVgMu+RBKpiU+Hz4FjPedMLWNFVL4lhauBVYz8OZ4= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3 h1:HzdVJzMjEhQhLjUB1xGRMhs4zjaemPLUbdhhA4wfnMI= +github.com/aws/aws-sdk-go-v2/service/databasemigrationservice v1.40.3/go.mod h1:2SvlhcMgqPNNVr53/0m91cxPTY6mUFvp6o+Kzi63zUM= +github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3 h1:tFFs24+oIWlHLbTyluhnQIHaj8o4nc8yXHNnAc8PTN8= +github.com/aws/aws-sdk-go-v2/service/databrew v1.31.3/go.mod h1:WP7xXB608MyVv3yFzduKlLeYmU0AxMo7zeF9Cuwbvwc= +github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3 h1:GndlSdjdgcW1r+mGL635+6ZlwXgdu/663aHHyBJ6Jtk= +github.com/aws/aws-sdk-go-v2/service/dataexchange v1.30.3/go.mod h1:xUxKkSfH4sCQixoxh3pYc7C4N+OH2POgS0dhkOzR+u8= +github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3 h1:ZrKMl8jsL5YHurOLf0YVLb7JBYxGtqQQAknJ5g4MTz4= +github.com/aws/aws-sdk-go-v2/service/datasync v1.40.3/go.mod h1:+ObRlRcKO/p38yJSkpVZKlCU3t9PqXMORXC+xTkb9NU= +github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2 h1:9l6JiWZz/2Sp3ne9E/AXECwnzi7NASQUJnQ7xts/8oA= +github.com/aws/aws-sdk-go-v2/service/datazone v1.13.2/go.mod h1:li7vb6Ip/zyT59298XmAhs+dtXR2GqHXQlIdgL3QycE= +github.com/aws/aws-sdk-go-v2/service/dax v1.21.3 h1:uGHbOU0lBxntNZ/+Y2HbVo//AVFdl/BpMz7viHf/r8M= +github.com/aws/aws-sdk-go-v2/service/dax v1.21.3/go.mod h1:FNgKx9JXy9L0bThUl86EMV9gwUgqf2eexpitcne/AXc= 
+github.com/aws/aws-sdk-go-v2/service/detective v1.29.3 h1:HimZr2FJaLzxinq9QypFY2gGM+40pMWPwxB+ZNTkfNI= +github.com/aws/aws-sdk-go-v2/service/detective v1.29.3/go.mod h1:fiEtdUerGX5RHS/upeHldpHKikvfQz1MJCgquNFQeDo= +github.com/aws/aws-sdk-go-v2/service/devicefarm v1.25.2 h1:DSv0r8nKo8+ix2h5Rz/Zl62kkJPRxXIEQzmRI3CQVpY= +github.com/aws/aws-sdk-go-v2/service/devicefarm v1.25.2/go.mod h1:7Ev/BlW5/zbURomHu/2Ay8l/HAgoQAbaSP2XlMUED9I= +github.com/aws/aws-sdk-go-v2/service/devopsguru v1.32.3 h1:dVk+ogfz83rhZLaWSwSbgTQnxno+DIhZ3Q3KFdxTVmA= +github.com/aws/aws-sdk-go-v2/service/devopsguru v1.32.3/go.mod h1:Rbgi0LKyAIyWHlqVtgU5wy39omdfHHvlGjrl+Vg41us= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.27.3 h1:Ua8NLsRNDm/HSotawG9MjeUEdo88uuTsEJ+EQB99G7c= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.27.3/go.mod h1:DeGGGnrVVVNQlfMpAqmIiEndGTlDVbUIzNI4MbyyH68= +github.com/aws/aws-sdk-go-v2/service/dlm v1.26.3 h1:LAZoBLsYn4eSTzJlfIu+v/+EHzqLqkPlIIc+y36HgEA= +github.com/aws/aws-sdk-go-v2/service/dlm v1.26.3/go.mod h1:Sy6z2qbpj3pxXtwi0H5nR8WG1AMj2M2Gv6qPw2ChFYM= +github.com/aws/aws-sdk-go-v2/service/docdb v1.36.3 h1:6LabOycU59L+JfgCavDzfK1lheqj0wt/Fbta5OpeiUI= +github.com/aws/aws-sdk-go-v2/service/docdb v1.36.3/go.mod h1:cA+GYSfYfLSczv09u72Ger5kQ6JR5UHW3YmHD8c66tA= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.11.3 h1:1DLJ+BTpBLXMuWJPHPoemYYcBJS4GBpXg2VYZx29I4A= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.11.3/go.mod h1:wZl6Satx8GY99iRC/wA5nsPOCPOnnaizt/kb1t6hSRk= +github.com/aws/aws-sdk-go-v2/service/drs v1.28.3 h1:ss4Ib/kWbYA4pveQtSOluDE/Kf0e0jQ9SPwltAmRxKY= +github.com/aws/aws-sdk-go-v2/service/drs v1.28.3/go.mod h1:tjzPl3EOCkojHm9Q4y+Kuq7GGSJJw/P0UIqc4eHvtFI= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3 h1:nEhZKd1JQ4EB1tekcqW1oIVpDC1ZFrjrp/cLC5MXjFQ= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.34.3/go.mod h1:q9vzW3Xr1KEXa8n4waHiFt1PrppNDlMymlYP+xpsFbY= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0 
h1:zPwhEYn3Y83mnnr9QG+i6NTiAbVbcJe6RpCSJKHIQNE= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.170.0/go.mod h1:9KdiRVKTZyPRTlbX3i41FxTV+5OatZ7xOJCN4lleX7g= +github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3 h1:+v2hv29pWaVDASIScHuUhDC93nqJGVlGf6cujrJMHZE= +github.com/aws/aws-sdk-go-v2/service/ecr v1.30.3/go.mod h1:RhaP7Wil0+uuuhiE4FzOOEFZwkmFAk1ZflXzK+O3ptU= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3 h1:n2eqzO9VabUkd77b88Hos6OEtbGohB/TRrtXLTZi38Y= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.25.3/go.mod h1:Oy3yHBGkKtTmsn6iJGEZxytzZQrEvoFRWldB4XmzlO4= +github.com/aws/aws-sdk-go-v2/service/ecs v1.44.3 h1:JkVDQ9mfUSwMOGWIEmyB74mIznjKnHykJSq3uwusBBs= +github.com/aws/aws-sdk-go-v2/service/ecs v1.44.3/go.mod h1:MsQWy/90Xwn3cy5u+eiiXqC521xIm21wOODIweLo4hs= +github.com/aws/aws-sdk-go-v2/service/efs v1.31.3 h1:vHNTbv0pFB/E19MokZcWAxZIggWgcLlcixNePBe6iZc= +github.com/aws/aws-sdk-go-v2/service/efs v1.31.3/go.mod h1:P1X7sDHKpqZCLac7bRsFF/EN2REOgmeKStQTa14FpEA= +github.com/aws/aws-sdk-go-v2/service/eks v1.46.2 h1:byyz/tBy/uGyucr/QLE1UmTuGaJx9ge19aWUZCiOMCc= +github.com/aws/aws-sdk-go-v2/service/eks v1.46.2/go.mod h1:awleuSoavuUt32hemzWdSrI47zq7slFtIj8St07EXpE= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.40.3 h1:nmEN5lGIAShc0nNFjvUk2/YYlsTSwX2n1XF37Av93Yw= +github.com/aws/aws-sdk-go-v2/service/elasticache v1.40.3/go.mod h1:OcUtpbcNsyMdA/Wv5XenKl8aG3yrqA6HVIOF7ms+Ikc= +github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.26.2 h1:OA2kqnEcSqpnznO4hb4MKDXxeCRuEkADGgnihLwvn4E= +github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk v1.26.2/go.mod h1:N/YWNrjILpIoai7cZ4Uq2KCNvBPf25Y+vIhbm9QpwDc= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.26.3 h1:5B2Dq2zy/hgtEO3wITnOZiyh6e+GyuHTGw6bK/8+L3w= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing v1.26.3/go.mod h1:mgU2kG+D5ybtfGhEuZRW8usYOGrNSgsimRt/hOSI65s= +github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3 h1:yiBmRRlVwehTN2TF0wbUkM7BluYFOLZU/U2SeQHE+q8= 
+github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.33.3/go.mod h1:L5bVuO4PeXuDuMYZfL3IW69E6mz6PDCYpp6IKDlcLMA= +github.com/aws/aws-sdk-go-v2/service/emr v1.42.2 h1:j3aHjEsxFGCNGOCJjJM6AtPhdvn1pw2i2hGqxLU0qeI= +github.com/aws/aws-sdk-go-v2/service/emr v1.42.2/go.mod h1:rN91rXF7gucnSnArDWbv9xDdZjBEetO4LFoJgGK/Wqw= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3 h1:zxpxkpY1h+kPWquiUSG8u2CJ3AtEJPqqBqiMKxLwPjI= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.23.3/go.mod h1:9+NjcAre2lLrpGvCrb9V+TUDii5D+Z8xER/vCPZdZFg= +github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3 h1:pjZzcXU25gsD2WmlmlayEsyXIWMVOK3//x4BXvK9c0U= +github.com/aws/aws-sdk-go-v2/service/eventbridge v1.33.3/go.mod h1:4ew4HelByABYyBE+8iU8Rzrp5PdBic5yd9nFMhbnwE8= +github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3 h1:bAuNjv1PmyZvjojnXlozw68T2X2eq1xhjteyU6qGDQU= +github.com/aws/aws-sdk-go-v2/service/evidently v1.21.3/go.mod h1:EtC1+tObvVB/l/c9Dh6IILA/r/cu9Pc17S870zRihq4= +github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3 h1:Y8VS/XHyeJ1cxSCtmvUOFLqfNIl9rASWOE/gsrydGFw= +github.com/aws/aws-sdk-go-v2/service/finspace v1.26.3/go.mod h1:xbE7o+ADq+h0DeKA/05618ox75wY/jtoZTF9XuvSvnI= +github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3 h1:BMYs3DZYSIaIDhkPSsAUeobQ7Z0ipNRJSiFTP2C4RWE= +github.com/aws/aws-sdk-go-v2/service/firehose v1.31.3/go.mod h1:8rN4JsVXcCHl/f4hwOWVuy+iQ5iolXOdSX+QFYZyubw= +github.com/aws/aws-sdk-go-v2/service/fis v1.26.3 h1:NwddG0xUTBM2zoq4D8rotQmT2Z/S8IGM+D2wYzKFSQs= +github.com/aws/aws-sdk-go-v2/service/fis v1.26.3/go.mod h1:QmdVf0N/vrhckZLHK4x+f+u9EUuMhetsRgu1rjU1eL0= +github.com/aws/aws-sdk-go-v2/service/fms v1.35.3 h1:QeYAz3JhpkTxkS+fifDBfmgWFdSRBI21MQzN2bCO1xo= +github.com/aws/aws-sdk-go-v2/service/fms v1.35.3/go.mod h1:GXASgVouW5X/bmEgOoV/tkzJkp5ib7ZeA+YxMc5piqs= +github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3 h1:de8RU808VMx8km6t2wY3WDWigB6GqbNEcyVQRJFaIYs= +github.com/aws/aws-sdk-go-v2/service/glacier v1.24.3/go.mod 
h1:F/qjepwnxPHHUTK9ikZp14jLyrvB18kZ/22MmaPxtHE= +github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.27.0 h1:nlm6tZX8gwsVktDKTQe3IOagNVK1+6CGf9IpdWM6x+E= +github.com/aws/aws-sdk-go-v2/service/globalaccelerator v1.27.0/go.mod h1:ehkx8lBkJkZbdKYX2GyMFmdOAfcGs0mjcbaNXNiHAEE= +github.com/aws/aws-sdk-go-v2/service/grafana v1.24.3 h1:riHLAJSqo5zczCyMSo8XDA46X2aDpQvB46F0seKuNEM= +github.com/aws/aws-sdk-go-v2/service/grafana v1.24.3/go.mod h1:2ipW9QX9MlePs99Dy8ohwfdW847hMJG6BU9jvixIpxE= +github.com/aws/aws-sdk-go-v2/service/greengrass v1.25.3 h1:5KauP/IHPWGoHni4mt2Sjp0EtHMkdWtPP3v81qaHHyg= +github.com/aws/aws-sdk-go-v2/service/greengrass v1.25.3/go.mod h1:Cw18f8jWmb5IQlxd48bIDSXOPfKf5am3Zr9GnOyCcTw= +github.com/aws/aws-sdk-go-v2/service/groundstation v1.29.3 h1:qo3UtqkypEXmUSOGepFqFt1bbEi1EAsJcHm6I3WQtOk= +github.com/aws/aws-sdk-go-v2/service/groundstation v1.29.3/go.mod h1:upTLlgFk3Yw83uo6jNxlFD2EdU/iwZc+FM1OG+Zhikw= +github.com/aws/aws-sdk-go-v2/service/guardduty v1.45.3 h1:V7+xcerreGBsoLqraRPAJRCaFiN/04kP85mMeQjgRO4= +github.com/aws/aws-sdk-go-v2/service/guardduty v1.45.3/go.mod h1:zjxzcOjdQYMgh90Xm5XRVbeQD7bSeD7XaPB77CNq1C8= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.26.3 h1:hIlZp+8MV4c5dWOelj4ygDv8w/uyuKURga1FHT8MI44= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.26.3/go.mod h1:n7B4cOb7+4pzcO0F7KVnUgnS9Z5dKQHxQrCR7D/bZyE= +github.com/aws/aws-sdk-go-v2/service/iam v1.34.3 h1:p4L/tixJ3JUIxCteMGT6oMlqCbEv/EzSZoVwdiib8sU= +github.com/aws/aws-sdk-go-v2/service/iam v1.34.3/go.mod h1:rfOWxxwdecWvSC9C2/8K/foW3Blf+aKnIIPP9kQ2DPE= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.25.3 h1:eiL4q6pEzvazErz3gBOoP9hDm3Ul8pV69Qn7BrPARrU= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.25.3/go.mod h1:oNDSqrUg2dofbodrdr9fBzJ6dX8Lkh/2xN7LXXdvr5A= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.28.3 h1:dscyhNwL1v6pYPCflnp8/jBMeCC5y5Vn8npXmM/EE78= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.28.3/go.mod 
h1:EI8IxOq2F4KHZQQEB4rmQPXmYILE2avtX6wOiR8A5XQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17 h1:YPYe6ZmvUfDDDELqEKtAd6bo8zxhkm+XEFEzQisqUIE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.17/go.mod h1:oBtcnYua/CgzCWYN7NZ5j7PotFDaFSUjCYVTtfyn7vw= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16 h1:lhAX5f7KpgwyieXjbDnRTjPEUI0l3emSRyxXj1PXP8w= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.9.16/go.mod h1:AblAlCwvi7Q/SFowvckgN+8M3uFPlopSYeLlbNDArhA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15 h1:246A4lSTXWJw/rmlQI+TT2OcqeDMKBdyjEQrafMaQdA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.15/go.mod h1:haVfg3761/WF7YPuJOER2MP0k4UAXyHaLclKXB6usDg= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.16.3 h1:3dIg2t4akBnpmzXJO20z/JxqS7AQfuR7+WZKQRpdpmM= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.16.3/go.mod h1:kGhxggatnXh1Kog+ppPQwEHVdaJiuGuEYg1DbdSXPwU= +github.com/aws/aws-sdk-go-v2/service/iot v1.55.3 h1:di+va5f5fLC32K+0eDQa2AWQujjLgdeTXakUQXtsS68= +github.com/aws/aws-sdk-go-v2/service/iot v1.55.3/go.mod h1:2blUX4qcMUQIyWY6nfu8R0kMORCNH0oLRZU1EOj2+mk= +github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3 h1:SEt8SRvlGvnOkqDV5PJ9eFvwz03H9A67Co/QPPdic5Y= +github.com/aws/aws-sdk-go-v2/service/iotanalytics v1.24.3/go.mod h1:XDi19IK0UluaSVnm1mu2AakZKHtWjg6gksitvH7+LQw= +github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3 
h1:9Lao6kmD9P+yywuIn9I8hrraJ2jHIztU/GJspIxn6lA= +github.com/aws/aws-sdk-go-v2/service/iotevents v1.25.3/go.mod h1:V2BDVrnP+Tn+MM1xxFI7Qcb+YPhiGgY5PUoKzrKHaCQ= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3 h1:d7y5Gs9BfO+1Jhj8y1/lZhegiJXXy/DlanzwRgYrkXM= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.14.3/go.mod h1:rtw6VOH+4X/TWoOKQlOC+oq/WBDJD4BqaPi930II6Mk= +github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3 h1:MUx27PrqicGxgsiDWo7xv/Zsl4b0X8kHCRvMpX7XrQs= +github.com/aws/aws-sdk-go-v2/service/kafka v1.35.3/go.mod h1:mBWO7tOHjEvfZ88cUBhCfViO9vclCumFcTeiR1cB4IA= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3 h1:jJyh5SN/b78UZjIsVqM8/N5GQsD12sEvM2g5bVsFVhg= +github.com/aws/aws-sdk-go-v2/service/kafkaconnect v1.19.3/go.mod h1:XuvDeFgRl8LZ0tPHImZYbq/71qXlXEh4a3UBvTOmKZw= +github.com/aws/aws-sdk-go-v2/service/kendra v1.52.3 h1:SgSKyym+vQfUvEOyuLR9uPJ8o63pBIMI06xWLGZ75s0= +github.com/aws/aws-sdk-go-v2/service/kendra v1.52.3/go.mod h1:I7nz57YLvHw0sd5TjLRyAc8Ea7Qic6Emk+V+TwleBYY= +github.com/aws/aws-sdk-go-v2/service/keyspaces v1.12.3 h1:25HN/tJRRf0rwPzDpNyTALuk3Yrd9wBEXR+WMZIMA38= +github.com/aws/aws-sdk-go-v2/service/keyspaces v1.12.3/go.mod h1:/sTpi3FG4DsTSTabyXfKXypVEjCuNU/8jxTCQLWYRZQ= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3 h1:ktR7RUdUQ8m9rkgCPRsS7iTJgFp9MXEX0nltrT8bxY4= +github.com/aws/aws-sdk-go-v2/service/kinesis v1.29.3/go.mod h1:hufTMUGSlcBLGgs6leSPbDfY1sM3mrO2qjtVkPMTDhE= +github.com/aws/aws-sdk-go-v2/service/kms v1.35.3 h1:UPTdlTOwWUX49fVi7cymEN6hDqCwe3LNv1vi7TXUutk= +github.com/aws/aws-sdk-go-v2/service/kms v1.35.3/go.mod h1:gjDP16zn+WWalyaUqwCCioQ8gU8lzttCCc9jYsiQI/8= +github.com/aws/aws-sdk-go-v2/service/lakeformation v1.35.3 h1:Rfl7JjXVdriUprd8TTlbgcTyPU/Pl+v/O/nMD9HYpgA= +github.com/aws/aws-sdk-go-v2/service/lakeformation v1.35.3/go.mod h1:cyogDr92z2UF8fBoRN/+/gKuVTrxBD10bo6PVn3tDeQ= +github.com/aws/aws-sdk-go-v2/service/lambda v1.56.3 h1:r/y4nQOln25cbjrD8Wmzhhvnvr2ObPjgcPvPdoU9yHs= 
+github.com/aws/aws-sdk-go-v2/service/lambda v1.56.3/go.mod h1:/4Vaddp+wJc1AA8ViAqwWKAcYykPV+ZplhmLQuq3RbQ= +github.com/aws/aws-sdk-go-v2/service/launchwizard v1.6.3 h1:HlZn+zJoCEFuUvKLGbGXVIwXp3XA1xvLf/udp7ABDvk= +github.com/aws/aws-sdk-go-v2/service/launchwizard v1.6.3/go.mod h1:IJIHGsE1X4tRCw3s+SMG0NlIQM4yM7rlj5CfUDqT/+M= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.45.3 h1:sZwjTNfuXXk3Fyor/bEpjcznTD1+f6OEYxONrAU2sAc= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.45.3/go.mod h1:GE1lDQwM3Dm7Fysaet+yeNanYwwTvfLIUlK3P/owUw8= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.40.3 h1:dy4sbyGy7BS4c0KaPZwg1P5ZP+lW+auTVcPiwrmbn8M= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.40.3/go.mod h1:EMgqMhof+RuaYvQavxKC0ZWvP7yB4B4NJhP+dbm13u0= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.29.3 h1:UJEd/gP0jzWDfr4f/3TPKSls8MuomApfPap1CS/PxMY= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.29.3/go.mod h1:o21usaj4iSiu94CTwmLKS94wMM4+AJY/HNR5vWmcEC0= +github.com/aws/aws-sdk-go-v2/service/m2 v1.15.3 h1:2rO4AxOqwtWar9xx051FKeDDXu8njV0DZt+tdlfy8y4= +github.com/aws/aws-sdk-go-v2/service/m2 v1.15.3/go.mod h1:OKkohde5gLaVJ2MWJkBxU0DXBggmMDdEQ6dSxeKdDcU= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0 h1:Y79CoATONI7M7deTCC5RX/84rK5n/oK1s8HWk7LMV+4= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.32.0/go.mod h1:6cpEF3W3oCNX9shBj9N3lrehYdxLuzDbYZdhOiaoN94= +github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3 h1:1ls4o+377rEfTuZ4YaqDrSo75qpC1ySv8m2FfVk23tw= +github.com/aws/aws-sdk-go-v2/service/mediaconvert v1.57.3/go.mod h1:JAiHALb6LfTclPNBdUUTL8xmDZcwBCTbSVgJEkgiIv4= +github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3 h1:9aDpYGrfgFjfvzOdAfMcEdGbWa3l/1RjGtOr4On9Kd4= +github.com/aws/aws-sdk-go-v2/service/medialive v1.54.3/go.mod h1:49kVyWdlOWpusFyzDrmxCG9PqXlKtpKmHYoTv5h1O5k= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3 h1:fBtklFkqk6QhJBzSBgNJiwWySt1RvspmvCvY+giXgdI= 
+github.com/aws/aws-sdk-go-v2/service/mediapackage v1.32.3/go.mod h1:BejXbLdRRWr6uMl4wZrz3iAcJDVgJu3EEstqDq8wxEE= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3 h1:ytQ77lC/wrYatbiLSZlYSpgjzvtgXBey0xxRsBA4swY= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.14.3/go.mod h1:+R07/s3U8lJzEZDiwFxv/jmlSNbQjnoSqKaZEoqWt5Y= +github.com/aws/aws-sdk-go-v2/service/mediastore v1.22.3 h1:WBVRvc0iIJdbdCkBjWRMVtUOMmAvOyN70x1KrBTOFm0= +github.com/aws/aws-sdk-go-v2/service/mediastore v1.22.3/go.mod h1:plJWP1InGjEZiJvXfTlBqTBeMW8ddEZeIdYYFTYZMyE= +github.com/aws/aws-sdk-go-v2/service/mq v1.25.3 h1:SyRcb9GRPcoNKCuLnpj1qGIr/8stnVIf4DsuRhXIzEA= +github.com/aws/aws-sdk-go-v2/service/mq v1.25.3/go.mod h1:Xu8nT/Yj64z5Gj1ebVB3drPEIBsPNDoFhx2xZDrdGlc= +github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4 h1:lptYTP7Br5zll9USf2aKY1ZlN69vYAlZOSCv1Q+k1S4= +github.com/aws/aws-sdk-go-v2/service/mwaa v1.29.4/go.mod h1:mtgvj3nNI+LiRNT07JaHbTh6E/y8QRrClvd+/GMhMS4= +github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3 h1:XEbvRhi+ELazJaqh8k0KgTZrAgXM3rmR0hsGPTIpUIo= +github.com/aws/aws-sdk-go-v2/service/neptunegraph v1.10.3/go.mod h1:tfCOS8E/SwIkqHHGgpwRZTly3ZQxcsORZPEVBKMkbx4= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3 h1:hb3i/o9ouQj6RZjykyGI1koOfp22/ZMuWpuPfeu+zNE= +github.com/aws/aws-sdk-go-v2/service/networkfirewall v1.40.3/go.mod h1:Y9mINPJv+o9q8Ztr5/PRh2C1Iynik64IhPzwe2ERGqQ= +github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3 h1:I+m+rITTdVA9BNJeuCzYgMQjqbUE10xcY0OqgBvFEFE= +github.com/aws/aws-sdk-go-v2/service/networkmonitor v1.5.3/go.mod h1:R+4X5haYg3eRWYb99y+m1UhlVjFrHNlcfl3WES5e1oQ= +github.com/aws/aws-sdk-go-v2/service/oam v1.13.3 h1:KCbGN36Q/qQ27mv+/4BSax0q6/KSAxh3K3R+gRhNHwg= +github.com/aws/aws-sdk-go-v2/service/oam v1.13.3/go.mod h1:T/GYfs9EvCp1ke+82YQJZTTP0FlRETQnny3uPl1YTlY= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.13.3 h1:xRRPnilDJCDohQ+J1dUH4UvzL6P+KPQ0NwO7cs0odfc= 
+github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.13.3/go.mod h1:J9Ybe5zLnJG/PsLrdI80ihIW1MYSHMlQyVtdc1X9irQ= +github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2 h1:+tGF0JH2u4HwneqNFAKFHqENwfpBweKj67+LbwTKpqE= +github.com/aws/aws-sdk-go-v2/service/organizations v1.30.2/go.mod h1:6wxO8s5wMumyNRsOgOgcIvqvF8rIf8Cj7Khhn/bFI0c= +github.com/aws/aws-sdk-go-v2/service/osis v1.12.3 h1:T9+bvsT2me+zQx7rUUTgalP7u5lOruoZoH8Xnp1gSPI= +github.com/aws/aws-sdk-go-v2/service/osis v1.12.3/go.mod h1:582tNTtG2bLnDxD5ceguyDlc7hAqtHYY29xHcux37Lo= +github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.12.3 h1:9oQMCF4oLvWSCDTiiAVEwPs4Sl/iBsC/17qvIa2sYjU= +github.com/aws/aws-sdk-go-v2/service/paymentcryptography v1.12.3/go.mod h1:NNyvgUO7XweCVxGTSnllS6XdsD/9Il6Kc63D/stKgiM= +github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.7.3 h1:xKVSPlN0K1r9VBe6MaKHgUi3EvJotLE9s4etstJq0jw= +github.com/aws/aws-sdk-go-v2/service/pcaconnectorad v1.7.3/go.mod h1:4Lk91jzPQQKOzml7LHOR/zAE5FF4+mL0CPrArI8vnCY= +github.com/aws/aws-sdk-go-v2/service/pipes v1.14.3 h1:fYZlFa1OvrgaFODrdf0KVDp4qCRHMZNr8S/F3aGNuno= +github.com/aws/aws-sdk-go-v2/service/pipes v1.14.3/go.mod h1:S0g2KF8IpU6Ptn46eSywrS+w1PMUwrf/xWF8szcTZ2Q= +github.com/aws/aws-sdk-go-v2/service/polly v1.42.3 h1:MuoVKFJr/TUimLdT6nvio+OehAPM7kILgNLF3rYcaP0= +github.com/aws/aws-sdk-go-v2/service/polly v1.42.3/go.mod h1:PQlzSg4fsvxUgyXl0VIORU06zIQV2Y1Jd5YkDrP46FI= +github.com/aws/aws-sdk-go-v2/service/pricing v1.30.3 h1:CO5rn/wveWDphdllj+E6fdfX26XhmBj6zbntQbwajzE= +github.com/aws/aws-sdk-go-v2/service/pricing v1.30.3/go.mod h1:JnnBNRgok4OQBoHCzpS37BgWNQkbY73q97HZMCDgvho= +github.com/aws/aws-sdk-go-v2/service/qbusiness v1.10.2 h1:ZEVUuXUj5FERUTzzACAFJ8p/0q3AWTkvnbOOp9nVIXA= +github.com/aws/aws-sdk-go-v2/service/qbusiness v1.10.2/go.mod h1:+O5t/RLHL/ureGkytxCumU3VQjAaKOQ4PU89+aZC9ow= +github.com/aws/aws-sdk-go-v2/service/qldb v1.23.3 h1:qrU3Xiv20E8yPTJq7ZDTjVOBuYVbEE9NsucXKP57YiE= +github.com/aws/aws-sdk-go-v2/service/qldb 
v1.23.3/go.mod h1:ZePPGflmFHyvUediLcKpc4I9ZaIARm/OgAvtayU7sD0= +github.com/aws/aws-sdk-go-v2/service/ram v1.27.3 h1:MoQ0up3IiE2fl0+qySx3Lb0swK6G6ESQ4S3w3WfJZ48= +github.com/aws/aws-sdk-go-v2/service/ram v1.27.3/go.mod h1:XymSCzlSx2QjdvU/KdV/+niPQBZRC1A8luPDFz3pjyg= +github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3 h1:5Izo7ZI9zrvG9VLpJdnDl97gNyCFr310RtriuKIJgFk= +github.com/aws/aws-sdk-go-v2/service/rbin v1.18.3/go.mod h1:GlAG1tgrchQnNlO/fxXLmmF6t+v+9fQMNHNdW7Zc8Zc= +github.com/aws/aws-sdk-go-v2/service/rds v1.81.4 h1:tBtjOMKyEWLvsO6HaX6A+0A0V1gKcU2aSZKQXw6MSCM= +github.com/aws/aws-sdk-go-v2/service/rds v1.81.4/go.mod h1:j27FNXhbbHXC3ExFsJkoxq2Y+4dQypf8KFX1IkgwVvM= +github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4 h1:wNBruTRRDfBv2Pz3Mvw6JIJS7ujfTd1ztCG5pIlrfRk= +github.com/aws/aws-sdk-go-v2/service/redshift v1.46.4/go.mod h1:AhuwOvTE4nMwWfJQNZ2khZGV9yXexB2MjNYtCuLQA4s= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3 h1:rtX1ZHGPpqbQGZlPuN1u7nA+0zjq0DB7QTVNlYY/gfw= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.27.3/go.mod h1:8Ah7aUFE9G0dppkn6ZXn1iExeHUV4369IJ2GRi7++Y0= +github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.20.3 h1:dZTe+TGD6B15Qhhugp4MUOCLPzaODOxc5qc6K5/yZDA= +github.com/aws/aws-sdk-go-v2/service/redshiftserverless v1.20.3/go.mod h1:oJRMDbpdkGsrRiSmJUumhj4KuXdP4QN9A5AK1rE0xps= +github.com/aws/aws-sdk-go-v2/service/rekognition v1.43.2 h1:nrR1xZ6QoW7lUvFmLHOwTK2n25nnuPhP2f++C3DlPRc= +github.com/aws/aws-sdk-go-v2/service/rekognition v1.43.2/go.mod h1:UkvOY/p1SKtJgzvwmlPnrFWOP2kj6efrbcbQHFy9qvM= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.12.3 h1:GEkqXpMrNF6UpC8edjE66HZgVpqppvxxMRhHcBbyQiU= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.12.3/go.mod h1:PQCEcRWQIPD+uqrqSaLJDfveDYqHTPaimym1+5WtvMU= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.24.3 h1:lxYeMxHTz8TculPM7bxM4uZxJpAH394xY215ub595H8= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.24.3/go.mod 
h1:wyzvCa9oNmh3Ejs0kM63IR7lq9Vie9bcg2YIg+p9alY= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.23.3 h1:ByynKMsGZGmpUpnQ99y+lS7VxZrNt3mdagCnHd011Kk= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.23.3/go.mod h1:ZR4h87npHPuVQ2SEeoWMe+CO/HcS9g2iYMLnT5HawW8= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.13.3 h1:3lqKckUrVhC86nI5d/7suyv4sBhUJgACHfbs8qTj6+g= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.13.3/go.mod h1:7IIMPfX6TzfxRIJIp1NLYWFkApDOMnlb5XrynzpxMkA= +github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3 h1:MmLCRqP4U4Cw9gJ4bNrCG0mWqEtBlmAVleyelcHARMU= +github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3/go.mod h1:AMPjK2YnRh0YgOID3PqhJA1BRNfXDfGOnSsKHtAe8yA= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.25.3 h1:VGLIgiClxmwxBpGzHERgNgwJMukHZpLcQZqJuQYjAiM= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.25.3/go.mod h1:Kgq5O7ZaDk0mTZmX6YCL+ZtZ1YcJHtGsVubp0OT77MA= +github.com/aws/aws-sdk-go-v2/service/route53profiles v1.2.3 h1:N4f5sliNiWcp3abC+8YpcaVjXuaNJIlz/dBd+saimm0= +github.com/aws/aws-sdk-go-v2/service/route53profiles v1.2.3/go.mod h1:r2B4BvTn3zSMK+BFHGl0q63B/nJMOk9/NukLZzqO8sY= +github.com/aws/aws-sdk-go-v2/service/rum v1.19.3 h1:DR+GYJRPL7eEZknnGdwm+lH686LmUBB/X2YVQDHLNY4= +github.com/aws/aws-sdk-go-v2/service/rum v1.19.3/go.mod h1:5jFxbuc05P/+BbJvVbBspMbzDR2IFU0LegQG3iUvj8g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2 h1:sZXIzO38GZOU+O0C+INqbH7C2yALwfMWpd64tONS/NE= +github.com/aws/aws-sdk-go-v2/service/s3 v1.58.2/go.mod h1:Lcxzg5rojyVPU/0eFwLtcyTaek/6Mtic5B1gJo7e/zE= +github.com/aws/aws-sdk-go-v2/service/s3control v1.46.3 h1:3De8/YQpup0mLNKh0G9JHWJLEkWNdghd5z84vw4v+yw= +github.com/aws/aws-sdk-go-v2/service/s3control v1.46.3/go.mod h1:sUA7DOI2fdRHQQUpvRVfYKTo9P0+UAsWYBHvyqFHcC0= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3 h1:gmpU7E0ntMzXr+yQQIXbiiueOewf/1BQ9WgeaXo6BcQ= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.10.3/go.mod 
h1:jnQp5kPPvEgPmVPm0h/XZPmlx7DQ0pqUiISRO4s6U3s= +github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3 h1:ZJW2OQNpkR8P7URtISmF8twpvz2V0tUN/OgMenlxkao= +github.com/aws/aws-sdk-go-v2/service/schemas v1.26.3/go.mod h1:QcRvTKZ9cBv6TlZECUStXI1z1qlCMWKpPi/ZefknVpQ= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3 h1:ilavrucVBQHYnMjD2KmZQDCU1fuluQb0l9zRigGNVEc= +github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.3/go.mod h1:TKKN7IQoM7uTnyuFm9bm9cw5P//ZYTl4m3htBWQ1G/c= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3 h1:tFzkGJZKDWgwGDSQXwxZK7Bm3NzlKOW6KwNr14xXZqc= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.51.3/go.mod h1:MfWlz2hEZ2O0XdyBBJNtF6qUZwpHtvc892BU7gludBw= +github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3 h1:7isk2tSNmVbm2f8epPfokkHjjWfwS46IpNNmI+rarUo= +github.com/aws/aws-sdk-go-v2/service/securitylake v1.16.3/go.mod h1:X5rHkguK4jCvFOM74tkme3oLUOaR++APKgwhNcIdOW0= +github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.22.3 h1:E4NzUkgPrKmlbC9OxVUEQnTdPRg3MTTiDwmq5dJfH9U= +github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository v1.22.3/go.mod h1:/nzQOH+tOGrQVv5QbVN+88HoNYc15s8aKsJmOT9MPJI= +github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.28.3 h1:l19QC3al5lqQydnJRz1cpduAoL0YoEeSxI5Wb5NUEis= +github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry v1.28.3/go.mod h1:0Em81iN4ZnER1M0XDirgcbsZK3jNghA0YlY2Xw2BDOQ= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.3 h1:EthA93BNgTnk36FoI9DCKtv4S0m63WzdGDYlBp/CvHQ= +github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.31.3/go.mod h1:4xh/h0pevPhBkA4b2iYosZaqrThccxFREQxiGuZpJlc= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.23.3 h1:J6R7Mo3nDY9BmmG4V9EpQa70A0XOoCuWPYTpsmouM48= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.23.3/go.mod h1:be52Ycqv581QoIOZzHfZFWlJLcGAI2M/ItUSlx7lLp0= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.32.3 h1:DLJCsgYZoNIIIFnWd3MXyg9ehgnlihOKDEvOAkzGRMc= 
+github.com/aws/aws-sdk-go-v2/service/sesv2 v1.32.3/go.mod h1:klyMXN+cNAndrESWMyT7LA8Ll0I6Nc03jxfSkeuU/Xg= +github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3 h1:7BK+k08c5r1oqqHeb6ye0affEQQJ/fimBTGZSjmpjwk= +github.com/aws/aws-sdk-go-v2/service/sfn v1.29.3/go.mod h1:+mtHHxsylrf+kjxcbvfnu6jtyTT8Fa9BlqjQk5XJZ80= +github.com/aws/aws-sdk-go-v2/service/shield v1.27.3 h1:SfjI6FuphzspGPvcRD8hjMD6wLUAE6vtJLGrui19j2s= +github.com/aws/aws-sdk-go-v2/service/shield v1.27.3/go.mod h1:JpxjPa91y1hRb3G8xxzhOQFcK/r90it41jA/hD0q+Gg= +github.com/aws/aws-sdk-go-v2/service/signer v1.24.3 h1:vN91JPGjBc5imkkpIqVWolvFxZygpDlRUovx221Wid8= +github.com/aws/aws-sdk-go-v2/service/signer v1.24.3/go.mod h1:1/6iDWLI/6V+I8n9ZnUd5m7zkPWQVituijVZs0jRdGU= +github.com/aws/aws-sdk-go-v2/service/sns v1.31.3 h1:eSTEdxkfle2G98FE+Xl3db/XAXXVTJPNQo9K/Ar8oAI= +github.com/aws/aws-sdk-go-v2/service/sns v1.31.3/go.mod h1:1dn0delSO3J69THuty5iwP0US2Glt0mx2qBBlI13pvw= +github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3 h1:Vjqy5BZCOIsn4Pj8xzyqgGmsSqzz7y/WXbN3RgOoVrc= +github.com/aws/aws-sdk-go-v2/service/sqs v1.34.3/go.mod h1:L0enV3GCRd5iG9B64W35C4/hwsCB00Ib+DKVGTadKHI= +github.com/aws/aws-sdk-go-v2/service/ssm v1.52.3 h1:iu53lwRKbZOGCVUH09g3J0xU8A+bAGVo09VR9K4d0Yg= +github.com/aws/aws-sdk-go-v2/service/ssm v1.52.3/go.mod h1:v7NIzEFIHBiicOMaMTuEmbnzGnqW0d+6ulNALul6fYE= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.24.3 h1:j2hdqn1dz8FPePLCQNXtDMd/6URmRya2Ys3Um78a1Es= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.24.3/go.mod h1:jTZ3loeBr6JRNIhq7C24OwjtzEaV9tAJUtWjLIKoin8= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3 h1:IXODiFsgKoyW7QVWWHoIjdBB2dWPRFPT5KREfBxHoQ8= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.32.3/go.mod h1:JvtI6itHlTxyGew0oT7xYNbF7OA767givRMsCuBFK5k= +github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3 h1:vBcoorWl+c4r5un837H8fhLoS0Kc8SKlGBHpyq7KM9w= +github.com/aws/aws-sdk-go-v2/service/ssmsap v1.15.3/go.mod h1:Mq0FruBai8A9f7fpzjcfD+S+y0I4DkZTygb3HxuqDB4= 
+github.com/aws/aws-sdk-go-v2/service/sso v1.22.3 h1:Fv1vD2L65Jnp5QRsdiM64JvUM4Xe+E0JyVsRQKv6IeA= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.3/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3 h1:pBE7FzR3AUpauidRUITPlDWTQ4hHktI649xZt3e/wKM= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.27.3/go.mod h1:EyoPT+dUT5zqspxSub9KHDWOZyIP30bPgIavBvGGVz0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ= +github.com/aws/aws-sdk-go-v2/service/swf v1.25.3 h1:7zYsHA9ORjiCHYzTJf0g+gwo3mPpn2XbMlWQreiXWdM= +github.com/aws/aws-sdk-go-v2/service/swf v1.25.3/go.mod h1:FIwuqwcEguy+ToyQzMwpMAXc9Kxh5QwH3nlXMeHdHnA= +github.com/aws/aws-sdk-go-v2/service/synthetics v1.26.3 h1:JPgfM6lEqJ3O3kYLYWxYaZEL4pE4binxBWYzXxFADBE= +github.com/aws/aws-sdk-go-v2/service/synthetics v1.26.3/go.mod h1:iVEoUBC/J06ZwJujK/pa57Gm+G9OOfYxynf2O2hWtWc= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3 h1:Qbimk+9ZyMxjyunIkdvaDeA/LLbeSV0NqurwC2D/gKg= +github.com/aws/aws-sdk-go-v2/service/timestreaminfluxdb v1.2.3/go.mod h1:2AEQ9klGEJdMIg+bC1gnGGiJqKebIkhfwJyNYBYh9dg= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3 h1:GbbpHIz5tBazjVOunsf6xcgruWFvj1DT+jUNyKDwK2s= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.27.3/go.mod h1:sXSJhu0vub083lif2S+g7fPocwVuqu9D9Bp1FEIYqOE= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.39.3 h1:vgXMSzoRvWgptv2xmpsF7kWUiwr/e+RrBxLVIAH3pfY= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.39.3/go.mod h1:xtCxGy771E4UOUqmxqLa/EoA73U/06wA/wvEexj9JSE= +github.com/aws/aws-sdk-go-v2/service/transfer v1.50.3 
h1:CpeH+cboQS9A0ar387V6dxVxs6UYUXO1N4rtRU2244c= +github.com/aws/aws-sdk-go-v2/service/transfer v1.50.3/go.mod h1:plbUFzNIVQ/qYehjK2qKzZNP3Qu5vob2Jeezeeb8pMc= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.17.3 h1:RvKL61+VcqZIL9dS3BE0bQTyN1lCrDCv3cz9kdkNm6k= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.17.3/go.mod h1:AmO4nIKOKHzJCbVn467c4keHpzmZwy7s98zEsLjcJos= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.10.3 h1:sg673tzRhiA0N0iyc8EojgNnenuUQFFJmzxa/ni3VGI= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.10.3/go.mod h1:vo6OSH+QLP6X9gyZiccj6SV1eiPWgtT7V5GongrGA2s= +github.com/aws/aws-sdk-go-v2/service/waf v1.23.3 h1:D0EDHlqaPWwm05+/3SaJye4HwXs6TWPJe2wINrbc+Dw= +github.com/aws/aws-sdk-go-v2/service/waf v1.23.3/go.mod h1:M0olbEl0NTVF9337MxfjJz4iUl6za1Zka5ZFSZvJ+AU= +github.com/aws/aws-sdk-go-v2/service/wafregional v1.23.3 h1:7dr6En0/6KRFoz8VmnYks9dVvL+tkL5RjRrxqGzr1zI= +github.com/aws/aws-sdk-go-v2/service/wafregional v1.23.3/go.mod h1:24TtlRsv4LKAE3VnRJQhpatr8cpX0yj8NSzg8/lxOCw= +github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4 h1:1khBA5uryBRJoCb4G2iR5RT06BkfPEjjDCHAiRb8P3Q= +github.com/aws/aws-sdk-go-v2/service/wafv2 v1.51.4/go.mod h1:QpFImaPGKNwa+MiZ+oo6LbV1PVQBapc0CnrAMRScoxM= +github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.32.3 h1:BjPTq4qiR/Ywu3yf3DeGepCj5RB1c4rtEUmE62bmkus= +github.com/aws/aws-sdk-go-v2/service/wellarchitected v1.32.3/go.mod h1:jeL9apgA3x3fwH3ZkaDPIfYcXZUlmCXNrU4o+6oY4oM= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.44.2 h1:xqlHduaOQOIstwjydeUA3MyQOsX78Xz+0xbkc/Lwi18= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.44.2/go.mod h1:YRGgDr23EJC+32pPpWnoVB2p4JP3u5xASobpmoOlhEo= +github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.21.3 h1:fZVJVU+fgDbHDZpHv447C43ZM9E9QHbj7reT6tB19FA= +github.com/aws/aws-sdk-go-v2/service/workspacesweb v1.21.3/go.mod h1:CWln0RlRf0Cc4Csr4HkyXI6BkkIujyTeWuwTo3hijP0= +github.com/aws/aws-sdk-go-v2/service/xray v1.27.3 
h1:0jSgvovW7R95P8XJiGxYfrnxdryQyClvebJeYbUlecw= +github.com/aws/aws-sdk-go-v2/service/xray v1.27.3/go.mod h1:yKewwhgsy9idJZ7oJLrFleYmy2oq/JSLQWdHNgLUYMM= +github.com/aws/smithy-go v1.20.3 h1:ryHwveWzPV5BIof6fyDvor6V3iUL7nTfiTKXHiW05nE= +github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E= github.com/beevik/etree v1.4.0 h1:oz1UedHRepuY3p4N5OjE0nK1WLCqtzHf25bxplKOHLs= github.com/beevik/etree v1.4.0/go.mod h1:cyWiXwGoasx60gHvtnEh5x8+uIjUVnjWqBvEnhnqKDA= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= @@ -438,8 +480,8 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/gdavison/terraform-plugin-log v0.0.0-20230928191232-6c653d8ef8fb h1:HM67IMNxlkqGxAM5ymxMg2ANCcbL4oEr5cy+tGZ6fNo= @@ -475,10 +517,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0 h1:l16/Vrl0+x+HjHJWEjcKPwHYoxN9EC78gAFXKlH6m84= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.23.0/go.mod 
h1:HAmscHyzSOfB1Dr16KLc177KNbn83wscnZC+N7WyaM8= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.53 h1:jgOMbQlypMpUMaqYJotjT7ERSMvQP00Mppgjgh8lNt8= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.53/go.mod h1:nvpXIeF0ANfZ7sMssXKSSR3pyXfksajxoC2tl4jjN08= -github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.54 h1:raRbM2Wynqv0Nyhe7AwVnFgb2roGSvpSUeQKxEg8Lts= -github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.54/go.mod h1:Q5SSO00VVkkbiPtT6ssI9twHV7yfh4gPLOtoLQJMbzw= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.54 h1:O37FpbmkDSmSPgukMJLAzJzo5WBSFQx0iwn4PlY6BKI= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.54/go.mod h1:TJ+Mz49cn0zKURLX5haphWDbmGWz15OsEiLp1CcXDwY= +github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.55 h1:7izXD15MCmPcWbKJ5qAwcSlnWvTwkioIJkq0+OJIJG0= +github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.55/go.mod h1:I4WCc/OKwraiUqRXb4p+/sVMyMld2EXTSacu5RShbqI= github.com/hashicorp/awspolicyequivalence v1.6.0 h1:7aadmkalbc5ewStC6g3rljx1iNvP4QyAhg2KsHx8bU8= github.com/hashicorp/awspolicyequivalence v1.6.0/go.mod h1:9IOaIHx+a7C0NfUNk1A93M7kHd5rJ19aoUx37LZGC14= github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= @@ -505,34 +547,34 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0= -github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA= -github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= -github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= 
+github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= +github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14= +github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= -github.com/hashicorp/terraform-plugin-framework v1.9.0 h1:caLcDoxiRucNi2hk8+j3kJwkKfvHznubyFsJMWfZqKU= -github.com/hashicorp/terraform-plugin-framework v1.9.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-framework v1.10.0 h1:xXhICE2Fns1RYZxEQebwkB2+kXouLC932Li9qelozrc= +github.com/hashicorp/terraform-plugin-framework v1.10.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0 h1:b8vZYB/SkXJT4YPbT3trzE6oJ7dPyMy68+9dEDKsJjE= github.com/hashicorp/terraform-plugin-framework-jsontypes v0.1.0/go.mod h1:tP9BC3icoXBz72evMS5UTFvi98CiKhPdXF6yLs1wS8A= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-timetypes v0.4.0 h1:XLI93Oqw2/KTzYjgCXrUnm8LBkGAiHC/mDQg5g5Vob4= github.com/hashicorp/terraform-plugin-framework-timetypes v0.4.0/go.mod 
h1:mGuieb3bqKFYwEYB4lCMt302Z3siyv4PFYk/41wAUps= -github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= -github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0 h1:bxZfGo9DIUoLLtHMElsu+zwqI4IsMZQBRRy4iLzZJ8E= +github.com/hashicorp/terraform-plugin-framework-validators v0.13.0/go.mod h1:wGeI02gEhj9nPANU62F2jCaHjXulejm/X+af4PdZaNo= github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= github.com/hashicorp/terraform-plugin-mux v0.16.0 h1:RCzXHGDYwUwwqfYYWJKBFaS3fQsWn/ZECEiW7p2023I= github.com/hashicorp/terraform-plugin-mux v0.16.0/go.mod h1:PF79mAsPc8CpusXPfEVa4X8PtkB+ngWoiUClMrNZlYo= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0 h1:kJiWGx2kiQVo97Y5IOGR4EMcZ8DtMswHhUuFibsCQQE= github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0/go.mod h1:sl/UoabMc37HA6ICVMmGO+/0wofkVIRxf+BMb/dnoIg= -github.com/hashicorp/terraform-plugin-testing v1.8.0 h1:wdYIgwDk4iO933gC4S8KbKdnMQShu6BXuZQPScmHvpk= -github.com/hashicorp/terraform-plugin-testing v1.8.0/go.mod h1:o2kOgf18ADUaZGhtOl0YCkfIxg01MAiMATT2EtIHlZk= +github.com/hashicorp/terraform-plugin-testing v1.9.0 h1:xOsQRqqlHKXpFq6etTxih3ubdK3HVDtfE1IY7Rpd37o= +github.com/hashicorp/terraform-plugin-testing v1.9.0/go.mod h1:fhhVx/8+XNJZTD5o3b4stfZ6+q7z9+lIWigIYdT6/44= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -638,33 +680,33 @@ github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQ github.com/yuin/goldmark 
v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0 h1:FGMfzzxfkNkw+gvKJOeT8dSmBjgrSFh+ClLl+OMKPno= -go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.51.0/go.mod h1:hmHUXiKhyxbIhuNfG5ZTySq9HqqxJFNxaFOfXXvoMmQ= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo= +github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0 h1:kAytSRJYoIy4eJtDOfSGf9LOCD4QdXFN37YJs0+bYrw= +go.opentelemetry.io/contrib/instrumentation/github.com/aws/aws-sdk-go-v2/otelaws v0.52.0/go.mod h1:l6VnFEqDdeMSMfwULTDDY9ewlnlVLhmvBainVT+h/Zs= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= 
+go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -683,13 +725,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/term v0.22.0 
h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -701,8 +743,8 @@ golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= diff --git a/website/allowed-subcategories.txt b/website/allowed-subcategories.txt index 1198ceb4469..25874df602c 100644 --- a/website/allowed-subcategories.txt +++ b/website/allowed-subcategories.txt @@ -5,8 +5,6 @@ AMP (Managed Prometheus) API Gateway API Gateway V2 Account Management -Agents for Amazon Bedrock -Amazon Bedrock Amazon Q Business Amplify App Mesh @@ -18,6 +16,7 @@ AppIntegrations AppStream 2.0 AppSync Application Auto Scaling +Application Signals Athena Audit Manager Auto Scaling @@ -25,6 +24,8 @@ Auto Scaling Plans 
BCM Data Exports Backup Batch +Bedrock +Bedrock Agents CE (Cost Explorer) Chatbot Chime @@ -45,6 +46,7 @@ CloudWatch Application Insights CloudWatch Evidently CloudWatch Internet Monitor CloudWatch Logs +CloudWatch Network Monitor CloudWatch Observability Access Manager CloudWatch RUM CloudWatch Synthetics @@ -119,6 +121,7 @@ FinSpace GameLift Global Accelerator Glue +Glue DataBrew Ground Station GuardDuty HealthLake diff --git a/website/docs/cdktf/python/d/apigatewayv2_api.html.markdown b/website/docs/cdktf/python/d/apigatewayv2_api.html.markdown index 9ab45547016..49eef17892e 100644 --- a/website/docs/cdktf/python/d/apigatewayv2_api.html.markdown +++ b/website/docs/cdktf/python/d/apigatewayv2_api.html.markdown @@ -36,7 +36,7 @@ class MyConvertedCode(TerraformStack): The arguments of this data source act as filters for querying the available APIs in the current region. The given filters must match exactly one API whose data will be exported as attributes. -This argument supports the following arguments: +This data source supports the following arguments: * `api_id` - (Required) API identifier. @@ -70,4 +70,4 @@ The `cors_configuration` object supports the following: * `expose_headers` - Set of exposed HTTP headers. * `max_age` - Number of seconds that the browser should cache preflight request results. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/appstream_image.html.markdown b/website/docs/cdktf/python/d/appstream_image.html.markdown new file mode 100644 index 00000000000..a6034a8ddd5 --- /dev/null +++ b/website/docs/cdktf/python/d/appstream_image.html.markdown @@ -0,0 +1,88 @@ +--- +subcategory: "AppStream 2.0" +layout: "aws" +page_title: "AWS: aws_appstream_image" +description: |- + Terraform data source for describing an AWS AppStream 2.0 Appstream Image. +--- + + + +# Data Source: aws_appstream_image + +Terraform data source for managing an AWS AppStream 2.0 Image. 
+ +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_appstream_image import DataAwsAppstreamImage +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsAppstreamImage(self, "test", + most_recent=True, + name="AppStream-WinServer2019-06-17-2024", + type="PUBLIC" + ) +``` + +## Argument Reference + +The following arguments are optional: + +* `name` - Name of the image being searched for. Cannot be used with name_regex or arn. +* `name_regex` - Regular expression name of the image being searched for. Cannot be used with arn or name. +* `arn` - Arn of the image being searched for. Cannot be used with name_regex or name. +* `type` - The type of image which must be (PUBLIC, PRIVATE, or SHARED). +* `most_recent` - Boolean that if it is set to true and there are multiple images returned the most recent will be returned. If it is set to false and there are multiple images return the datasource will error. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `application` - A application object that contains the following: + * `app_block_arn` - The app block ARN of the application. + * `created_time` - The time at which the application was created within the app block. + * `description` - The description of the application. + * `display_name` - The application name to display. + * `enabled` - Bool based on if the application is enabled. + * `icon_s3_location` - A list named icon_s3_location that contains the following: + * `s3_bucket` - S3 bucket of the S3 object. + * `s3_key` - S3 key of the S3 object. + * `icon_url` - URL of the application icon. 
This URL may be time-limited. + * `instance_families` - List of the instance families of the application. + * `launch_parameters` - Arguments that are passed to the application at it's launch. + * `launch_path` - Path to the application's excecutable in the instance. + * `metadata` - String to string map that contains additional attributes used to describe the application. + * `Name` - Name of the application. + * `platforms` - Array of strings describing the platforms on which the application can run. + Values will be from: WINDOWS | WINDOWS_SERVER_2016 | WINDOWS_SERVER_2019 | WINDOWS_SERVER_2022 | AMAZON_LINUX2 + * `working_directory` - Working directory for the application. +* `appstream_agent_version` - Version of the AppStream 2.0 agent to use for instances that are launched from this image. Has a maximum length of 100 characters. +* `arn` - ARN of the image. +* `base_image_arn` - ARN of the image from which the image was created. +* `created_time` - Time at which this image was created. +* `description` - Description of image. +* `display_name` - Image name to display. +* `image_builder_name` - The name of the image builder that was used to created the private image. If the image is sharedthen the value is null. +* `image_builder_supported` - Boolean to indicate whether an image builder can be launched from this image. +* `image error` - Resource error object that describes the error containing the following: + * `error_code` - Error code of the image. 
Values will be from: IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION | IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION | IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION | NETWORK_INTERFACE_LIMIT_EXCEEDED | INTERNAL_SERVICE_ERROR | IAM_SERVICE_ROLE_IS_MISSING | MACHINE_ROLE_IS_MISSING | STS_DISABLED_IN_REGION | SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES | IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION | SUBNET_NOT_FOUND | IMAGE_NOT_FOUND | INVALID_SUBNET_CONFIGURATION | SECURITY_GROUPS_NOT_FOUND | IGW_NOT_ATTACHED | IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION | FLEET_STOPPED | FLEET_INSTANCE_PROVISIONING_FAILURE | DOMAIN_JOIN_ERROR_FILE_NOT_FOUND | DOMAIN_JOIN_ERROR_ACCESS_DENIED | DOMAIN_JOIN_ERROR_LOGON_FAILURE | DOMAIN_JOIN_ERROR_INVALID_PARAMETER | DOMAIN_JOIN_ERROR_MORE_DATA | DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN | DOMAIN_JOIN_ERROR_NOT_SUPPORTED | DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME | DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED | DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED | DOMAIN_JOIN_NERR_PASSWORD_EXPIRED | DOMAIN_JOIN_INTERNAL_SERVICE_ERROR as the values. + * `error_message` - Error message of the image. + * `error_timestamp` - Time when the error occurred. +* `image_permissions` - List of strings describing the image permissions containing the following: + * `allow_fleet` - Boolean indicating if the image can be used for a fleet. + * `allow_image_builder` - indicated whether the image can be used for an image builder. +* `platform` - Operating system platform of the image. Values will be from: WINDOWS | WINDOWS_SERVER_2016 | WINDOWS_SERVER_2019 | WINDOWS_SERVER_2022 | AMAZON_LINUX2 +* `public_image_released_date` - Release date of base image if public. For private images, it is the release date of the base image that it was created from. +* `state` - Current state of image. Image starts in PENDING state which changes to AVAILABLE if creation passes and FAILED if it fails. 
Values will be from: PENDING | AVAILABLE | FAILED | COPYING | DELETING | CREATING | IMPORTING. +* `visibility` - Visibility type enum indicating whether the image is PUBLIC, PRIVATE, or SHARED. Valid values include: PUBLIC | PRIVATE | SHARED. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/autoscaling_group.html.markdown b/website/docs/cdktf/python/d/autoscaling_group.html.markdown index bf50dc5ae75..3fee0972ece 100644 --- a/website/docs/cdktf/python/d/autoscaling_group.html.markdown +++ b/website/docs/cdktf/python/d/autoscaling_group.html.markdown @@ -93,6 +93,7 @@ interpolation. * `instance_generations` - List of instance generation names. * `local_storage` - Indicates whether instance types with instance store volumes are included, excluded, or required. * `local_storage_types` - List of local storage type names. + * `max_spot_price_as_percentage_of_optimal_on_demand_price` - Price protection threshold for Spot Instances. * `memory_gib_per_vcpu` - List of objects describing the minimum and maximum amount of memory (GiB) per vCPU. * `min` - Minimum. * `max` - Maximum. @@ -144,4 +145,4 @@ interpolation. * `pool_state` - Instance state to transition to after the lifecycle actions are complete. * `warm_pool_size` - Current size of the warm pool. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/backup_plan.html.markdown b/website/docs/cdktf/python/d/backup_plan.html.markdown index 20249978101..41592b3448a 100644 --- a/website/docs/cdktf/python/d/backup_plan.html.markdown +++ b/website/docs/cdktf/python/d/backup_plan.html.markdown @@ -43,7 +43,8 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the backup plan. * `name` - Display name of a backup plan. +* `rule` - Rules of a backup plan. * `tags` - Metadata that you can assign to help organize the plans you create. 
* `version` - Unique, randomly generated, Unicode, UTF-8 encoded string that serves as the version ID of the backup plan. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_custom_model.html.markdown b/website/docs/cdktf/python/d/bedrock_custom_model.html.markdown index 886763ae22f..f1ef668f084 100644 --- a/website/docs/cdktf/python/d/bedrock_custom_model.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_custom_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_model" description: |- @@ -61,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `validation_metrics` - The loss metric for each validator that you provided. * `validation_loss` - The validation loss associated with the validator. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_custom_models.html.markdown b/website/docs/cdktf/python/d/bedrock_custom_models.html.markdown index 77b71969f25..b731fcdd7cb 100644 --- a/website/docs/cdktf/python/d/bedrock_custom_models.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_custom_models.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_models" description: |- @@ -42,4 +42,4 @@ This data source exports the following attributes in addition to the arguments a * `model_arn` - The ARN of the custom model. * `model_name` - The name of the custom model. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_foundation_model.html.markdown b/website/docs/cdktf/python/d/bedrock_foundation_model.html.markdown index 6ede9811d5a..41b02babe27 100644 --- a/website/docs/cdktf/python/d/bedrock_foundation_model.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_foundation_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_foundation_model" description: |- @@ -57,4 +57,4 @@ This data source exports the following attributes in addition to the arguments a * `provider_name` - Model provider name. * `response_streaming_supported` - Indicates whether the model supports streaming. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/bedrock_foundation_models.html.markdown b/website/docs/cdktf/python/d/bedrock_foundation_models.html.markdown index cf74ad8b058..a8f2b75daa9 100644 --- a/website/docs/cdktf/python/d/bedrock_foundation_models.html.markdown +++ b/website/docs/cdktf/python/d/bedrock_foundation_models.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_foundation_models" description: |- @@ -78,4 +78,4 @@ This data source exports the following attributes in addition to the arguments a * `provider_name` - Model provider name. * `response_streaming_supported` - Indicates whether the model supports streaming. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cloudfront_origin_access_control.html.markdown b/website/docs/cdktf/python/d/cloudfront_origin_access_control.html.markdown new file mode 100644 index 00000000000..6e5b7e8f121 --- /dev/null +++ b/website/docs/cdktf/python/d/cloudfront_origin_access_control.html.markdown @@ -0,0 +1,51 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_origin_access_control" +description: |- + Use this data source to retrieve information for an Amazon CloudFront origin access control config. +--- + + + +# Data Source: aws_cloudfront_origin_access_control + +Use this data source to retrieve information for an Amazon CloudFront origin access control config. + +## Example Usage + +The below example retrieves a CloudFront origin access control config. + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_cloudfront_origin_access_identity import DataAwsCloudfrontOriginAccessIdentity +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsCloudfrontOriginAccessIdentity(self, "example", + id="E2T5VTFBZJ3BJB" + ) +``` + +## Argument Reference + +* `id` (Required) - The identifier for the origin access control settings. For example: `E2T5VTFBZJ3BJB`. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `description` - A description of the origin access control. +* `etag` - Current version of the origin access control's information. For example: `E2QWRUHAPOMQZL`. +* `name` - A name to identify the origin access control. 
+* `origin_access_control_origin_type` - The type of origin that this origin access control is for. +* `signing_behavior` - Specifies which requests CloudFront signs. +* `signing_protocol` - The signing protocol of the origin access control, which determines how CloudFront signs (authenticates) requests. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/cognito_user_pool.html.markdown b/website/docs/cdktf/python/d/cognito_user_pool.html.markdown new file mode 100644 index 00000000000..033cdebedb4 --- /dev/null +++ b/website/docs/cdktf/python/d/cognito_user_pool.html.markdown @@ -0,0 +1,133 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_user_pool" +description: |- + Terraform data source for managing an AWS Cognito User Pool. +--- + + + +# Data Source: aws_cognito_user_pool + +Terraform data source for managing an AWS Cognito User Pool. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws. import DataAwsCognitoUserPool +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsCognitoUserPool(self, "example", + user_pool_id="us-west-2_aaaaaaaaa" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `user_pool_id` - (Required) The cognito pool ID + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the User Pool. +* [account_recovery_setting](#account-recover-setting) - The available verified method a user can use to recover their password when they call ForgotPassword. 
You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email. +* [admin_create_user_config](#admin-create-user-config) - The configuration for AdminCreateUser requests. +* `auto_verified_attributes` - The attributes that are auto-verified in a user pool. +* `creation_date` - The date and time, in ISO 8601 format, when the item was created. +* `custom_domain` - A custom domain name that you provide to Amazon Cognito. This parameter applies only if you use a custom domain to host the sign-up and sign-in pages for your application. An example of a custom domain name might be auth.example.com. +* `deletion_protection` - When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. +* [device_configuration](#device-configuration) - The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool. +* `domain` - The domain prefix, if the user pool has a domain associated with it. +* [email_configuration](#email-configuration) - The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool. +* `estimated_number_of_users` - A number estimating the size of the user pool. +* [lambda_config](#lambda-config) - The AWS Lambda triggers associated with the user pool. +* `last_modified_date` - The date and time, in ISO 8601 format, when the item was modified. 
+* `mfa_configuration` - Can be one of the following values: `OFF` | `ON` | `OPTIONAL` +* `name` - The name of the user pool. +* [schema_attributes](#schema-attributes) - A list of the user attributes and their properties in your user pool. The attribute schema contains standard attributes, custom attributes with a custom: prefix, and developer attributes with a dev: prefix. For more information, see User pool attributes. +* `sms_authentication_message` - The contents of the SMS authentication message. +* `sms_configuration_failure` - The reason why the SMS configuration can't send the messages to your users. +* `sms_verification_message` - The contents of the SMS authentication message. +* `user_pool_tags` - The tags that are assigned to the user pool. A tag is a label that you can apply to user pools to categorize and manage them in different ways, such as by purpose, owner, environment, or other criteria. +* `username_attributes` - Specifies whether a user can use an email address or phone number as a username when they sign up. + +### account recover setting + +* [recovery_mechanism](#recovery-mechanism) - Details about an individual recovery mechanism. + +### recovery mechanism + +* `name` - Name of the recovery mechanism (e.g., email, phone number). +* `priority` - Priority of this mechanism in the recovery process (lower numbers are higher priority). + +### admin create user config + +* `allow_admin_create_user_only` - Whether only admins can create users. +* `unused_account_validity_days` - Number of days an unconfirmed user account remains valid. +* [invite_message_template](#invite-message-template) - Templates for invitation messages. + +### invite message template + +* `email_message` - Email message content. +* `email_subject` - Email message subject. +* `sms_message` - SMS message content. + +### device configuration + +* `challenge_required_on_new_device` - Whether a challenge is required on new devices. 
+* `device_only_remembered_on_user_prompt` - Whether devices are only remembered if the user prompts it. + +### email configuration + +* `configuration_set` - Configuration set used for sending emails. +* `email_sending_account` - Email sending account. +* `from` - Email sender address. +* `reply_to_email_address` - Reply-to email address. +* `source_arn` - Source Amazon Resource Name (ARN) for emails. + +### lambda config + +* [custom_email_sender](#lambda-function) - Configuration for a custom email sender Lambda function. +* [custom_sms_sender](#lambda-function) - Configuration for a custom SMS sender Lambda function +* [pre_token_generation_config](#lambda-function) - Configuration for a Lambda function that executes before token generation. + +### lambda function + +* `lambda_arn` - ARN of the Lambda function. +* `lambda_version` - Version of the Lambda function. + +### schema attributes + +* `attribute_data_type` - Data type of the attribute (e.g., string, number). +* `developer_only_attribute` - Whether the attribute is for developer use only. +* `mutable` - Whether the attribute can be changed after user creation. +* `name` - Name of the attribute. +* `required` - Whether the attribute is required during user registration. +* [number_attribute_constraints](#number-attribute-constraints) - Constraints for numeric attributes. +* [string_attribute_constraints](#string-attribute-constraints) - Constraints for string attributes. + +### number attribute constraints + +* `max_value` - Maximum allowed value. +* `min_value` - Minimum allowed value. + +### string attribute constraints + +* `max_length` - Maximum allowed length. +* `min_length` - Minimum allowed length. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_contact_flow.html.markdown b/website/docs/cdktf/python/d/connect_contact_flow.html.markdown index 1ca5af79b36..0ff15de5ab4 100644 --- a/website/docs/cdktf/python/d/connect_contact_flow.html.markdown +++ b/website/docs/cdktf/python/d/connect_contact_flow.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `contact_flow_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `contact_flow_id` - (Optional) Returns information on a specific Contact Flow by contact flow id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance @@ -74,4 +74,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Tags to assign to the Contact Flow. * `type` - Type of Contact Flow. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_contact_flow_module.html.markdown b/website/docs/cdktf/python/d/connect_contact_flow_module.html.markdown index a33366b9fc2..98129c89244 100644 --- a/website/docs/cdktf/python/d/connect_contact_flow_module.html.markdown +++ b/website/docs/cdktf/python/d/connect_contact_flow_module.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `contact_flow_module_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `contact_flow_module_id` - (Optional) Returns information on a specific Contact Flow Module by contact flow module id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance @@ -75,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - Type of Contact Flow Module Module. Values are either `ACTIVE` or `ARCHIVED`. 
* `status` - Status of the Contact Flow Module Module. Values are either `PUBLISHED` or `SAVED`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_hours_of_operation.html.markdown b/website/docs/cdktf/python/d/connect_hours_of_operation.html.markdown index ebe67466937..44fc6e3ce0b 100644 --- a/website/docs/cdktf/python/d/connect_hours_of_operation.html.markdown +++ b/website/docs/cdktf/python/d/connect_hours_of_operation.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `hours_of_operation_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `hours_of_operation_id` - (Optional) Returns information on a specific Hours of Operation by hours of operation id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance @@ -93,4 +93,4 @@ A `start_time` block supports the following arguments: * `hours` - Hour of opening. * `minutes` - Minute of opening. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_instance.html.markdown b/website/docs/cdktf/python/d/connect_instance.html.markdown index 9f1c1741475..90ca3d02f3b 100644 --- a/website/docs/cdktf/python/d/connect_instance.html.markdown +++ b/website/docs/cdktf/python/d/connect_instance.html.markdown @@ -56,7 +56,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** One of either `instance_id` or `instance_alias` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instance_id` - (Optional) Returns information on a specific connect instance by id @@ -80,4 +80,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - State of the instance. * `service_role` - Service role of the instance. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_queue.html.markdown b/website/docs/cdktf/python/d/connect_queue.html.markdown index 22b22247ca1..089a50cfb84 100644 --- a/website/docs/cdktf/python/d/connect_queue.html.markdown +++ b/website/docs/cdktf/python/d/connect_queue.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `queue_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `queue_id` - (Optional) Returns information on a specific Queue by Queue id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance @@ -84,4 +84,4 @@ A `outbound_caller_config` block supports the following arguments: * `outbound_caller_id_number_id` - Specifies the caller ID number. * `outbound_flow_id` - Outbound whisper flow to be used during an outbound call. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_quick_connect.html.markdown b/website/docs/cdktf/python/d/connect_quick_connect.html.markdown index 29a6670ce54..68face663a3 100644 --- a/website/docs/cdktf/python/d/connect_quick_connect.html.markdown +++ b/website/docs/cdktf/python/d/connect_quick_connect.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `quick_connect_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `quick_connect_id` - (Optional) Returns information on a specific Quick Connect by Quick Connect id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance @@ -96,4 +96,4 @@ A `user_config` block contains the following arguments: * `contact_flow_id` - Identifier of the contact flow. * `user_id` - Identifier for the user. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_routing_profile.html.markdown b/website/docs/cdktf/python/d/connect_routing_profile.html.markdown index 666aec49b57..77e883fe998 100644 --- a/website/docs/cdktf/python/d/connect_routing_profile.html.markdown +++ b/website/docs/cdktf/python/d/connect_routing_profile.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `routing_profile_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instance_id` - Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Routing Profile by name @@ -90,4 +90,4 @@ A `queue_configs` block supports the following attributes: * `queue_id` - Identifier for the queue. * `queue_name` - Name for the queue. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_security_profile.html.markdown b/website/docs/cdktf/python/d/connect_security_profile.html.markdown index bc9f9f23494..cf2f3d231d6 100644 --- a/website/docs/cdktf/python/d/connect_security_profile.html.markdown +++ b/website/docs/cdktf/python/d/connect_security_profile.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `security_profile_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `security_profile_id` - (Optional) Returns information on a specific Security Profile by Security Profile id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance @@ -75,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a * `permissions` - List of permissions assigned to the security profile. * `tags` - Map of tags to assign to the Security Profile. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_user.html.markdown b/website/docs/cdktf/python/d/connect_user.html.markdown index 2750c80434f..a01f4f529fd 100644 --- a/website/docs/cdktf/python/d/connect_user.html.markdown +++ b/website/docs/cdktf/python/d/connect_user.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `user_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific User by name @@ -96,4 +96,4 @@ A `phone_config` block supports the following attributes: * `desk_phone_number` - The phone number for the user's desk phone. * `phone_type` - The phone type. Valid values are `DESK_PHONE` and `SOFT_PHONE`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_user_hierarchy_group.html.markdown b/website/docs/cdktf/python/d/connect_user_hierarchy_group.html.markdown index 7ace9c2a50c..96ba1ebe17e 100644 --- a/website/docs/cdktf/python/d/connect_user_hierarchy_group.html.markdown +++ b/website/docs/cdktf/python/d/connect_user_hierarchy_group.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `hierarchy_group_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `hierarchy_group_id` - (Optional) Returns information on a specific hierarchy group by hierarchy group id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance @@ -88,4 +88,4 @@ A level block supports the following attributes: * `id` - The identifier of the hierarchy group. * `name` - Name of the hierarchy group. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/connect_vocabulary.html.markdown b/website/docs/cdktf/python/d/connect_vocabulary.html.markdown index ad3f0d06d79..b2fa71d4229 100644 --- a/website/docs/cdktf/python/d/connect_vocabulary.html.markdown +++ b/website/docs/cdktf/python/d/connect_vocabulary.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** `instance_id` and one of either `name` or `vocabulary_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Vocabulary by name @@ -79,4 +79,4 @@ separated by a colon (`:`). * `tags` - A map of tags to assign to the Vocabulary. * `vocabulary_id` - The identifier of the custom vocabulary. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/db_snapshot.html.markdown b/website/docs/cdktf/python/d/db_snapshot.html.markdown index 1b441eebf5c..de4aa73d276 100644 --- a/website/docs/cdktf/python/d/db_snapshot.html.markdown +++ b/website/docs/cdktf/python/d/db_snapshot.html.markdown @@ -60,7 +60,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** One of either `db_instance_identifier` or `db_snapshot_identifier` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `most_recent` - (Optional) If more than one result is returned, use the most recent Snapshot. @@ -100,4 +100,4 @@ This data source exports the following attributes in addition to the arguments a * `snapshot_create_time` - Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). Changes for the copy when the snapshot is copied. * `original_snapshot_create_time` - Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). 
Doesn't change when the snapshot is copied. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/default_tags.html.markdown b/website/docs/cdktf/python/d/default_tags.html.markdown index febf0bc8c22..bc5a3c07fa3 100644 --- a/website/docs/cdktf/python/d/default_tags.html.markdown +++ b/website/docs/cdktf/python/d/default_tags.html.markdown @@ -86,11 +86,6 @@ This data source has no arguments. This data source exports the following attributes in addition to the arguments above: -* `tags` - Blocks of default tags set on the provider. See details below. +* `tags` - Key-value mapping of provider default tags. -### tags - -* `key` - Key name of the tag (i.e., `tags.#.key`). -* `value` - Value of the tag (i.e., `tags.#.value`). - - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_capacity_block_offering.html.markdown b/website/docs/cdktf/python/d/ec2_capacity_block_offering.html.markdown new file mode 100644 index 00000000000..76ee87a757c --- /dev/null +++ b/website/docs/cdktf/python/d/ec2_capacity_block_offering.html.markdown @@ -0,0 +1,59 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_ec2_capacity_block_offering" +description: |- + Information about a single EC2 Capacity Block Offering. +--- + + + +# Data Source: aws_ec2_capacity_block_offering + +Information about a single EC2 Capacity Block Offering. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_ec2_capacity_block_offering import DataAwsEc2CapacityBlockOffering +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsEc2CapacityBlockOffering(self, "example", + capacity_duration_hours=24, + end_date_range="2024-05-30T15:04:05Z", + instance_count=1, + instance_platform="Linux/UNIX", + instance_type="p4d.24xlarge", + start_date_range="2024-04-28T15:04:05Z" + ) +``` + +## Argument Reference + +This data source supports the following arguments: + +* `capacity_duration_hours` - (Required) The amount of time of the Capacity Block reservation in hours. +* `end_date_range` - (Optional) The date and time at which the Capacity Block Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) +* `instance_count` - (Required) The number of instances for which to reserve capacity. +* `instance_platform` - (Required) The type of operating system for which to reserve capacity. Valid options are `Linux/UNIX`, `Red Hat Enterprise Linux`, `SUSE Linux`, `Windows`, `Windows with SQL Server`, `Windows with SQL Server Enterprise`, `Windows with SQL Server Standard` or `Windows with SQL Server Web`. +* `instance_type` - (Required) The instance type for which to reserve capacity. +* `start_date_range` - (Optional) The date and time at which the Capacity Block Reservation starts. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `availability_zone` - The Availability Zone in which to create the Capacity Reservation. +* `currency_code` - The currency of the payment for the Capacity Block. +* `capacity_block_offering_id` - The Capacity Block Reservation ID. +* `upfront_fee` - The total price to be paid up front. +* `tenancy` - Indicates the tenancy of the Capacity Reservation. Specify either `default` or `dedicated`. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachments.html.markdown b/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachments.html.markdown new file mode 100644 index 00000000000..e53968e6bd6 --- /dev/null +++ b/website/docs/cdktf/python/d/ec2_transit_gateway_peering_attachments.html.markdown @@ -0,0 +1,94 @@ +--- +subcategory: "Transit Gateway" +layout: "aws" +page_title: "AWS: aws_ec2_transit_gateway_peering_attachments" +description: |- + Get information on EC2 Transit Gateway Peering Attachments +--- + + + +# Data Source: aws_ec2_transit_gateway_peering_attachments + +Get information on EC2 Transit Gateway Peering Attachments. + +## Example Usage + +### All Resources + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_ec2_transit_gateway_peering_attachments import DataAwsEc2TransitGatewayPeeringAttachments +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsEc2TransitGatewayPeeringAttachments(self, "test") +``` + +### By Filter + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformCount, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_ec2_transit_gateway_peering_attachments import DataAwsEc2TransitGatewayPeeringAttachments +from imports.aws.data_aws_ec2_transit_gateway_peering_attachment import DataAwsEc2TransitGatewayPeeringAttachment +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + filtered = DataAwsEc2TransitGatewayPeeringAttachments(self, "filtered", + filter=[{ + "name": "state", + "values": ["pendingAcceptance"] + } + ] + ) + # In most cases loops should be handled in the programming language context and + # not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + # you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + # you need to keep this like it is. + unit_count = TerraformCount.of( + Token.as_number(Fn.length_of(filtered.ids))) + DataAwsEc2TransitGatewayPeeringAttachment(self, "unit", + id=Token.as_string(Fn.lookup_nested(filtered.ids, [unit_count.index])), + count=unit_count + ) +``` + +## Argument Reference + +This data source supports the following arguments: + +* `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. + +### filter Argument Reference + +* `name` - (Required) Name of the field to filter by, as defined by [the underlying AWS API][1] +* `values` - (Required) List of one or more values for the filter. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `ids` - A list of all attachment ids matching the filter. You can retrieve more information about the attachment using the [aws_ec2_transit_gateway_peering_attachment][2] data source, searching by identifier. 
+ +[1]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayPeeringAttachments.html +[2]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_transit_gateway_peering_attachment + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `read` - (Default `20m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/ecr_lifecycle_policy_document.html.markdown b/website/docs/cdktf/python/d/ecr_lifecycle_policy_document.html.markdown index 491edb33761..19f031ffbe8 100644 --- a/website/docs/cdktf/python/d/ecr_lifecycle_policy_document.html.markdown +++ b/website/docs/cdktf/python/d/ecr_lifecycle_policy_document.html.markdown @@ -60,14 +60,14 @@ Each document configuration may have one or more `rule` blocks, which each accep * `action` (Optional) - Specifies the action type. * `type` (Required) - The supported value is `expire`. * `description` (Optional) - Describes the purpose of a rule within a lifecycle policy. -* `priority` (Required) - Sets the order in which rules are evaluated, lowest to highest. When you add rules to a lifecycle policy, you must give them each a unique value for `priority`. Values do not need to be sequential across rules in a policy. A rule with a `tag_status` value of any must have the highest value for `priority` and be evaluated last. +* `priority` (Required) - Sets the order in which rules are evaluated, lowest to highest. When you add rules to a lifecycle policy, you must give them each a unique value for `priority`. Values do not need to be sequential across rules in a policy. A rule with a `tag_status` value of "any" must have the highest value for `priority` and be evaluated last. 
* `selection` (Required) - Collects parameters describing the selection criteria for the ECR lifecycle policy: - * `tag_status` (Required) - Determines whether the lifecycle policy rule that you are adding specifies a tag for an image. Acceptable options are tagged, untagged, or any. If you specify any, then all images have the rule applied to them. If you specify tagged, then you must also specify a `tag_prefix_list` value. If you specify untagged, then you must omit `tag_prefix_list`. - * `tag_pattern_list` (Required if `tag_status` is set to tagged and `tag_prefix_list` isn't specified) - You must specify a comma-separated list of image tag patterns that may contain wildcards (*) on which to take action with your lifecycle policy. For example, if your images are tagged as prod, prod1, prod2, and so on, you would use the tag pattern list prod* to specify all of them. If you specify multiple tags, only the images with all specified tags are selected. There is a maximum limit of four wildcards (*) per string. For example, ["*test*1*2*3", "test*1*2*3*"] is valid but ["test*1*2*3*4*5*6"] is invalid. - * `tag_prefix_list` (Required if `tag_status` is set to tagged and `tag_pattern_list` isn't specified) - You must specify a comma-separated list of image tag prefixes on which to take action with your lifecycle policy. For example, if your images are tagged as prod, prod1, prod2, and so on, you would use the tag prefix prod to specify all of them. If you specify multiple tags, only images with all specified tags are selected. - * `count_type` (Required) - Specify a count type to apply to the images. If `count_type` is set to imageCountMoreThan, you also specify `count_number` to create a rule that sets a limit on the number of images that exist in your repository. If `count_type` is set to sinceImagePushed, you also specify `count_unit` and `count_number` to specify a time limit on the images that exist in your repository. 
- * `count_unit` (Required if `count_type` is set to sinceImagePushed) - Specify a count unit of days to indicate that as the unit of time, in addition to `count_number`, which is the number of days. - * `count_number` (Required) - Specify a count number. If the `count_type` used is imageCountMoreThan, then the value is the maximum number of images that you want to retain in your repository. If the `count_type` used is sinceImagePushed, then the value is the maximum age limit for your images. + * `tag_status` (Required) - Determines whether the lifecycle policy rule that you are adding specifies a tag for an image. Acceptable options are "tagged", "untagged", or "any". If you specify "any", then all images have the rule applied to them. If you specify "tagged", then you must also specify a `tag_prefix_list` value. If you specify "untagged", then you must omit `tag_prefix_list`. + * `tag_pattern_list` (Required if `tag_status` is set to "tagged" and `tag_prefix_list` isn't specified) - You must specify a comma-separated list of image tag patterns that may contain wildcards (\*) on which to take action with your lifecycle policy. For example, if your images are tagged as `prod`, `prod1`, `prod2`, and so on, you would use the tag pattern list `["prod\*"]` to specify all of them. If you specify multiple tags, only the images with all specified tags are selected. There is a maximum limit of four wildcards (\*) per string. For example, `["*test*1*2*3", "test*1*2*3*"]` is valid but `["test*1*2*3*4*5*6"]` is invalid. + * `tag_prefix_list` (Required if `tag_status` is set to "tagged" and `tag_pattern_list` isn't specified) - You must specify a comma-separated list of image tag prefixes on which to take action with your lifecycle policy. For example, if your images are tagged as `prod`, `prod1`, `prod2`, and so on, you would use the tag prefix "prod" to specify all of them. If you specify multiple tags, only images with all specified tags are selected. 
+ * `count_type` (Required) - Specify a count type to apply to the images. If `count_type` is set to "imageCountMoreThan", you also specify `count_number` to create a rule that sets a limit on the number of images that exist in your repository. If `count_type` is set to "sinceImagePushed", you also specify `count_unit` and `count_number` to specify a time limit on the images that exist in your repository. + * `count_unit` (Required if `count_type` is set to "sinceImagePushed") - Specify a count unit of days to indicate that as the unit of time, in addition to `count_number`, which is the number of days. + * `count_number` (Required) - Specify a count number. If the `count_type` used is "imageCountMoreThan", then the value is the maximum number of images that you want to retain in your repository. If the `count_type` used is "sinceImagePushed", then the value is the maximum age limit for your images. ## Attribute Reference @@ -75,4 +75,4 @@ This data source exports the following attributes in addition to the arguments a * `json` - The above arguments serialized as a standard JSON policy document. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/fsx_ontap_file_system.html.markdown b/website/docs/cdktf/python/d/fsx_ontap_file_system.html.markdown index f7924c3404f..20c9f3f2100 100644 --- a/website/docs/cdktf/python/d/fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/python/d/fsx_ontap_file_system.html.markdown @@ -48,7 +48,9 @@ In addition to all arguments above, the following attributes are exported: * `daily_automatic_backup_start_time` - The preferred time (in `HH:MM` format) to take daily automatic backups, in the UTC time zone. * `deployment_type` - The file system deployment type. * `disk_iops_configuration` - The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system, specifying the number of provisioned IOPS and the provision mode. See [Disk IOPS](#disk-iops) Below. 
-* `dns_name` - DNS name for the file system (e.g. `fs-12345678.corp.example.com`). +* `dns_name` - DNS name for the file system. + + **Note:** This attribute does not apply to FSx for ONTAP file systems and is consequently not set. You can access your FSx for ONTAP file system and volumes via a [Storage Virtual Machine (SVM)](fsx_ontap_storage_virtual_machine.html) using its DNS name or IP address. * `endpoint_ip_address_range` - (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system exist. * `endpoints` - The Management and Intercluster FileSystemEndpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. See [FileSystemEndpoints](#file-system-endpoints) below. * `ha_pairs` - The number of HA pairs for the file system. @@ -82,4 +84,4 @@ In addition to all arguments above, the following attributes are exported: * `DNSName` - The file system's DNS name. You can mount your file system using its DNS name. * `IpAddresses` - IP addresses of the file system endpoint. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/glue_catalog_table.html.markdown b/website/docs/cdktf/python/d/glue_catalog_table.html.markdown index 4e0bb7ed2e8..40ee2b0dfd2 100644 --- a/website/docs/cdktf/python/d/glue_catalog_table.html.markdown +++ b/website/docs/cdktf/python/d/glue_catalog_table.html.markdown @@ -73,6 +73,7 @@ This data source exports the following attributes in addition to the arguments a ### storage_descriptor +* `additional_locations` - List of locations that point to the path where a Delta table is located * `bucket_columns` - List of reducer grouping columns, clustering columns, and bucketing columns in the table. * `columns` - Configuration block for columns in the table. See [`columns`](#columns) below. * `compressed` - Whether the data in the table is compressed. 
@@ -130,4 +131,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the target table. * `region` - Region of the target table. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/lakeformation_data_lake_settings.html.markdown b/website/docs/cdktf/python/d/lakeformation_data_lake_settings.html.markdown index 77b546a508b..1809e447d92 100644 --- a/website/docs/cdktf/python/d/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/cdktf/python/d/lakeformation_data_lake_settings.html.markdown @@ -49,6 +49,7 @@ This data source exports the following attributes in addition to the arguments a * `allow_external_data_filtering` - Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `external_data_filtering_allow_list` - A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `authorized_session_tag_value_list` - Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. +* `allow_full_table_external_data_access` - Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. ### create_database_default_permissions @@ -60,4 +61,4 @@ This data source exports the following attributes in addition to the arguments a * `permissions` - List of permissions granted to the principal. * `principal` - Principal who is granted permissions. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/launch_configuration.html.markdown b/website/docs/cdktf/python/d/launch_configuration.html.markdown index 362c54c2fb9..b0cf10e5408 100644 --- a/website/docs/cdktf/python/d/launch_configuration.html.markdown +++ b/website/docs/cdktf/python/d/launch_configuration.html.markdown @@ -54,6 +54,7 @@ This data source exports the following attributes in addition to the arguments a * `http_put_response_hop_limit` - The desired HTTP PUT response hop limit for instance metadata requests. * `security_groups` - List of associated Security Group IDS. * `associate_public_ip_address` - Whether a Public IP address is associated with the instance. +* `primary_ipv6` - Whether the first IPv6 GUA will be made the primary IPv6 address. * `user_data` - User Data of the instance. * `enable_monitoring` - Whether Detailed Monitoring is Enabled. * `ebs_optimized` - Whether the launched EC2 instance will be EBS-optimized. @@ -89,4 +90,4 @@ This data source exports the following attributes in addition to the arguments a * `device_name` - Name of the device. * `virtual_name` - Virtual Name of the device. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mskconnect_connector.html.markdown b/website/docs/cdktf/python/d/mskconnect_connector.html.markdown index c383319b834..464e3b6b013 100644 --- a/website/docs/cdktf/python/d/mskconnect_connector.html.markdown +++ b/website/docs/cdktf/python/d/mskconnect_connector.html.markdown @@ -43,6 +43,7 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the connector. * `description` - Summary description of the connector. +* `tags` - A map of tags assigned to the resource. * `version` - Current version of the connector. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mskconnect_custom_plugin.html.markdown b/website/docs/cdktf/python/d/mskconnect_custom_plugin.html.markdown index 9e9744e8197..740cce09f87 100644 --- a/website/docs/cdktf/python/d/mskconnect_custom_plugin.html.markdown +++ b/website/docs/cdktf/python/d/mskconnect_custom_plugin.html.markdown @@ -45,5 +45,6 @@ This data source exports the following attributes in addition to the arguments a * `description` - a summary description of the custom plugin. * `latest_revision` - an ID of the latest successfully created revision of the custom plugin. * `state` - the state of the custom plugin. +* `tags` - A map of tags assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/mskconnect_worker_configuration.html.markdown b/website/docs/cdktf/python/d/mskconnect_worker_configuration.html.markdown index e3f66a5f653..aae35379238 100644 --- a/website/docs/cdktf/python/d/mskconnect_worker_configuration.html.markdown +++ b/website/docs/cdktf/python/d/mskconnect_worker_configuration.html.markdown @@ -45,5 +45,6 @@ This data source exports the following attributes in addition to the arguments a * `description` - a summary description of the worker configuration. * `latest_revision` - an ID of the latest successfully created revision of the worker configuration. * `properties_file_content` - contents of connect-distributed.properties file. +* `tags` - A map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/networkmanager_core_network_policy_document.html.markdown b/website/docs/cdktf/python/d/networkmanager_core_network_policy_document.html.markdown index 83dacd527d6..67a26668295 100644 --- a/website/docs/cdktf/python/d/networkmanager_core_network_policy_document.html.markdown +++ b/website/docs/cdktf/python/d/networkmanager_core_network_policy_document.html.markdown @@ -182,6 +182,7 @@ The following arguments are available: * `core_network_configuration` (Required) - The core network configuration section defines the Regions where a core network should operate. For AWS Regions that are defined in the policy, the core network creates a Core Network Edge where you can connect attachments. After it's created, each Core Network Edge is peered with every other defined Region and is configured with consistent segment and routing across all Regions. Regions cannot be removed until the associated attachments are deleted. Detailed below. * `segments` (Required) - Block argument that defines the different segments in the network. Here you can provide descriptions, change defaults, and provide explicit Regional operational and route filters. The names defined for each segment are used in the `segment_actions` and `attachment_policies` section. Each segment is created, and operates, as a completely separated routing domain. By default, attachments can only communicate with other attachments in the same segment. Detailed below. * `segment_actions` (Optional) - A block argument, `segment_actions` define how routing works between segments. By default, attachments can only communicate with other attachments in the same segment. Detailed below. +* `network_function_groups` (Optional) - Block argument that defines the service insertion actions you want to include. Detailed below. 
### `attachment_policies` @@ -192,15 +193,17 @@ The following arguments are available: * `conditions` (Required) - A block argument. Detailed Below. * `description` (Optional) - A user-defined description that further helps identify the rule. * `rule_number` (Required) - An integer from `1` to `65535` indicating the rule's order number. Rules are processed in order from the lowest numbered rule to the highest. Rules stop processing when a rule is matched. It's important to make sure that you number your rules in the exact order that you want them processed. +* `add_to_network_function_group` (Optional) - The name of the network function group to attach to the attachment policy. ### `action` The following arguments are available: -* `association_method` (Required) - Defines how a segment is mapped. Values can be `constant` or `tag`. `constant` statically defines the segment to associate the attachment to. `tag` uses the value of a tag to dynamically try to map to a segment.reference_policies_elements_condition_operators.html) to evaluate. +* `association_method` (Optional) - Defines how a segment is mapped. Values can be `constant` or `tag`. `constant` statically defines the segment to associate the attachment to. `tag` uses the value of a tag to dynamically try to map to a segment. * `segment` (Optional) - Name of the `segment` to share as defined in the `segments` section. This is used only when the `association_method` is `constant`. * `tag_value_of_key` (Optional) - Maps the attachment to the value of a known key. This is used with the `association_method` is `tag`. For example a `tag` of `stage = “test”`, will map to a segment named `test`. The value must exactly match the name of a segment. This allows you to have many segments, but use only a single rule without having to define multiple nearly identical conditions. 
This prevents creating many similar conditions that all use the same keys to map to segments. * `require_acceptance` (Optional) - Determines if this mapping should override the segment value for `require_attachment_acceptance`. You can only set this to `true`, indicating that this setting applies only to segments that have `require_attachment_acceptance` set to `false`. If the segment already has the default `require_attachment_acceptance`, you can set this to inherit segment’s acceptance value. +* `add_to_network_function_group` (Optional) - The name of the network function group to attach to the attachment policy. ### `conditions` @@ -244,20 +247,33 @@ The following arguments are available: ### `segment_actions` -`segment_actions` have differnet outcomes based on their `action` argument value. There are 2 valid values for `action`: `create-route` & `share`. Behaviors of the below arguments changed depending on the `action` you specify. For more details on their use see the [AWS documentation](https://docs.aws.amazon.com/vpc/latest/cloudwan/cloudwan-policies-json.html#cloudwan-segment-actions-json). +`segment_actions` have different outcomes based on their `action` argument value. Behaviors of the below arguments changed depending on the `action` you specify. For more details on their use see the [AWS documentation](https://docs.aws.amazon.com/vpc/latest/cloudwan/cloudwan-policies-json.html#cloudwan-segment-actions-json). ~> **NOTE:** `share_with` and `share_with_except` break from the AWS API specification. The API has 1 argument `share-with` and it can accept 3 input types as valid (`"*"`, `[""]`, or `{ except: [""]}`). To emulate this behavior, `share_with` is always a list that can accept the argument `["*"]` as valid for `"*"` and `share_with_except` is a that can accept `[""]` as valid for `{ except: [""]}`. You may only specify one of: `share_with` or `share_with_except`. 
The following arguments are available: -* `action` (Required) - Action to take for the chosen segment. Valid values `create-route` or `share`. +* `action` (Required) - Action to take for the chosen segment. Valid values: `create-route`, `share`, `send-via` and `send-to`. * `description` (Optional) - A user-defined string describing the segment action. * `destination_cidr_blocks` (Optional) - List of strings containing CIDRs. You can define the IPv4 and IPv6 CIDR notation for each AWS Region. For example, `10.1.0.0/16` or `2001:db8::/56`. This is an array of CIDR notation strings. * `destinations` (Optional) - A list of strings. Valid values include `["blackhole"]` or a list of attachment ids. -* `mode` (Optional) - String. This mode places the attachment and return routes in each of the `share_with` segments. Valid values include: `attachment-route`. +* `mode` (Optional) - String. When `action` is `share`, a `mode` value of `attachment-route` places the attachment and return routes in each of the `share_with` segments. When `action` is `send-via`, indicates the mode used for packets. Valid values: `attachment-route`, `single-hop`, `dual-hop`. * `segment` (Optional) - Name of the segment. * `share_with` (Optional) - A list of strings to share with. Must be a substring is all segments. Valid values include: `["*"]` or `[""]`. * `share_with_except` (Optional) - A set subtraction of segments to not share with. +* `when_sent_to` (Optional) - The destination segments for the `send-via` or `send-to` `action`. + * `segments` (Optional) - A list of strings. The list of segments that the `send-via` `action` uses. +* `via` (Optional) - The network function groups and any edge overrides associated with the action. + * `network_function_groups` (Optional) - A list of strings. The network function group to use for the service insertion action. + * `with_edge_override` (Optional) - Any edge overrides and the preferred edge to use. + * `edge_sets` (Optional) - A list of strings. 
The list of edges associated with the network function group. + * `use_edge` (Optional) - The preferred edge to use. + +### `network_function_groups` + +* `name` (Required) - This identifies the network function group container. +* `description` (Optional) - Optional description of the network function group. +* `require_attachment_acceptance` (Required) - This will be either `true`, that attachment acceptance is required, or `false`, that it is not required. ## Attribute Reference @@ -265,4 +281,4 @@ This data source exports the following attributes in addition to the arguments a * `json` - Standard JSON policy document rendered based on the arguments above. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/oam_link.html.markdown b/website/docs/cdktf/python/d/oam_link.html.markdown index b722810db51..d1674ca1b53 100644 --- a/website/docs/cdktf/python/d/oam_link.html.markdown +++ b/website/docs/cdktf/python/d/oam_link.html.markdown @@ -44,10 +44,31 @@ The following arguments are required: This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the link. +* `id` - ARN of the link. * `label` - Label that is assigned to this link. * `label_template` - Human-readable name used to identify this source account when you are viewing data from it in the monitoring account. +* `link_configuration` - Configuration for creating filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account. See [`link_configuration` Block](#link_configuration-block) for details. * `link_id` - ID string that AWS generated as part of the link ARN. * `resource_types` - Types of data that the source account shares with the monitoring account. * `sink_arn` - ARN of the sink that is used for this link. 
- \ No newline at end of file +### `link_configuration` Block + +The `link_configuration` configuration block supports the following arguments: + +* `log_group_configuration` - Configuration for filtering which log groups are to send log events from the source account to the monitoring account. See [`log_group_configuration` Block](#log_group_configuration-block) for details. +* `metric_configuration` - Configuration for filtering which metric namespaces are to be shared from the source account to the monitoring account. See [`metric_configuration` Block](#metric_configuration-block) for details. + +### `log_group_configuration` Block + +The `log_group_configuration` configuration block supports the following arguments: + +* `filter` - Filter string that specifies which log groups are to share their log events with the monitoring account. See [LogGroupConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_LogGroupConfiguration.html) for details. + +### `metric_configuration` Block + +The `metric_configuration` configuration block supports the following arguments: + +* `filter` - Filter string that specifies which metrics are to be shared with the monitoring account. See [MetricConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_MetricConfiguration.html) for details. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/oam_sink.html.markdown b/website/docs/cdktf/python/d/oam_sink.html.markdown index 3b4bdc74de3..9b86f4bc6a6 100644 --- a/website/docs/cdktf/python/d/oam_sink.html.markdown +++ b/website/docs/cdktf/python/d/oam_sink.html.markdown @@ -44,8 +44,9 @@ The following arguments are required: This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the sink. +* `id` - ARN of the sink. * `name` - Name of the sink. * `sink_id` - Random ID string that AWS generated as part of the sink ARN. * `tags` - Tags assigned to the sink. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/opensearch_domain.html.markdown b/website/docs/cdktf/python/d/opensearch_domain.html.markdown index cf6fa449ca4..5524d568d87 100644 --- a/website/docs/cdktf/python/d/opensearch_domain.html.markdown +++ b/website/docs/cdktf/python/d/opensearch_domain.html.markdown @@ -78,6 +78,7 @@ This data source exports the following attributes in addition to the arguments a * `identity_pool_id` - Cognito Identity pool used by the domain. * `role_arn` - IAM Role with the AmazonOpenSearchServiceCognitoAccess policy attached. * `created` – Status of the creation of the domain. +* `dashboard_endpoint` - Domain-specific endpoint used to access the [Dashboard application](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). * `deleted` – Status of the deletion of the domain. * `domain_id` – Unique identifier for the domain. * `ebs_options` - EBS Options for the instances in the domain. @@ -91,7 +92,7 @@ This data source exports the following attributes in addition to the arguments a * `enabled` - Whether encryption at rest is enabled in the domain. * `kms_key_id` - KMS key id used to encrypt data at rest. * `endpoint` – Domain-specific endpoint used to submit index, search, and data upload requests. -* `dashboard_endpoint` - Domain-specific endpoint used to access the [Dashboard application](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). +* `ip_address_type` - Type of IP addresses supported by the endpoint for the domain. * `kibana_endpoint` - (**Deprecated**) Domain-specific endpoint for kibana without https scheme. Use the `dashboard_endpoint` attribute instead. * `log_publishing_options` - Domain log publishing related options. * `log_type` - Type of OpenSearch log being published. 
@@ -117,4 +118,4 @@ This data source exports the following attributes in addition to the arguments a * `subnet_ids` - Subnets used by the domain. * `vpc_id` - VPC used by the domain. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/organizations_policy.html.markdown b/website/docs/cdktf/python/d/organizations_policy.html.markdown index eb40ff989f1..bf7ad9a9081 100644 --- a/website/docs/cdktf/python/d/organizations_policy.html.markdown +++ b/website/docs/cdktf/python/d/organizations_policy.html.markdown @@ -24,26 +24,25 @@ from cdktf import Fn, Token, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DataAwsOrganizationalPolicies from imports.aws.data_aws_organizations_organization import DataAwsOrganizationsOrganization from imports.aws.data_aws_organizations_policies_for_target import DataAwsOrganizationsPoliciesForTarget +from imports.aws.data_aws_organizations_policy import DataAwsOrganizationsPolicy class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - DataAwsOrganizationalPolicies(self, "test", - policy_id=Fn.lookup_nested(current.policies, ["0", "id"]) - ) - data_aws_organizations_organization_current = - DataAwsOrganizationsOrganization(self, "current") + current = DataAwsOrganizationsOrganization(self, "current") data_aws_organizations_policies_for_target_current = - DataAwsOrganizationsPoliciesForTarget(self, "current_2", + DataAwsOrganizationsPoliciesForTarget(self, "current_1", filter="SERVICE_CONTROL_POLICY", - target_id=Token.as_string( - Fn.lookup_nested(data_aws_organizations_organization_current.roots, ["0", "id" - ])) + target_id=Token.as_string(Fn.lookup_nested(current.roots, ["0", "id"])) ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
data_aws_organizations_policies_for_target_current.override_logical_id("current") + DataAwsOrganizationsPolicy(self, "test", + policy_id=Token.as_string( + Fn.lookup_nested(data_aws_organizations_policies_for_target_current.policies, ["0", "id" + ])) + ) ``` ## Argument Reference @@ -63,4 +62,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - The friendly name of the policy. * `type` - The type of policy values can be `SERVICE_CONTROL_POLICY | TAG_POLICY | BACKUP_POLICY | AISERVICES_OPT_OUT_POLICY` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/route53_zone.html.markdown b/website/docs/cdktf/python/d/route53_zone.html.markdown index bd22d0280f6..ccc9e7a2c1e 100644 --- a/website/docs/cdktf/python/d/route53_zone.html.markdown +++ b/website/docs/cdktf/python/d/route53_zone.html.markdown @@ -48,10 +48,9 @@ class MyConvertedCode(TerraformStack): The arguments of this data source act as filters for querying the available Hosted Zone. You have to use `zone_id` or `name`, not both of them. The given filter must match exactly one -Hosted Zone. If you use `name` field for private Hosted Zone, you need to add `private_zone` field to `true` +Hosted Zone. If you use `name` field for private Hosted Zone, you need to add `private_zone` field to `true`. * `zone_id` - (Optional) Hosted Zone id of the desired Hosted Zone. - * `name` - (Optional) Hosted Zone name of the desired Hosted Zone. * `private_zone` - (Optional) Used with `name` field to get a private Hosted Zone. * `vpc_id` - (Optional) Used with `name` field to get a private Hosted Zone associated with the vpc_id (in this case, private_zone is not mandatory). @@ -69,10 +68,14 @@ The following attribute is additionally exported: * `arn` - ARN of the Hosted Zone. * `caller_reference` - Caller Reference of the Hosted Zone. * `comment` - Comment field of the Hosted Zone. 
+* `linked_service_principal` - The service that created the Hosted Zone (e.g., `servicediscovery.amazonaws.com`). +* `linked_service_description` - The description provided by the service that created the Hosted Zone (e.g., `arn:aws:servicediscovery:us-east-1:1234567890:namespace/ns-xxxxxxxxxxxxxxxx`). +* `name` - The Hosted Zone name. * `name_servers` - List of DNS name servers for the Hosted Zone. * `primary_name_server` - The Route 53 name server that created the SOA record. +* `private_zone` - Indicates whether this is a private hosted zone. * `resource_record_set_count` - The number of Record Set in the Hosted Zone. -* `linked_service_principal` - The service that created the Hosted Zone (e.g., `servicediscovery.amazonaws.com`). -* `linked_service_description` - The description provided by the service that created the Hosted Zone (e.g., `arn:aws:servicediscovery:us-east-1:1234567890:namespace/ns-xxxxxxxxxxxxxxxx`). +* `tags` - A map of tags assigned to the Hosted Zone. +* `zone_id` - The Hosted Zone identifier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/service_discovery_service.html.markdown b/website/docs/cdktf/python/d/service_discovery_service.html.markdown index bc6ab1092ee..ac41836e4dd 100644 --- a/website/docs/cdktf/python/d/service_discovery_service.html.markdown +++ b/website/docs/cdktf/python/d/service_discovery_service.html.markdown @@ -46,39 +46,39 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the service. * `arn` - ARN of the service. * `description` - Description of the service. -* `dns_config` - Complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. -* `health_check_config` - Complex type that contains settings for an optional health check. Only for Public DNS namespaces. 
-* `health_check_custom_config` - A complex type that contains settings for ECS managed health checks. +* `dns_config` - Complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. See [`dns_config` Block](#dns_config-block) for details. +* `health_check_config` - Complex type that contains settings for an optional health check. Only for Public DNS namespaces. See [`health_check_config` Block](#health_check_config-block) for details. +* `health_check_custom_config` - A complex type that contains settings for ECS managed health checks. See [`health_check_custom_config` Block](#health_check_custom_config-block) for details. * `tags` - Map of tags to assign to the service. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `tags_all` - (**Deprecated**) Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -### dns_config +### `dns_config` Block -This argument supports the following arguments: +The `dns_config` configuration block supports the following arguments: * `namespace_id` - ID of the namespace to use for DNS configuration. -* `dns_records` - An array that contains one DnsRecord object for each resource record set. +* `dns_records` - An array that contains one DnsRecord object for each resource record set. See [`dns_records` Block](#dns_records-block) for details. * `routing_policy` - Routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. 
Valid Values: MULTIVALUE, WEIGHTED -#### dns_records +#### `dns_records` Block -This argument supports the following arguments: +The `dns_records` configuration block supports the following arguments: * `ttl` - Amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. * `type` - Type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME -### health_check_config +### `health_check_config` Block -This argument supports the following arguments: +The `health_check_config` configuration block supports the following arguments: * `failure_threshold` - Number of consecutive health checks. Maximum value of 10. * `resource_path` - Path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. * `type` - The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP -### health_check_custom_config +### `health_check_custom_config` Block -This argument supports the following arguments: +The `health_check_custom_config` configuration block supports the following arguments: * `failure_threshold` - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/d/transfer_connector.html.markdown b/website/docs/cdktf/python/d/transfer_connector.html.markdown new file mode 100644 index 00000000000..37112c1100d --- /dev/null +++ b/website/docs/cdktf/python/d/transfer_connector.html.markdown @@ -0,0 +1,67 @@ +--- +subcategory: "Transfer Family" +layout: "aws" +page_title: "AWS: aws_transfer_connector" +description: |- + Terraform data source for managing an AWS Transfer Family Connector. +--- + + + +# Data Source: aws_transfer_connector + +Terraform data source for managing an AWS Transfer Family Connector. + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.data_aws_transfer_connector import DataAwsTransferConnector +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DataAwsTransferConnector(self, "test", + id="c-xxxxxxxxxxxxxx" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) Unique identifier for connector + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `access_role` - ARN of the AWS Identity and Access Management role. +* `arn` - ARN of the Connector. +* `as2_config` - Structure containing the parameters for an AS2 connector object. Contains the following attributes: + * `basic_auth_secret_id` - Basic authentication for AS2 connector API. Returns a null value if not set. + * `compression` - Specifies whether AS2 file is compressed. Will be ZLIB or DISABLED + * `encryption_algorithm` - Algorithm used to encrypt file. 
Will be AES128_CBC or AES192_CBC or AES256_CBC or DES_EDE3_CBC or NONE. + * `local_profile_id` - Unique identifier for AS2 local profile. + * `mdn_response` - Used for outbound requests to tell if response is asynchronous or not. Will be either SYNC or NONE. + * `mdn_signing_algorithm` - Signing algorithm for MDN response. Will be SHA256 or SHA384 or SHA512 or SHA1 or NONE or DEFAULT. + * `message_subject` - Subject HTTP header attribute in outbound AS2 messages to the connector. + * `partner_profile_id` - Unique identifier used by connector for partner profile. + * `signing_algorithm` - Algorithm used for signing AS2 messages sent with the connector. +* `logging_role` - ARN of the IAM role that allows a connector to turn on CloudWatch logging for Amazon S3 events. +* `security_policy_name` - Name of security policy. +* `service_managed_egress_ip_addresses` - List of egress IP addresses. +* `sftp_config` - Object containing the following attributes: + * `trusted_host_keys` - List of the public portions of the host keys that are used to identify the servers the connector is connected to. + * `user_secret_id` - Identifier for the secret in AWS Secrets Manager that contains the SFTP user's private key, and/or password. +* `tags` - Object containing the following attributes: + * `key` - Name of the tag. + * `value` - Values associated with the tags key. +* `url` - URL of the partner's AS2 or SFTP endpoint. + + \ No newline at end of file diff --git a/website/docs/cdktf/python/guides/custom-service-endpoints.html.markdown b/website/docs/cdktf/python/guides/custom-service-endpoints.html.markdown index bfaa90c7211..ac2f54ba115 100644 --- a/website/docs/cdktf/python/guides/custom-service-endpoints.html.markdown +++ b/website/docs/cdktf/python/guides/custom-service-endpoints.html.markdown @@ -108,6 +108,7 @@ class MyConvertedCode(TerraformStack):
  • appflow
  • appintegrations (or appintegrationsservice)
  • applicationinsights
  • +
  • applicationsignals
  • appmesh
  • apprunner
  • appstream
  • @@ -157,6 +158,7 @@ class MyConvertedCode(TerraformStack):
  • costoptimizationhub
  • cur (or costandusagereportservice)
  • customerprofiles
  • +
  • databrew (or gluedatabrew)
  • dataexchange
  • datapipeline
  • datasync
  • @@ -250,6 +252,7 @@ class MyConvertedCode(TerraformStack):
  • neptunegraph
  • networkfirewall
  • networkmanager
  • +
  • networkmonitor
  • oam (or cloudwatchobservabilityaccessmanager)
  • opensearch (or opensearchservice)
  • opensearchserverless
  • @@ -429,4 +432,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/index.html.markdown b/website/docs/cdktf/python/index.html.markdown index 09287fdf263..4a356cf5702 100644 --- a/website/docs/cdktf/python/index.html.markdown +++ b/website/docs/cdktf/python/index.html.markdown @@ -13,7 +13,7 @@ Use the Amazon Web Services (AWS) provider to interact with the many resources supported by AWS. You must configure the provider with the proper credentials before you can use it. -Use the navigation to the left to read about the available resources. There are currently 1373 resources and 559 data sources available in the provider. +Use the navigation to the left to read about the available resources. There are currently 1387 resources and 564 data sources available in the provider. To learn the basics of Terraform using this provider, follow the hands-on [get started tutorials](https://learn.hashicorp.com/tutorials/terraform/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). Interact with AWS services, @@ -811,4 +811,4 @@ Approaches differ per authentication providers: There used to be no better way to get account ID out of the API when using the federated account until `sts:GetCallerIdentity` was introduced. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/amplify_domain_association.html.markdown b/website/docs/cdktf/python/r/amplify_domain_association.html.markdown index 7c43742dfc7..0f8215db59c 100644 --- a/website/docs/cdktf/python/r/amplify_domain_association.html.markdown +++ b/website/docs/cdktf/python/r/amplify_domain_association.html.markdown @@ -62,11 +62,17 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: * `app_id` - (Required) Unique ID for an Amplify app. 
+* `certificate_settings` - (Optional) The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you. * `domain_name` - (Required) Domain name for the domain association. * `enable_auto_sub_domain` - (Optional) Enables the automated creation of subdomains for branches. * `sub_domain` - (Required) Setting for the subdomain. Documented below. * `wait_for_verification` - (Optional) If enabled, the resource will wait for the domain association status to change to `PENDING_DEPLOYMENT` or `AVAILABLE`. Setting this to `false` will skip the process. Default: `true`. +The `certificate_settings` configuration block supports the following arguments: + +* `type` - (Required) The certificate type. Valid values are `AMPLIFY_MANAGED` and `CUSTOM`. +* `custom_certificate_arn` - (Optional) The Amazon resource name (ARN) for the custom certificate. + The `sub_domain` configuration block supports the following arguments: * `branch_name` - (Required) Branch name setting for the subdomain. @@ -109,4 +115,4 @@ Using `terraform import`, import Amplify domain association using `app_id` and ` % terraform import aws_amplify_domain_association.app d2ypk4k47z8u6/example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/api_gateway_integration.html.markdown b/website/docs/cdktf/python/r/api_gateway_integration.html.markdown index 08e0479d722..d4ed130ff53 100644 --- a/website/docs/cdktf/python/r/api_gateway_integration.html.markdown +++ b/website/docs/cdktf/python/r/api_gateway_integration.html.markdown @@ -250,7 +250,7 @@ This resource supports the following arguments: * `cache_key_parameters` - (Optional) List of cache key parameters for the integration. * `cache_namespace` - (Optional) Integration's cache namespace. * `content_handling` - (Optional) How to handle request payload content type conversions. 
Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT`. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. -* `timeout_milliseconds` - (Optional) Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds. +* `timeout_milliseconds` - (Optional) Custom timeout between 50 and 300,000 milliseconds. The default value is 29,000 milliseconds. You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase time beyond 29,000 milliseconds. * `tls_config` - (Optional) TLS configuration. See below. ### tls_config Configuration Block @@ -288,4 +288,4 @@ Using `terraform import`, import `aws_api_gateway_integration` using `REST-API-I % terraform import aws_api_gateway_integration.example 12345abcde/67890fghij/GET ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_app_authorization.html.markdown b/website/docs/cdktf/python/r/appfabric_app_authorization.html.markdown new file mode 100644 index 00000000000..955824d530c --- /dev/null +++ b/website/docs/cdktf/python/r/appfabric_app_authorization.html.markdown @@ -0,0 +1,96 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_app_authorization" +description: |- + Terraform resource for managing an AWS AppFabric App Authorization. +--- + + + +# Resource: aws_appfabric_app_authorization + +Terraform resource for managing an AWS AppFabric App Authorization. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. 
+# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appfabric_app_authorization import AppfabricAppAuthorization +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppfabricAppAuthorization(self, "example", + app="TERRAFORMCLOUD", + app_bundle_arn=arn, + auth_type="apiKey", + credential=[AppfabricAppAuthorizationCredential( + api_key_credential=[AppfabricAppAuthorizationCredentialApiKeyCredential( + api_key="exampleapikeytoken" + ) + ] + ) + ], + tenant=[AppfabricAppAuthorizationTenant( + tenant_display_name="example", + tenant_identifier="example" + ) + ] + ) +``` + +## Argument Reference + +The following arguments are required: + +* `app` - (Required) The name of the application. For valid values, see https://docs.aws.amazon.com/appfabric/latest/api/API_CreateAppAuthorization.html. +* `app_bundle_arn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. +* `auth_type` - (Required) The authorization type for the app authorization. Valid values are `oauth2` and `apiKey`. +* `credential` - (Required) Contains credentials for the application, such as an API key or OAuth2 client ID and secret. +Specify credentials that match the authorization type for your request. For example, if the authorization type for your request is OAuth2 (oauth2), then you should provide only the OAuth2 credentials. +* `tenant` - (Required) Contains information about an application tenant, such as the application display name and identifier. + +Credential supports the following: + +* `api_key_credential` - (Optional) Contains API key credential information. +* `oauth2_credential` - (Optional) Contains OAuth2 client credential information. + +API Key Credential supports the following: + +* `api_key` - (Required) Contains API key credential information. + +oauth2 Credential supports the following: + +* `client_id` - (Required) The client ID of the client application. 
+* `client_secret` - (Required) The client secret of the client application. + +Tenant support the following: + +* `tenant_display_name` - (Required) The display name of the tenant. +* `tenant_identifier` - (Required) The ID of the application tenant. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the App Authorization. Do not begin the description with "An", "The", "Defines", "Indicates", or "Specifies," as these are verbose. In other words, "Indicates the amount of storage," can be rewritten as "Amount of storage," without losing any information. +* `auth_url` - The application URL for the OAuth flow. +* `persona` - The user persona of the app authorization. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_app_authorization_connection.html.markdown b/website/docs/cdktf/python/r/appfabric_app_authorization_connection.html.markdown new file mode 100644 index 00000000000..88030dc7ce1 --- /dev/null +++ b/website/docs/cdktf/python/r/appfabric_app_authorization_connection.html.markdown @@ -0,0 +1,63 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_app_authorization_connection" +description: |- + Terraform resource for managing an AWS AppFabric App Authorization Connection. +--- + + + +# Resource: aws_appfabric_app_authorization_connection + +Terraform resource for managing an AWS AppFabric App Authorization Connection. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. 
+# See https://cdk.tf/provider-generation for more details.
+#
+from imports.aws.appfabric_app_authorization_connection import AppfabricAppAuthorizationConnection
+class MyConvertedCode(TerraformStack):
+    def __init__(self, scope, name):
+        super().__init__(scope, name)
+        AppfabricAppAuthorizationConnection(self, "example",
+            app_authorization_arn=test.arn,
+            app_bundle_arn=arn
+        )
+```
+
+## Argument Reference
+
+This resource supports the following arguments:
+
+* `app_bundle_arn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request.
+* `app_authorization_arn` - (Required) The Amazon Resource Name (ARN) or Universal Unique Identifier (UUID) of the app authorization to use for the request.
+* `auth_request` - (Optional) Contains OAuth2 authorization information. This is required if the app authorization for the request is configured with an OAuth2 (oauth2) authorization type.
+
+Auth Request support the following:
+
+* `code` - (Required) The authorization code returned by the application after permission is granted in the application OAuth page (after clicking on the AuthURL).
+* `redirect_uri` - (Optional) The redirect URL that is specified in the AuthURL and the application client.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `app` - The name of the application.
+* `tenant` - Contains information about an application tenant, such as the application display name and identifier.
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_app_bundle.html.markdown b/website/docs/cdktf/python/r/appfabric_app_bundle.html.markdown new file mode 100644 index 00000000000..a40a5787978 --- /dev/null +++ b/website/docs/cdktf/python/r/appfabric_app_bundle.html.markdown @@ -0,0 +1,78 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_app_bundle" +description: |- + Terraform resource for managing an AWS AppFabric AppBundle. +--- + + + +# Resource: aws_appfabric_app_bundle + +Terraform resource for managing an AWS AppFabric AppBundle. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appfabric_app_bundle import AppfabricAppBundle +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppfabricAppBundle(self, "example", + customer_managed_key_arn=Token.as_string(awms_kms_key_example.arn), + tags={ + "Environment": "test" + } + ) +``` + +## Argument Reference + +This resource supports the following arguments: + +* `customer_managed_key_arn` - (Optional) The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) key to use to encrypt the application data. If this is not specified, an AWS owned key is used for encryption. +* `tags` - (Optional) Map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the AppBundle. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFabric AppBundle using the `arn`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appfabric_app_bundle import AppfabricAppBundle +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppfabricAppBundle.generate_config_for_import(self, "example", "arn:aws:appfabric:[region]:[account]:appbundle/ee5587b4-5765-4288-a202-xxxxxxxxxx") +``` + +Using `terraform import`, import AppFabric AppBundle using the `arn`. 
For example: + +```console +% terraform import aws_appfabric_app_bundle.example arn:aws:appfabric:[region]:[account]:appbundle/ee5587b4-5765-4288-a202-xxxxxxxxxx +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_ingestion.html.markdown b/website/docs/cdktf/python/r/appfabric_ingestion.html.markdown new file mode 100644 index 00000000000..208c4e4addd --- /dev/null +++ b/website/docs/cdktf/python/r/appfabric_ingestion.html.markdown @@ -0,0 +1,85 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_ingestion" +description: |- + Terraform resource for managing an AWS AppFabric Ingestion. +--- + + + +# Resource: aws_appfabric_ingestion + +Terraform resource for managing an AWS AppFabric Ingestion. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appfabric_ingestion import AppfabricIngestion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppfabricIngestion(self, "example", + app="OKTA", + app_bundle_arn=Token.as_string(aws_appfabric_app_bundle_example.arn), + ingestion_type="auditLog", + tags={ + "Environment": "test" + }, + tenant_id="example.okta.com" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `app` - (Required) Name of the application. +Refer to the AWS Documentation for the [list of valid values](https://docs.aws.amazon.com/appfabric/latest/api/API_CreateIngestion.html#appfabric-CreateIngestion-request-app) +* `app_bundle_arn` - (Required) Amazon Resource Name (ARN) of the app bundle to use for the request. +* `ingestion_type` - (Required) Ingestion type. Valid values are `auditLog`. 
+* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tenant_id` - (Required) ID of the application tenant. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Ingestion. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFabric Ingestion using the `app_bundle_identifier` and `arn` separated by `,`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.appfabric_ingestion import AppfabricIngestion +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppfabricIngestion.generate_config_for_import(self, "example", "arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx,arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx/ingestion/32251416-710b-4425-96ca-xxxxxxxxxx") +``` + +Using `terraform import`, import AppFabric Ingestion using the `app_bundle_identifier` and `arn` separated by `,`. 
For example: + +```console +% terraform import aws_appfabric_ingestion.example arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx,arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx/ingestion/32251416-710b-4425-96ca-xxxxxxxxxx +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appfabric_ingestion_destination.html.markdown b/website/docs/cdktf/python/r/appfabric_ingestion_destination.html.markdown new file mode 100644 index 00000000000..499c466d7c2 --- /dev/null +++ b/website/docs/cdktf/python/r/appfabric_ingestion_destination.html.markdown @@ -0,0 +1,114 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_ingestion_destination" +description: |- + Terraform resource for managing an AWS AppFabric Ingestion Destination. +--- + + + +# Resource: aws_appfabric_ingestion_destination + +Terraform resource for managing an AWS AppFabric Ingestion Destination. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.appfabric_ingestion_destination import AppfabricIngestionDestination +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + AppfabricIngestionDestination(self, "example", + app_bundle_arn=Token.as_string(aws_appfabric_app_bundle_example.arn), + destination_configuration=[AppfabricIngestionDestinationDestinationConfiguration( + audit_log=[AppfabricIngestionDestinationDestinationConfigurationAuditLog( + destination=[AppfabricIngestionDestinationDestinationConfigurationAuditLogDestination( + s3_bucket=[AppfabricIngestionDestinationDestinationConfigurationAuditLogDestinationS3Bucket( + bucket_name=Token.as_string(aws_s3_bucket_example.bucket) + ) + ] + ) + ] + ) + ] + ) + ], + ingestion_arn=Token.as_string(aws_appfabric_ingestion_example.arn), + processing_configuration=[AppfabricIngestionDestinationProcessingConfiguration( + audit_log=[AppfabricIngestionDestinationProcessingConfigurationAuditLog( + format="json", + schema="raw" + ) + ] + ) + ] + ) +``` + +## Argument Reference + +The following arguments are required: + +* `app_bundle_arn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. +* `ingestion_arn` - (Required) The Amazon Resource Name (ARN) of the ingestion to use for the request. +* `destination_configuration` - (Required) Contains information about the destination of ingested data. +* `processing_configuration` - (Required) Contains information about how ingested data is processed. +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +Destination Configuration support the following: + +* `audit_log` - (Required) Contains information about an audit log destination configuration. 
+
+Audit Log Destination Configuration support the following:
+
+* `destination` - (Required) Contains information about an audit log destination. Only one destination (Firehose Stream) or (S3 Bucket) can be specified.
+
+Destination support the following:
+
+* `firehose_stream` - (Optional) Contains information about an Amazon Data Firehose delivery stream.
+* `s3_bucket` - (Optional) Contains information about an Amazon S3 bucket.
+
+Firehose Stream support the following:
+
+* `stream_name` - (Required) The name of the Amazon Data Firehose delivery stream.
+
+S3 Bucket support the following:
+
+* `bucket_name` - (Required) The name of the Amazon S3 bucket.
+* `prefix` - (Optional) The object key to use.
+
+Processing Configuration support the following:
+
+* `audit_log` - (Required) Contains information about an audit log processing configuration.
+
+Audit Log Processing Configuration support the following:
+
+* `format` - (Required) The format in which the audit logs need to be formatted. Valid values: `json`, `parquet`.
+* `schema` - (Required) The event schema in which the audit logs need to be formatted. Valid values: `ocsf`, `raw`.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `arn` - ARN of the Ingestion Destination.
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block).
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appstream_fleet.html.markdown b/website/docs/cdktf/python/r/appstream_fleet.html.markdown index d59f6d4c801..acd4d296eb2 100644 --- a/website/docs/cdktf/python/r/appstream_fleet.html.markdown +++ b/website/docs/cdktf/python/r/appstream_fleet.html.markdown @@ -65,7 +65,7 @@ The following arguments are optional: * `enable_default_internet_access` - (Optional) Enables or disables default internet access for the fleet. * `fleet_type` - (Optional) Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON` * `iam_role_arn` - (Optional) ARN of the IAM role to apply to the fleet. -* `idle_disconnect_timeout_in_seconds` - (Optional) Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins. Defaults to 60 seconds. +* `idle_disconnect_timeout_in_seconds` - (Optional) Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins. Defaults to `0`. Valid value is between `60` and `3600 `seconds. * `image_name` - (Optional) Name of the image used to create the fleet. * `image_arn` - (Optional) ARN of the public, private, or shared image to use. * `stream_view` - (Optional) AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays. If not specified, defaults to `APP`. @@ -132,4 +132,4 @@ Using `terraform import`, import `aws_appstream_fleet` using the id. 
For example % terraform import aws_appstream_fleet.example fleetNameExample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_datasource.html.markdown b/website/docs/cdktf/python/r/appsync_datasource.html.markdown index c83ad1f3431..883eb4b6da4 100644 --- a/website/docs/cdktf/python/r/appsync_datasource.html.markdown +++ b/website/docs/cdktf/python/r/appsync_datasource.html.markdown @@ -104,88 +104,90 @@ This resource supports the following arguments: * `name` - (Required) User-supplied name for the data source. * `type` - (Required) Type of the Data Source. Valid values: `AWS_LAMBDA`, `AMAZON_DYNAMODB`, `AMAZON_ELASTICSEARCH`, `HTTP`, `NONE`, `RELATIONAL_DATABASE`, `AMAZON_EVENTBRIDGE`, `AMAZON_OPENSEARCH_SERVICE`. * `description` - (Optional) Description of the data source. -* `dynamodb_config` - (Optional) DynamoDB settings. See [DynamoDB Config](#dynamodb-config) -* `elasticsearch_config` - (Optional) Amazon Elasticsearch settings. See [ElasticSearch Config](#elasticsearch-config) -* `event_bridge_config` - (Optional) AWS EventBridge settings. See [Event Bridge Config](#event-bridge-config) -* `http_config` - (Optional) HTTP settings. See [HTTP Config](#http-config) -* `lambda_config` - (Optional) AWS Lambda settings. See [Lambda Config](#lambda-config) -* `opensearchservice_config` - (Optional) Amazon OpenSearch Service settings. See [OpenSearch Service Config](#opensearch-service-config) -* `relational_database_config` (Optional) AWS RDS settings. See [Relational Database Config](#relational-database-config) +* `dynamodb_config` - (Optional) DynamoDB settings. See [`dynamodb_config` Block](#dynamodb_config-block) for details. +* `elasticsearch_config` - (Optional) Amazon Elasticsearch settings. See [`elasticsearch_config` Block](#elasticsearch_config-block) for details. +* `event_bridge_config` - (Optional) AWS EventBridge settings. 
See [`event_bridge_config` Block](#event_bridge_config-block) for details. +* `http_config` - (Optional) HTTP settings. See [`http_config` Block](#http_config-block) for details. +* `lambda_config` - (Optional) AWS Lambda settings. See [`lambda_config` Block](#lambda_config-block) for details. +* `opensearchservice_config` - (Optional) Amazon OpenSearch Service settings. See [`opensearchservice_config` Block](#opensearchservice_config-block) for details. +* `relational_database_config` (Optional) AWS RDS settings. See [`relational_database_config` Block](#relational_database_config-block) for details. * `service_role_arn` - (Optional) IAM service role ARN for the data source. Required if `type` is specified as `AWS_LAMBDA`, `AMAZON_DYNAMODB`, `AMAZON_ELASTICSEARCH`, `AMAZON_EVENTBRIDGE`, or `AMAZON_OPENSEARCH_SERVICE`. -### DynamoDB Config +### `dynamodb_config` Block -This argument supports the following arguments: +The `dynamodb_config` configuration block supports the following arguments: * `table_name` - (Required) Name of the DynamoDB table. * `region` - (Optional) AWS region of the DynamoDB table. Defaults to current region. * `use_caller_credentials` - (Optional) Set to `true` to use Amazon Cognito credentials with this data source. -* `delta_sync_config` - (Optional) The DeltaSyncConfig for a versioned data source. See [Delta Sync Config](#delta-sync-config) +* `delta_sync_config` - (Optional) The DeltaSyncConfig for a versioned data source. See [`delta_sync_config` Block](#delta_sync_config-block) for details. * `versioned` - (Optional) Detects Conflict Detection and Resolution with this data source. -### Delta Sync Config +### `delta_sync_config` Block + +The `delta_sync_config` configuration block supports the following arguments: * `base_table_ttl` - (Optional) The number of minutes that an Item is stored in the data source. * `delta_sync_table_name` - (Required) The table name. 
* `delta_sync_table_ttl` - (Optional) The number of minutes that a Delta Sync log entry is stored in the Delta Sync table. -### ElasticSearch Config +### `elasticsearch_config` Block -This argument supports the following arguments: +The `elasticsearch_config` configuration block supports the following arguments: * `endpoint` - (Required) HTTP endpoint of the Elasticsearch domain. * `region` - (Optional) AWS region of Elasticsearch domain. Defaults to current region. -### Event Bridge Config +### `event_bridge_config` Block -This argument supports the following arguments: +The `event_bridge_config` configuration block supports the following arguments: * `event_bus_arn` - (Required) ARN for the EventBridge bus. -### HTTP Config +### `http_config` Block -This argument supports the following arguments: +The `http_config` configuration block supports the following arguments: * `endpoint` - (Required) HTTP URL. -* `authorization_config` - (Optional) Authorization configuration in case the HTTP endpoint requires authorization. See [Authorization Config](#authorization-config). +* `authorization_config` - (Optional) Authorization configuration in case the HTTP endpoint requires authorization. See [`authorization_config` Block](#authorization_config-block) for details. -#### Authorization Config +### `authorization_config` Block -This argument supports the following arguments: +The `authorization_config` configuration block supports the following arguments: * `authorization_type` - (Optional) Authorization type that the HTTP endpoint requires. Default values is `AWS_IAM`. -* `aws_iam_config` - (Optional) Identity and Access Management (IAM) settings. See [AWS IAM Config](#aws-iam-config). +* `aws_iam_config` - (Optional) Identity and Access Management (IAM) settings. See [`aws_iam_config` Block](#aws_iam_config-block) for details. 
-##### AWS IAM Config +### `aws_iam_config` Block -This argument supports the following arguments: +The `aws_iam_config` configuration block supports the following arguments: * `signing_region` - (Optional) Signing Amazon Web Services Region for IAM authorization. * `signing_service_name`- (Optional) Signing service name for IAM authorization. -### Lambda Config +### `lambda_config` Block -This argument supports the following arguments: +The `lambda_config` configuration block supports the following arguments: * `function_arn` - (Required) ARN for the Lambda function. -### OpenSearch Service Config +### `opensearchservice_config` Block -This argument supports the following arguments: +The `opensearchservice_config` configuration block supports the following arguments: * `endpoint` - (Required) HTTP endpoint of the OpenSearch domain. * `region` - (Optional) AWS region of the OpenSearch domain. Defaults to current region. -### Relational Database Config +### `relational_database_config` Block -This argument supports the following arguments: +The `relational_database_config` configuration block supports the following arguments: -* `http_endpoint_config` - (Required) Amazon RDS HTTP endpoint configuration. See [HTTP Endpoint Config](#http-endpoint-config). +* `http_endpoint_config` - (Required) Amazon RDS HTTP endpoint configuration. See [`http_endpoint_config` Block](#http_endpoint_config-block) for details. * `source_type` - (Optional) Source type for the relational database. Valid values: `RDS_HTTP_ENDPOINT`. -#### HTTP Endpoint Config +### `http_endpoint_config` Block -This argument supports the following arguments: +The `http_endpoint_config` configuration block supports the following arguments: * `db_cluster_identifier` - (Required) Amazon RDS cluster identifier. * `aws_secret_store_arn` - (Required) AWS secret store ARN for database credentials. 
@@ -224,4 +226,4 @@ Using `terraform import`, import `aws_appsync_datasource` using the `api_id`, a % terraform import aws_appsync_datasource.example abcdef123456-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_function.html.markdown b/website/docs/cdktf/python/r/appsync_function.html.markdown index 5383db6756a..10929753a76 100644 --- a/website/docs/cdktf/python/r/appsync_function.html.markdown +++ b/website/docs/cdktf/python/r/appsync_function.html.markdown @@ -92,28 +92,28 @@ This resource supports the following arguments: * `request_mapping_template` - (Optional) Function request mapping template. Functions support only the 2018-05-29 version of the request mapping template. * `response_mapping_template` - (Optional) Function response mapping template. * `description` - (Optional) Function description. -* `runtime` - (Optional) Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See [Runtime](#runtime). -* `sync_config` - (Optional) Describes a Sync configuration for a resolver. See [Sync Config](#sync-config). +* `runtime` - (Optional) Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See [`runtime` Block](#runtime-block) for details. +* `sync_config` - (Optional) Describes a Sync configuration for a resolver. See [`sync_config` Block](#sync_config-block) for details. * `function_version` - (Optional) Version of the request mapping template. Currently the supported value is `2018-05-29`. Does not apply when specifying `code`. 
-### Runtime +### `runtime` Block -This argument supports the following arguments: +The `runtime` configuration block supports the following arguments: * `name` - (Optional) The name of the runtime to use. Currently, the only allowed value is `APPSYNC_JS`. * `runtime_version` - (Optional) The version of the runtime to use. Currently, the only allowed version is `1.0.0`. -### Sync Config +### `sync_config` Block -This argument supports the following arguments: +The `sync_config` configuration block supports the following arguments: * `conflict_detection` - (Optional) Conflict Detection strategy to use. Valid values are `NONE` and `VERSION`. * `conflict_handler` - (Optional) Conflict Resolution strategy to perform in the event of a conflict. Valid values are `NONE`, `OPTIMISTIC_CONCURRENCY`, `AUTOMERGE`, and `LAMBDA`. -* `lambda_conflict_handler_config` - (Optional) Lambda Conflict Handler Config when configuring `LAMBDA` as the Conflict Handler. See [Lambda Conflict Handler Config](#lambda-conflict-handler-config). +* `lambda_conflict_handler_config` - (Optional) Lambda Conflict Handler Config when configuring `LAMBDA` as the Conflict Handler. See [`lambda_conflict_handler_config` Block](#lambda_conflict_handler_config-block) for details. -#### Lambda Conflict Handler Config +#### `lambda_conflict_handler_config` Block -This argument supports the following arguments: +The `lambda_conflict_handler_config` configuration block supports the following arguments: * `lambda_conflict_handler_arn` - (Optional) ARN for the Lambda function to use as the Conflict Handler. 
@@ -150,4 +150,4 @@ Using `terraform import`, import `aws_appsync_function` using the AppSync API ID % terraform import aws_appsync_function.example xxxxx-yyyyy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/appsync_graphql_api.html.markdown b/website/docs/cdktf/python/r/appsync_graphql_api.html.markdown index 9f4da785e86..bbc3b591492 100644 --- a/website/docs/cdktf/python/r/appsync_graphql_api.html.markdown +++ b/website/docs/cdktf/python/r/appsync_graphql_api.html.markdown @@ -319,13 +319,13 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: * `authentication_type` - (Required) Authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA` -* `name` - (Required) User-supplied name for the GraphqlApi. -* `log_config` - (Optional) Nested argument containing logging configuration. Defined below. -* `openid_connect_config` - (Optional) Nested argument containing OpenID Connect configuration. Defined below. -* `user_pool_config` - (Optional) Amazon Cognito User Pool configuration. Defined below. -* `lambda_authorizer_config` - (Optional) Nested argument containing Lambda authorizer configuration. Defined below. +* `name` - (Required) User-supplied name for the GraphQL API. +* `log_config` - (Optional) Nested argument containing logging configuration. See [`log_config` Block](#log_config-block) for details. +* `openid_connect_config` - (Optional) Nested argument containing OpenID Connect configuration. See [`openid_connect_config` Block](#openid_connect_config-block) for details. +* `user_pool_config` - (Optional) Amazon Cognito User Pool configuration. See [`user_pool_config` Block](#user_pool_config-block) for details. +* `lambda_authorizer_config` - (Optional) Nested argument containing Lambda authorizer configuration. See [`lambda_authorizer_config` Block](#lambda_authorizer_config-block) for details. 
* `schema` - (Optional) Schema definition, in GraphQL schema language format. Terraform cannot perform drift detection of this configuration. -* `additional_authentication_provider` - (Optional) One or more additional authentication providers for the GraphqlApi. Defined below. +* `additional_authentication_provider` - (Optional) One or more additional authentication providers for the GraphQL API. See [`additional_authentication_provider` Block](#additional_authentication_provider-block) for details. * `introspection_config` - (Optional) Sets the value of the GraphQL API to enable (`ENABLED`) or disable (`DISABLED`) introspection. If no value is provided, the introspection configuration will be set to ENABLED by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled. For more information about introspection, see [GraphQL introspection](https://graphql.org/learn/introspection/). * `query_depth_limit` - (Optional) The maximum depth a query can have in a single request. Depth refers to the amount of nested levels allowed in the body of query. The default value is `0` (or unspecified), which indicates there's no depth limit. If you set a limit, it can be between `1` and `75` nested levels. This field will produce a limit error if the operation falls out of bounds. @@ -335,43 +335,43 @@ This resource supports the following arguments: * `xray_enabled` - (Optional) Whether tracing with X-ray is enabled. Defaults to false. * `visibility` - (Optional) Sets the value of the GraphQL API to public (`GLOBAL`) or private (`PRIVATE`). If no value is provided, the visibility will be set to `GLOBAL` by default. This value cannot be changed once the API has been created. 
-### log_config +### `log_config` Block -This argument supports the following arguments: +The `log_config` configuration block supports the following arguments: * `cloudwatch_logs_role_arn` - (Required) Amazon Resource Name of the service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account. * `field_log_level` - (Required) Field logging level. Valid values: `ALL`, `ERROR`, `NONE`. * `exclude_verbose_content` - (Optional) Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level. Valid values: `true`, `false`. Default value: `false` -### additional_authentication_provider +### `additional_authentication_provider` Block -This argument supports the following arguments: +The `additional_authentication_provider` configuration block supports the following arguments: * `authentication_type` - (Required) Authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA` -* `openid_connect_config` - (Optional) Nested argument containing OpenID Connect configuration. Defined below. -* `user_pool_config` - (Optional) Amazon Cognito User Pool configuration. Defined below. +* `openid_connect_config` - (Optional) Nested argument containing OpenID Connect configuration. See [`openid_connect_config` Block](#openid_connect_config-block) for details. +* `user_pool_config` - (Optional) Amazon Cognito User Pool configuration. See [`user_pool_config` Block](#user_pool_config-block) for details. -### openid_connect_config +### `openid_connect_config` Block -This argument supports the following arguments: +The `openid_connect_config` configuration block supports the following arguments: * `issuer` - (Required) Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token. 
* `auth_ttl` - (Optional) Number of milliseconds a token is valid after being authenticated. * `client_id` - (Optional) Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time. * `iat_ttl` - (Optional) Number of milliseconds a token is valid after being issued to a user. -### user_pool_config +### `user_pool_config` Block -This argument supports the following arguments: +The `user_pool_config` configuration block supports the following arguments: * `default_action` - (Required only if Cognito is used as the default auth provider) Action that you want your GraphQL API to take when a request that uses Amazon Cognito User Pool authentication doesn't match the Amazon Cognito User Pool configuration. Valid: `ALLOW` and `DENY` * `user_pool_id` - (Required) User pool ID. * `app_id_client_regex` - (Optional) Regular expression for validating the incoming Amazon Cognito User Pool app client ID. * `aws_region` - (Optional) AWS region in which the user pool was created. -### lambda_authorizer_config +### `lambda_authorizer_config` Block -This argument supports the following arguments: +The `lambda_authorizer_config` configuration block supports the following arguments: * `authorizer_uri` - (Required) ARN of the Lambda function to be called for authorization. Note: This Lambda function must have a resource-based policy assigned to it, to allow `lambda:InvokeFunction` from service principal `appsync.amazonaws.com`. * `authorizer_result_ttl_in_seconds` - (Optional) Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a `ttlOverride` key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. 
@@ -411,4 +411,4 @@ Using `terraform import`, import AppSync GraphQL API using the GraphQL API ID. F % terraform import aws_appsync_graphql_api.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_group.html.markdown b/website/docs/cdktf/python/r/autoscaling_group.html.markdown index 9d8c6903284..28701315ae5 100644 --- a/website/docs/cdktf/python/r/autoscaling_group.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_group.html.markdown @@ -708,6 +708,7 @@ This configuration block supports the following: * ssd - solid state drive ``` +- `max_spot_price_as_percentage_of_optimal_on_demand_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spot_max_price_percentage_over_lowest_price` - `memory_gib_per_vcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. - `min` - (Optional) Minimum. May be a decimal number, e.g. `0.5`. - `max` - (Optional) Maximum. May be a decimal number, e.g. `0.5`. @@ -725,7 +726,7 @@ This configuration block supports the following: If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. - `require_hibernate_support` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. 
-- `spot_max_price_percentage_over_lowest_price` - (Optional) Price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. +- `spot_max_price_percentage_over_lowest_price` - (Optional) Price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `max_spot_price_as_percentage_of_optimal_on_demand_price` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. @@ -933,4 +934,4 @@ Using `terraform import`, import Auto Scaling Groups using the `name`. 
For examp % terraform import aws_autoscaling_group.web web-asg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/autoscaling_policy.html.markdown b/website/docs/cdktf/python/r/autoscaling_policy.html.markdown index 80a1214785b..95985ce744c 100644 --- a/website/docs/cdktf/python/r/autoscaling_policy.html.markdown +++ b/website/docs/cdktf/python/r/autoscaling_policy.html.markdown @@ -327,14 +327,14 @@ The following fields are available in target tracking configuration: ### predefined_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefined_metric_type` - (Required) Metric type. * `resource_label` - (Optional) Identifies the resource associated with the metric type. ### customized_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric_dimension` - (Optional) Dimensions of the metric. * `metric_name` - (Optional) Name of the metric. @@ -345,14 +345,14 @@ This argument supports the following arguments: #### metric_dimension -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. #### metrics -This argument supports the following arguments: +This configuration block supports the following arguments: * `expression` - (Optional) Math expression used on the returned metric. You must specify either `expression` or `metric_stat`, but not both. * `id` - (Required) Short name for the metric used in target tracking scaling policy. 
@@ -362,7 +362,7 @@ This argument supports the following arguments: ##### metric_stat -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric` - (Required) Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. * `stat` - (Required) Statistic of the metrics to return. @@ -370,7 +370,7 @@ This argument supports the following arguments: ##### metric -This argument supports the following arguments: +This configuration block supports the following arguments: * `dimensions` - (Optional) Dimensions of the metric. * `metric_name` - (Required) Name of the metric. @@ -378,14 +378,14 @@ This argument supports the following arguments: ###### dimensions -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. ### predictive_scaling_configuration -This argument supports the following arguments: +This configuration block supports the following arguments: * `max_capacity_breach_behavior` - (Optional) Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Valid values are `HonorMaxCapacity` or `IncreaseMaxCapacity`. Default is `HonorMaxCapacity`. * `max_capacity_buffer` - (Optional) Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. Valid range is `0` to `100`. If set to `0`, Amazon EC2 Auto Scaling may scale capacity higher than the maximum capacity to equal but not exceed forecast capacity. 
@@ -395,7 +395,7 @@ This argument supports the following arguments: #### metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `customized_capacity_metric_specification` - (Optional) Customized capacity metric specification. The field is only valid when you use `customized_load_metric_specification` * `customized_load_metric_specification` - (Optional) Customized load metric specification. @@ -406,46 +406,46 @@ This argument supports the following arguments: ##### predefined_load_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefined_metric_type` - (Required) Metric type. Valid values are `ASGTotalCPUUtilization`, `ASGTotalNetworkIn`, `ASGTotalNetworkOut`, or `ALBTargetGroupRequestCount`. * `resource_label` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. ##### predefined_metric_pair_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefined_metric_type` - (Required) Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. For example, if the metric type is `ASGCPUUtilization`, the Auto Scaling group's total CPU metric is used as the load metric, and the average CPU metric is used for the scaling metric. 
Valid values are `ASGCPUUtilization`, `ASGNetworkIn`, `ASGNetworkOut`, or `ALBRequestCount`. * `resource_label` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. ##### predefined_scaling_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefined_metric_type` - (Required) Describes a scaling metric for a predictive scaling policy. Valid values are `ASGAverageCPUUtilization`, `ASGAverageNetworkIn`, `ASGAverageNetworkOut`, or `ALBRequestCountPerTarget`. * `resource_label` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. 
##### customized_scaling_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric_data_queries` - (Required) List of up to 10 structures that defines custom scaling metric in predictive scaling policy ##### customized_load_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric_data_queries` - (Required) List of up to 10 structures that defines custom load metric in predictive scaling policy ##### customized_capacity_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric_data_queries` - (Required) List of up to 10 structures that defines custom capacity metric in predictive scaling policy ##### metric_data_queries -This argument supports the following arguments: +This configuration block supports the following arguments: * `expression` - (Optional) Math expression used on the returned metric. You must specify either `expression` or `metric_stat`, but not both. * `id` - (Required) Short name for the metric used in predictive scaling policy. @@ -455,7 +455,7 @@ This argument supports the following arguments: ##### metric_stat -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric` - (Required) Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. * `stat` - (Required) Statistic of the metrics to return. @@ -463,7 +463,7 @@ This argument supports the following arguments: ##### metric -This argument supports the following arguments: +This configuration block supports the following arguments: * `dimensions` - (Optional) Dimensions of the metric. * `metric_name` - (Required) Name of the metric. 
@@ -471,7 +471,7 @@ This argument supports the following arguments: ##### dimensions -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. @@ -511,4 +511,4 @@ Using `terraform import`, import AutoScaling scaling policy using the role autos % terraform import aws_autoscaling_policy.test-policy asg-name/policy-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_custom_model.html.markdown b/website/docs/cdktf/python/r/bedrock_custom_model.html.markdown index fd0dd23b392..0af1a2789a0 100644 --- a/website/docs/cdktf/python/r/bedrock_custom_model.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_custom_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_model" description: |- @@ -126,4 +126,4 @@ Using `terraform import`, import Bedrock custom model using the `job_arn`. 
For e % terraform import aws_bedrock_custom_model.example arn:aws:bedrock:us-west-2:123456789012:model-customization-job/amazon.titan-text-express-v1:0:8k/1y5n57gh5y2e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_model_invocation_logging_configuration.html.markdown b/website/docs/cdktf/python/r/bedrock_model_invocation_logging_configuration.html.markdown index 67c9c990fcc..a309d6dfcfb 100644 --- a/website/docs/cdktf/python/r/bedrock_model_invocation_logging_configuration.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_model_invocation_logging_configuration.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_model_invocation_logging_configuration" description: |- @@ -116,4 +116,4 @@ Using `terraform import`, import Bedrock custom model using the `id` set to the % terraform import aws_bedrock_model_invocation_logging_configuration.my_config us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrock_provisioned_model_throughput.html.markdown b/website/docs/cdktf/python/r/bedrock_provisioned_model_throughput.html.markdown index b8316b66a08..b6135301115 100644 --- a/website/docs/cdktf/python/r/bedrock_provisioned_model_throughput.html.markdown +++ b/website/docs/cdktf/python/r/bedrock_provisioned_model_throughput.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_provisioned_model_throughput" description: |- @@ -74,4 +74,4 @@ Using `terraform import`, import Provisioned Throughput using the `provisioned_m % terraform import aws_bedrock_provisioned_model_throughput.example arn:aws:bedrock:us-west-2:123456789012:provisioned-model/1y5n57gh5y2e ``` - \ No newline at end of file + \ No newline at end of file diff --git 
a/website/docs/cdktf/python/r/bedrockagent_agent.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent.html.markdown index 199d19ca01e..db492f52c47 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent" description: |- @@ -179,4 +179,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent using the agent % terraform import aws_bedrockagent_agent.example GGRRAED6JP ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_agent_action_group.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent_action_group.html.markdown index 28e77213760..98b6782fe43 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent_action_group.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent_action_group.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent_action_group" description: |- @@ -122,4 +122,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Action Group th % terraform import aws_bedrockagent_agent_action_group.example MMAUDBZTH4,GGRRAED6JP,DRAFT ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_agent_alias.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent_alias.html.markdown index 59f08cc52db..98a02840fb4 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent_alias.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent_alias.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent_alias" description: |- @@ -159,4 
+159,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Alias using the % terraform import aws_bedrockagent_agent_alias.example 66IVY0GUTF,GGRRAED6JP ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_agent_knowledge_base_association.html.markdown b/website/docs/cdktf/python/r/bedrockagent_agent_knowledge_base_association.html.markdown index 5daeb1dcd14..fcb90354d1a 100644 --- a/website/docs/cdktf/python/r/bedrockagent_agent_knowledge_base_association.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_agent_knowledge_base_association.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent_knowledge_base_association" description: |- @@ -79,4 +79,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Knowledge Base % terraform import aws_bedrockagent_agent_knowledge_base_association.example GGRRAED6JP,DRAFT,EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_data_source.html.markdown b/website/docs/cdktf/python/r/bedrockagent_data_source.html.markdown index 994c0c6a916..eb755590748 100644 --- a/website/docs/cdktf/python/r/bedrockagent_data_source.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_data_source.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_data_source" description: |- @@ -137,4 +137,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Data Source using the % terraform import aws_bedrockagent_data_source.example GWCMFMQF6T,EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/bedrockagent_knowledge_base.html.markdown 
b/website/docs/cdktf/python/r/bedrockagent_knowledge_base.html.markdown index 424109412ff..1773f141f9b 100644 --- a/website/docs/cdktf/python/r/bedrockagent_knowledge_base.html.markdown +++ b/website/docs/cdktf/python/r/bedrockagent_knowledge_base.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_knowledge_base" description: |- @@ -183,4 +183,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Knowledge Base using % terraform import aws_bedrockagent_knowledge_base.example EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudformation_stack_set_instance.html.markdown b/website/docs/cdktf/python/r/cloudformation_stack_set_instance.html.markdown index 2ba88ae5036..d8116e3313a 100644 --- a/website/docs/cdktf/python/r/cloudformation_stack_set_instance.html.markdown +++ b/website/docs/cdktf/python/r/cloudformation_stack_set_instance.html.markdown @@ -125,7 +125,7 @@ This resource supports the following arguments: * `stack_set_name` - (Required) Name of the StackSet. * `account_id` - (Optional) Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. -* `deployment_targets` - (Optional) The AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. See [deployment_targets](#deployment_targets-argument-reference) below. +* `deployment_targets` - (Optional) AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. 
Drift detection is not possible for this argument. See [deployment_targets](#deployment_targets-argument-reference) below. * `parameter_overrides` - (Optional) Key-value map of input parameters to override from the StackSet for this Instance. * `region` - (Optional) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. * `retain_stack` - (Optional) During Terraform resource destroy, remove Instance from StackSet while keeping the Stack and its associated resources. Must be enabled in Terraform state _before_ destroy operation to take effect. You cannot reassociate a retained Stack or add an existing, saved Stack to a new StackSet. Defaults to `false`. @@ -136,25 +136,28 @@ This resource supports the following arguments: The `deployment_targets` configuration block supports the following arguments: -* `organizational_unit_ids` - (Optional) The organization root ID or organizational unit (OU) IDs to which StackSets deploys. +* `organizational_unit_ids` - (Optional) Organization root ID or organizational unit (OU) IDs to which StackSets deploys. +* `account_filter_type` - (Optional) Limit deployment targets to individual accounts or include additional accounts with provided OUs. Valid values: `INTERSECTION`, `DIFFERENCE`, `UNION`, `NONE`. +* `accounts` - (Optional) List of accounts to deploy stack set updates. +* `accounts_url` - (Optional) S3 URL of the file containing the list of accounts. ### `operation_preferences` Argument Reference The `operation_preferences` configuration block supports the following arguments: -* `failure_tolerance_count` - (Optional) The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. -* `failure_tolerance_percentage` - (Optional) The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. 
-* `max_concurrent_count` - (Optional) The maximum number of accounts in which to perform this operation at one time. -* `max_concurrent_percentage` - (Optional) The maximum percentage of accounts in which to perform this operation at one time. -* `region_concurrency_type` - (Optional) The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are `SEQUENTIAL` and `PARALLEL`. -* `region_order` - (Optional) The order of the Regions in where you want to perform the stack operation. +* `failure_tolerance_count` - (Optional) Number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. +* `failure_tolerance_percentage` - (Optional) Percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. +* `max_concurrent_count` - (Optional) Maximum number of accounts in which to perform this operation at one time. +* `max_concurrent_percentage` - (Optional) Maximum percentage of accounts in which to perform this operation at one time. +* `region_concurrency_type` - (Optional) Concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are `SEQUENTIAL` and `PARALLEL`. +* `region_order` - (Optional) Order of the Regions in where you want to perform the stack operation. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - Unique identifier for the resource. If `deployment_targets` is set, this is a comma-delimited string combining stack set name, organizational unit IDs (`/`-delimited), and region (ie. `mystack,ou-123/ou-456,us-east-1`). Otherwise, this is a comma-delimited string combining stack set name, AWS account ID, and region (ie. `mystack,123456789012,us-east-1`). 
-* `organizational_unit_id` - The organization root ID or organizational unit (OU) ID in which the stack is deployed. +* `organizational_unit_id` - Organization root ID or organizational unit (OU) ID in which the stack is deployed. * `stack_id` - Stack identifier. * `stack_instance_summaries` - List of stack instances created from an organizational unit deployment target. This will only be populated when `deployment_targets` is set. See [`stack_instance_summaries`](#stack_instance_summaries-attribute-reference). @@ -243,4 +246,4 @@ Using `terraform import`, import CloudFormation StackSet Instances when acting a % terraform import aws_cloudformation_stack_set_instance.example example,ou-sdas-123123123/ou-sdas-789789789,us-east-1,DELEGATED_ADMIN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudtrail_event_data_store.html.markdown b/website/docs/cdktf/python/r/cloudtrail_event_data_store.html.markdown index 458d53938ae..80023b8c334 100644 --- a/website/docs/cdktf/python/r/cloudtrail_event_data_store.html.markdown +++ b/website/docs/cdktf/python/r/cloudtrail_event_data_store.html.markdown @@ -91,6 +91,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: - `name` - (Required) The name of the event data store. +- `billing_mode` - (Optional) The billing mode for the event data store. The valid values are `EXTENDABLE_RETENTION_PRICING` and `FIXED_RETENTION_PRICING`. Defaults to `EXTENDABLE_RETENTION_PRICING`. - `advanced_event_selector` - (Optional) The advanced event selectors to use to select the events for the data store. For more information about how to use advanced event selectors, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide. 
- `multi_region_enabled` - (Optional) Specifies whether the event data store includes events from all regions, or only from the region in which the event data store is created. Default: `true`. - `organization_enabled` - (Optional) Specifies whether an event data store collects events logged for an organization in AWS Organizations. Default: `false`. @@ -151,4 +152,4 @@ Using `terraform import`, import event data stores using their `arn`. For exampl % terraform import aws_cloudtrail_event_data_store.example arn:aws:cloudtrail:us-east-1:123456789123:eventdatastore/22333815-4414-412c-b155-dd254033gfhf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_log_account_policy.html.markdown b/website/docs/cdktf/python/r/cloudwatch_log_account_policy.html.markdown new file mode 100644 index 00000000000..177749019bc --- /dev/null +++ b/website/docs/cdktf/python/r/cloudwatch_log_account_policy.html.markdown @@ -0,0 +1,127 @@ +--- +subcategory: "CloudWatch Logs" +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_account_policy" +description: |- + Provides a CloudWatch Log Account Policy resource. +--- + + + +# Resource: aws_cloudwatch_log_account_policy + +Provides a CloudWatch Log Account Policy resource. + +## Example Usage + +### Account Data Protection Policy + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cloudwatch_log_account_policy import CloudwatchLogAccountPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CloudwatchLogAccountPolicy(self, "data_protection", + policy_document=Token.as_string( + Fn.jsonencode({ + "Name": "DataProtection", + "Statement": [{ + "DataIdentifier": ["arn:aws:dataprotection::aws:data-identifier/EmailAddress" + ], + "Operation": { + "Audit": { + "FindingsDestination": {} + } + }, + "Sid": "Audit" + }, { + "DataIdentifier": ["arn:aws:dataprotection::aws:data-identifier/EmailAddress" + ], + "Operation": { + "Deidentify": { + "MaskConfig": {} + } + }, + "Sid": "Redact" + } + ], + "Version": "2021-06-01" + })), + policy_name="data-protection", + policy_type="DATA_PROTECTION_POLICY" + ) +``` + +### Subscription Filter Policy + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cloudwatch_log_account_policy import CloudwatchLogAccountPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CloudwatchLogAccountPolicy(self, "subscription_filter", + policy_document=Token.as_string( + Fn.jsonencode({ + "DestinationArn": test.arn, + "FilterPattern": "test" + })), + policy_name="subscription-filter", + policy_type="SUBSCRIPTION_FILTER_POLICY", + selection_criteria="LogGroupName NOT IN [\\\"excluded_log_group_name\\\"]" + ) +``` + +## Argument Reference + +This resource supports the following arguments: + +* `policy_document` - (Required) Text of the account policy. Refer to the [AWS docs](https://docs.aws.amazon.com/cli/latest/reference/logs/put-account-policy.html) for more information. 
+* `policy_type` - (Required) Type of account policy. Either `DATA_PROTECTION_POLICY` or `SUBSCRIPTION_FILTER_POLICY`. You can have one account policy per type in an account. +* `policy_name` - (Required) Name of the account policy. +* `scope` - (Optional) Currently defaults to and only accepts the value: `ALL`. +* `selection_criteria` - (Optional) Criteria for applying a subscription filter policy to a selection of log groups. The only allowable criteria selector is `LogGroupName NOT IN []`. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import this resource using the `policy_name` and `policy_type` fields separated by `:`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.cloudwatch_log_account_policy import CloudwatchLogAccountPolicy +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + CloudwatchLogAccountPolicy.generate_config_for_import(self, "example", "my-account-policy:SUBSCRIPTION_FILTER_POLICY") +``` + +Using `terraform import`, import this resource using the `policy_name` and `policy_type` separated by `:`.
For example: + +```console +% terraform import aws_cloudwatch_log_account_policy.example "my-account-policy:SUBSCRIPTION_FILTER_POLICY" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/cloudwatch_metric_alarm.html.markdown b/website/docs/cdktf/python/r/cloudwatch_metric_alarm.html.markdown index 7b1507be9de..14c89bc58c0 100644 --- a/website/docs/cdktf/python/r/cloudwatch_metric_alarm.html.markdown +++ b/website/docs/cdktf/python/r/cloudwatch_metric_alarm.html.markdown @@ -220,7 +220,7 @@ You must choose one or the other See [related part of AWS Docs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html) for details about valid values. -This argument supports the following arguments: +This resource supports the following arguments: * `alarm_name` - (Required) The descriptive name for the alarm. This name must be unique within the user's AWS account * `comparison_operator` - (Required) The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Either of the following is supported: `GreaterThanOrEqualToThreshold`, `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`. Additionally, the values `LessThanLowerOrGreaterThanUpperThreshold`, `LessThanLowerThreshold`, and `GreaterThanUpperThreshold` are used only for alarms based on anomaly detection models. @@ -317,4 +317,4 @@ Using `terraform import`, import CloudWatch Metric Alarm using the `alarm_name`. 
% terraform import aws_cloudwatch_metric_alarm.test alarm-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codebuild_project.html.markdown b/website/docs/cdktf/python/r/codebuild_project.html.markdown index dc84f0ec5d3..268ee894071 100644 --- a/website/docs/cdktf/python/r/codebuild_project.html.markdown +++ b/website/docs/cdktf/python/r/codebuild_project.html.markdown @@ -412,4 +412,4 @@ Using `terraform import`, import CodeBuild Project using the `name`. For example % terraform import aws_codebuild_project.name project-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codebuild_webhook.html.markdown b/website/docs/cdktf/python/r/codebuild_webhook.html.markdown index cc7e67adf4b..37910547360 100644 --- a/website/docs/cdktf/python/r/codebuild_webhook.html.markdown +++ b/website/docs/cdktf/python/r/codebuild_webhook.html.markdown @@ -108,7 +108,7 @@ This resource supports the following arguments: `filter` supports the following: * `type` - (Required) The webhook filter group's type. Valid values for this parameter are: `EVENT`, `BASE_REF`, `HEAD_REF`, `ACTOR_ACCOUNT_ID`, `FILE_PATH`, `COMMIT_MESSAGE`, `WORKFLOW_NAME`, `TAG_NAME`, `RELEASE_NAME`. At least one filter group must specify `EVENT` as its type. -* `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED` works with GitHub & GitHub Enterprise only. For a filter that uses any of the other filter types, a regular expression. +* `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED`, `WORKFLOW_JOB_QUEUED` works with GitHub & GitHub Enterprise only. 
For a filter that uses any of the other filter types, a regular expression. * `exclude_matched_pattern` - (Optional) If set to `true`, the specified filter does *not* trigger a build. Defaults to `false`. ## Attribute Reference @@ -147,4 +147,4 @@ Using `terraform import`, import CodeBuild Webhooks using the CodeBuild Project % terraform import aws_codebuild_webhook.example MyProjectName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/codepipeline.html.markdown b/website/docs/cdktf/python/r/codepipeline.html.markdown index 9e3ed3cba22..98cef3e2334 100644 --- a/website/docs/cdktf/python/r/codepipeline.html.markdown +++ b/website/docs/cdktf/python/r/codepipeline.html.markdown @@ -212,7 +212,7 @@ A `trigger` block supports the following arguments: A `git_configuration` block supports the following arguments: -* `source_action_name` - (Required) The name of the pipeline source action where the trigger configuration. +* `source_action_name` - (Required) The name of the pipeline source action where the trigger configuration, such as Git tags, is specified. The trigger configuration will start the pipeline upon the specified change only. * `pull_request` - (Optional) The field where the repository event that will start the pipeline is specified as pull requests. A `pull_request` block is documented below. * `push` - (Optional) The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details. A `push` block is documented below. @@ -284,4 +284,4 @@ Using `terraform import`, import CodePipelines using the name. 
For example: % terraform import aws_codepipeline.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/config_conformance_pack.html.markdown b/website/docs/cdktf/python/r/config_conformance_pack.html.markdown index b2117b171c1..795642e62cf 100644 --- a/website/docs/cdktf/python/r/config_conformance_pack.html.markdown +++ b/website/docs/cdktf/python/r/config_conformance_pack.html.markdown @@ -86,7 +86,7 @@ class MyConvertedCode(TerraformStack): ~> **Note:** If both `template_body` and `template_s3_uri` are specified, AWS Config uses the `template_s3_uri` and ignores the `template_body`. -This argument supports the following arguments: +This resource supports the following arguments: * `name` - (Required, Forces new resource) The name of the conformance pack. Must begin with a letter and contain from 1 to 256 alphanumeric characters and hyphens. * `delivery_s3_bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates. Maximum length of 63. @@ -133,4 +133,4 @@ Using `terraform import`, import Config Conformance Packs using the `name`. 
For % terraform import aws_config_conformance_pack.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/controltower_control.html.markdown b/website/docs/cdktf/python/r/controltower_control.html.markdown index 3417581c37a..5ee348d561f 100644 --- a/website/docs/cdktf/python/r/controltower_control.html.markdown +++ b/website/docs/cdktf/python/r/controltower_control.html.markdown @@ -40,6 +40,11 @@ class MyConvertedCode(TerraformStack): current = DataAwsRegion(self, "current") aws_controltower_control_example = ControltowerControl(self, "example_3", control_identifier="arn:aws:controltower:${" + current.name + "}::control/AWS-GR_EC2_VOLUME_INUSE_CHECK", + parameters=[ControltowerControlParameters( + key="AllowedRegions", + value=Token.as_string(Fn.jsonencode(["us-east-1"])) + ) + ], target_identifier=Token.as_string( Fn.lookup_nested("${[ for x in ${" + data_aws_organizations_organizational_units_example.children + "} : x.arn if x.name == \"Infrastructure\"]}", ["0"])) ) @@ -49,15 +54,25 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: * `control_identifier` - (Required) The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny guardrail. * `target_identifier` - (Required) The ARN of the organizational unit. +The following arguments are optional: + +* `parameters` - (Optional) Parameter values which are specified to configure the control when you enable it. See [Parameters](#parameters) for more details. + +### Parameters + +* `key` - (Required) The name of the parameter. +* `value` - (Required) The value of the parameter. + ## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: +* `arn` - The ARN of the EnabledControl resource. * `id` - The ARN of the organizational unit.
## Import @@ -85,4 +100,4 @@ Using `terraform import`, import Control Tower Controls using their `organizatio % terraform import aws_controltower_control.example arn:aws:organizations::123456789101:ou/o-qqaejywet/ou-qg5o-ufbhdtv3,arn:aws:controltower:us-east-1::control/WTDSMKDKDNLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_instance.html.markdown b/website/docs/cdktf/python/r/db_instance.html.markdown index 0ff47a2b372..72845b570eb 100644 --- a/website/docs/cdktf/python/r/db_instance.html.markdown +++ b/website/docs/cdktf/python/r/db_instance.html.markdown @@ -351,7 +351,7 @@ class MyConvertedCode(TerraformStack): For more detailed documentation about each argument, refer to the [AWS official documentation](http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). -This argument supports the following arguments: +This resource supports the following arguments: * `allocated_storage` - (Required unless a `snapshot_identifier` or `replicate_source_db` is provided) The allocated storage in gibibytes. If `max_allocated_storage` is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs. If `replicate_source_db` is set, the value is ignored during the creation of the instance. * `allow_major_version_upgrade` - (Optional) Indicates that major version @@ -653,4 +653,4 @@ Using `terraform import`, import DB Instances using the `identifier`. 
For exampl % terraform import aws_db_instance.default mydb-rds-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/db_proxy_default_target_group.html.markdown b/website/docs/cdktf/python/r/db_proxy_default_target_group.html.markdown index 9632fe73ddd..947b23e8be9 100644 --- a/website/docs/cdktf/python/r/db_proxy_default_target_group.html.markdown +++ b/website/docs/cdktf/python/r/db_proxy_default_target_group.html.markdown @@ -77,7 +77,7 @@ This resource supports the following arguments: * `init_query` - (Optional) One or more SQL statements for the proxy to run when opening each new database connection. Typically used with `SET` statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single `SET` statement, such as `SET x=1, y=2`. * `max_connections_percent` - (Optional) The maximum size of the connection pool for each target in a target group. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. * `max_idle_connections_percent` - (Optional) Controls how actively the proxy closes idle database connections in the connection pool. A high value enables the proxy to leave a high percentage of idle connections open. A low value causes the proxy to close idle client connections and return the underlying database connections to the connection pool. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. -* `session_pinning_filters` - (Optional) Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. 
Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is `EXCLUDE_VARIABLE_SETS`. +* `session_pinning_filters` - (Optional) Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. This setting is only supported for MySQL engine family databases. Currently, the only allowed value is `EXCLUDE_VARIABLE_SETS`. ## Attribute Reference @@ -119,4 +119,4 @@ Using `terraform import`, import DB proxy default target groups using the `db_pr % terraform import aws_db_proxy_default_target_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/detective_organization_configuration.html.markdown b/website/docs/cdktf/python/r/detective_organization_configuration.html.markdown index f807e2ce0d2..31adadd6e2b 100644 --- a/website/docs/cdktf/python/r/detective_organization_configuration.html.markdown +++ b/website/docs/cdktf/python/r/detective_organization_configuration.html.markdown @@ -35,7 +35,7 @@ class MyConvertedCode(TerraformStack): aws_detective_organization_configuration_example = DetectiveOrganizationConfiguration(self, "example_1", auto_enable=True, - graph_arn=example.id + graph_arn=example.graph_arn ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_detective_organization_configuration_example.override_logical_id("example") @@ -56,7 +56,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_detective_organization_admin_account` using the Detective Graph ID. 
For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_detective_organization_admin_account` using the behavior graph ARN. For example: ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -70,13 +70,13 @@ from imports.aws.detective_organization_configuration import DetectiveOrganizati class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - DetectiveOrganizationConfiguration.generate_config_for_import(self, "example", "00b00fd5aecc0ab60a708659477e9617") + DetectiveOrganizationConfiguration.generate_config_for_import(self, "example", "arn:aws:detective:us-east-1:123456789012:graph:00b00fd5aecc0ab60a708659477e9617") ``` -Using `terraform import`, import `aws_detective_organization_admin_account` using the Detective Graph ID. For example: +Using `terraform import`, import `aws_detective_organization_admin_account` using the behavior graph ARN. For example: ```console -% terraform import aws_detective_organization_configuration.example 00b00fd5aecc0ab60a708659477e9617 +% terraform import aws_detective_organization_configuration.example arn:aws:detective:us-east-1:123456789012:graph:00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_cluster.html.markdown b/website/docs/cdktf/python/r/docdb_cluster.html.markdown index ba1990f7e44..cec13e4dfd8 100644 --- a/website/docs/cdktf/python/r/docdb_cluster.html.markdown +++ b/website/docs/cdktf/python/r/docdb_cluster.html.markdown @@ -53,7 +53,7 @@ class MyConvertedCode(TerraformStack): For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/docdb/create-db-cluster.html). 
-This argument supports the following arguments: +This resource supports the following arguments: * `allow_major_version_upgrade` - (Optional) A value that indicates whether major version upgrades are allowed. Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version. * `apply_immediately` - (Optional) Specifies whether any cluster modifications @@ -66,7 +66,7 @@ This argument supports the following arguments: * `cluster_identifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `db_subnet_group_name` - (Optional) A DB subnet group to associate with this DB instance. * `db_cluster_parameter_group_name` - (Optional) A cluster parameter group to associate with the cluster. -* `deletion_protection` - (Optional) A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. +* `deletion_protection` - (Optional) A boolean value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. Defaults to `false`. * `enabled_cloudwatch_logs_exports` - (Optional) List of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `profiler`. * `engine_version` - (Optional) The database engine version. Updating this argument results in an outage. 
@@ -83,6 +83,7 @@ This argument supports the following arguments: * `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 * `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 +* `restore_to_point_in_time` - (Optional, Forces new resource) A configuration block for restoring a DB instance to an arbitrary point in time. Requires the `identifier` argument to be set with the name of the new DB instance to be created. See [Restore To Point In Time](#restore-to-point-in-time) below for details. * `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`. * `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot. Automated snapshots **should not** be used for this attribute, unless from a different cluster. Automated snapshots are deleted as part of cluster destruction when the resource is replaced. * `storage_encrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false`. 
@@ -91,6 +92,15 @@ Default: A 30-minute window selected at random from an 8-hour block of time per * `vpc_security_group_ids` - (Optional) List of VPC security groups to associate with the Cluster +### Restore To Point In Time + +The `restore_to_point_in_time` block supports the following arguments: + +* `restore_to_time` - (Optional) The date and time to restore from. Value must be a time in Universal Coordinated Time (UTC) format and must be before the latest restorable time for the DB instance. Cannot be specified with `use_latest_restorable_time`. +* `restore_type` - (Optional) The type of restore to be performed. Valid values are `full-copy`, `copy-on-write`. +* `source_cluster_identifier` - (Required) The identifier of the source DB cluster from which to restore. Must match the identifier of an existing DB cluster. +* `use_latest_restorable_time` - (Optional) A boolean value that indicates whether the DB cluster is restored from the latest backup time. Defaults to `false`. Cannot be specified with `restore_to_time`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -138,4 +148,4 @@ Using `terraform import`, import DocumentDB Clusters using the `cluster_identifi % terraform import aws_docdb_cluster.docdb_cluster docdb-prod-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/docdb_cluster_instance.html.markdown b/website/docs/cdktf/python/r/docdb_cluster_instance.html.markdown index 7d18c09a8f4..0b5e53d20cd 100644 --- a/website/docs/cdktf/python/r/docdb_cluster_instance.html.markdown +++ b/website/docs/cdktf/python/r/docdb_cluster_instance.html.markdown @@ -58,7 +58,7 @@ class MyConvertedCode(TerraformStack): For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/docdb/create-db-instance.html). 
-This argument supports the following arguments: +This resource supports the following arguments: * `apply_immediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. @@ -157,4 +157,4 @@ Using `terraform import`, import DocumentDB Cluster Instances using the `identif % terraform import aws_docdb_cluster_instance.prod_instance_1 aurora-cluster-instance-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/drs_replication_configuration_template.html.markdown b/website/docs/cdktf/python/r/drs_replication_configuration_template.html.markdown new file mode 100644 index 00000000000..8b9caf65ba9 --- /dev/null +++ b/website/docs/cdktf/python/r/drs_replication_configuration_template.html.markdown @@ -0,0 +1,144 @@ +--- +subcategory: "DRS (Elastic Disaster Recovery)" +layout: "aws" +page_title: "AWS: drs_replication_configuration_template" +description: |- + Provides an Elastic Disaster Recovery replication configuration template resource. +--- + + + +# Resource: aws_drs_replication_configuration_template + +Provides an Elastic Disaster Recovery replication configuration template resource. Before using DRS, your account must be [initialized](https://docs.aws.amazon.com/drs/latest/userguide/getting-started-initializing.html). + +~> **NOTE:** Your configuration must use the PIT policy shown in the [basic configuration](#basic-configuration) due to AWS rules. The only value that you can change is the `retention_duration` of `rule_id` 3. + +## Example Usage + +### Basic configuration + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.drs_replication_configuration_template import DrsReplicationConfigurationTemplate +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DrsReplicationConfigurationTemplate(self, "example", + associate_default_security_group=False, + bandwidth_throttling=12, + create_public_ip=False, + data_plane_routing="PRIVATE_IP", + default_large_staging_disk_type="GP2", + ebs_encryption="DEFAULT", + ebs_encryption_key_arn="arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", + pit_policy=[DrsReplicationConfigurationTemplatePitPolicy( + enabled=True, + interval=10, + retention_duration=60, + rule_id=1, + units="MINUTE" + ), DrsReplicationConfigurationTemplatePitPolicy( + enabled=True, + interval=1, + retention_duration=24, + rule_id=2, + units="HOUR" + ), DrsReplicationConfigurationTemplatePitPolicy( + enabled=True, + interval=1, + retention_duration=3, + rule_id=3, + units="DAY" + ) + ], + replication_server_instance_type="t3.small", + replication_servers_security_groups_ids=Token.as_list( + Fn.lookup_nested(aws_security_group_example, ["*", "id"])), + staging_area_subnet_id=Token.as_string(aws_subnet_example.id), + use_dedicated_replication_server=False + ) +``` + +## Argument Reference + +The following arguments are required: + +* `associate_default_security_group` - (Required) Whether to associate the default Elastic Disaster Recovery Security group with the Replication Configuration Template. +* `bandwidth_throttling` - (Required) Configure bandwidth throttling for the outbound data transfer rate of the Source Server in Mbps. +* `create_public_ip` - (Required) Whether to create a Public IP for the Recovery Instance by default. +* `data_plane_routing` - (Required) Data plane routing mechanism that will be used for replication. Valid values are `PUBLIC_IP` and `PRIVATE_IP`.
+* `default_large_staging_disk_type` - (Required) Staging Disk EBS volume type to be used during replication. Valid values are `GP2`, `GP3`, `ST1`, or `AUTO`. +* `ebs_encryption` - (Required) Type of EBS encryption to be used during replication. Valid values are `DEFAULT` and `CUSTOM`. +* `ebs_encryption_key_arn` - (Required) ARN of the EBS encryption key to be used during replication. +* `pit_policy` - (Required) Configuration block for Point in time (PIT) policy to manage snapshots taken during replication. [See below](#pit_policy). +* `replication_server_instance_type` - (Required) Instance type to be used for the replication server. +* `replication_servers_security_groups_ids` - (Required) Security group IDs that will be used by the replication server. +* `staging_area_subnet_id` - (Required) Subnet to be used by the replication staging area. +* `staging_area_tags` - (Required) Set of tags to be associated with all resources created in the replication staging area: EC2 replication server, EBS volumes, EBS snapshots, etc. +* `use_dedicated_replication_server` - (Required) Whether to use a dedicated Replication Server in the replication staging area. + +The following arguments are optional: + +* `auto_replicate_new_disks` - (Optional) Whether to allow the AWS replication agent to automatically replicate newly added disks. +* `tags` - (Optional) Set of tags to be associated with the Replication Configuration Template resource. + +### `pit_policy` + +The PIT policies _must_ be specified as shown in the [basic configuration example](#basic-configuration) above. The only value that you can change is the `retention_duration` of `rule_id` 3. + +* `enabled` - (Optional) Whether this rule is enabled or not. +* `interval` - (Required) How often, in the chosen units, a snapshot should be taken. +* `retention_duration` - (Required) Duration to retain a snapshot for, in the chosen `units`. +* `rule_id` - (Optional) ID of the rule. Valid values are integers. 
+* `units` - (Required) Units used to measure the `interval` and `retention_duration`. Valid values are `MINUTE`, `HOUR`, and `DAY`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Replication configuration template ARN. +* `id` - Replication configuration template ID. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `20m`) +- `update` - (Default `20m`) +- `delete` - (Default `20m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DRS Replication Configuration Template using the `id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.drs_replication_configuration_template import DrsReplicationConfigurationTemplate +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + DrsReplicationConfigurationTemplate.generate_config_for_import(self, "example", "templateid") +``` + +Using `terraform import`, import DRS Replication Configuration Template using the `id`. 
For example: + +```console +% terraform import aws_drs_replication_configuration_template.example templateid +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dx_gateway_association.html.markdown b/website/docs/cdktf/python/r/dx_gateway_association.html.markdown index e685dff8b77..ab1da9b11c7 100644 --- a/website/docs/cdktf/python/r/dx_gateway_association.html.markdown +++ b/website/docs/cdktf/python/r/dx_gateway_association.html.markdown @@ -137,7 +137,7 @@ A full example of how to create a VPN Gateway in one AWS account, create a Direc ~> **NOTE:** If the `associated_gateway_id` is in another region, an [alias](https://developer.hashicorp.com/terraform/language/providers/configuration#alias-multiple-provider-configurations) in a new provider block for that region should be specified. -This argument supports the following arguments: +This resource supports the following arguments: * `dx_gateway_id` - (Required) The ID of the Direct Connect gateway. * `associated_gateway_id` - (Optional) The ID of the VGW or transit gateway with which to associate the Direct Connect gateway. @@ -190,4 +190,4 @@ Using `terraform import`, import Direct Connect gateway associations using `dx_g % terraform import aws_dx_gateway_association.example 345508c3-7215-4aef-9832-07c125d5bd0f/vgw-98765432 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_table.html.markdown b/website/docs/cdktf/python/r/dynamodb_table.html.markdown index a1fcce52b00..c54528d5c6c 100644 --- a/website/docs/cdktf/python/r/dynamodb_table.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_table.html.markdown @@ -76,7 +76,7 @@ class MyConvertedCode(TerraformStack): }, ttl=DynamodbTableTtl( attribute_name="TimeToExist", - enabled=False + enabled=True ), write_capacity=20 ) @@ -290,8 +290,10 @@ Optional arguments: ### `ttl` -* `enabled` - (Required) Whether TTL is enabled. 
-* `attribute_name` - (Required) Name of the table attribute to store the TTL timestamp in. +* `attribute_name` - (Optional) Name of the table attribute to store the TTL timestamp in. + Required if `enabled` is `true`, must not be set otherwise. +* `enabled` - (Optional) Whether TTL is enabled. + Default value is `false`. ## Attribute Reference @@ -341,4 +343,4 @@ Using `terraform import`, import DynamoDB tables using the `name`. For example: % terraform import aws_dynamodb_table.basic-dynamodb-table GameScores ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/dynamodb_table_item.html.markdown b/website/docs/cdktf/python/r/dynamodb_table_item.html.markdown index f70f51f5588..10c63bbace5 100644 --- a/website/docs/cdktf/python/r/dynamodb_table_item.html.markdown +++ b/website/docs/cdktf/python/r/dynamodb_table_item.html.markdown @@ -54,7 +54,7 @@ class MyConvertedCode(TerraformStack): ~> **Note:** Names included in `item` are represented internally with everything but letters removed. There is the possibility of collisions if two names, once filtered, are the same. For example, the names `your-name-here` and `yournamehere` will overlap and cause an error. -This argument supports the following arguments: +This resource supports the following arguments: * `hash_key` - (Required) Hash key to use for lookups and identification of the item * `item` - (Required) JSON representation of a map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item. @@ -69,4 +69,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import DynamoDB table items. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_capacity_block_reservation.html.markdown b/website/docs/cdktf/python/r/ec2_capacity_block_reservation.html.markdown new file mode 100644 index 00000000000..ca487739e56 --- /dev/null +++ b/website/docs/cdktf/python/r/ec2_capacity_block_reservation.html.markdown @@ -0,0 +1,83 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_ec2_capacity_block_reservation" +description: |- + Provides an EC2 Capacity Block Reservation. This allows you to purchase capacity block for your Amazon EC2 instances in a specific Availability Zone for machine learning (ML) Workloads. +--- + + + +# Resource: aws_ec2_capacity_block_reservation + +Provides an EC2 Capacity Block Reservation. This allows you to purchase capacity block for your Amazon EC2 instances in a specific Availability Zone for machine learning (ML) Workloads. + +~> **NOTE:** Once created, a reservation is valid for the `duration` of the provided `capacity_block_offering_id` and cannot be deleted. Performing a `destroy` will only remove the resource from state. For more information see [EC2 Capacity Block Reservation Documentation](https://aws.amazon.com/ec2/instance-types/p5/) and [PurchaseReservedDBInstancesOffering](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-blocks-pricing-billing.html). + +~> **NOTE:** Due to the expense of testing this resource, we provide it as best effort. If you find it useful, and have the ability to help test or notice issues, consider reaching out to us on [GitHub](https://github.com/hashicorp/terraform-provider-aws). + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.data_aws_ec2_capacity_block_offering import DataAwsEc2CapacityBlockOffering +from imports.aws.ec2_capacity_block_reservation import Ec2CapacityBlockReservation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + Ec2CapacityBlockReservation(self, "example", + capacity_block_offering_id=Token.as_string(test.id), + instance_platform="Linux/UNIX", + tags={ + "Environment": "dev" + } + ) + data_aws_ec2_capacity_block_offering_example = DataAwsEc2CapacityBlockOffering(self, "example_1", + capacity_duration_hours=24, + end_date="2024-05-30T15:04:05Z", + instance_count=1, + instance_platform="Linux/UNIX", + instance_type="p4d.24xlarge", + start_date="2024-04-28T15:04:05Z" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + data_aws_ec2_capacity_block_offering_example.override_logical_id("example") +``` + +## Argument Reference + +This resource supports the following arguments: + +* `capacity_block_offering_id` - (Required) The Capacity Block Reservation ID. +* `instance_platform` - (Required) The type of operating system for which to reserve capacity. Valid options are `Linux/UNIX`, `Red Hat Enterprise Linux`, `SUSE Linux`, `Windows`, `Windows with SQL Server`, `Windows with SQL Server Enterprise`, `Windows with SQL Server Standard` or `Windows with SQL Server Web`. +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The ARN of the reservation. 
+* `availability_zone` - The Availability Zone in which to create the Capacity Block Reservation. +* `created_date` - The date and time at which the Capacity Block Reservation was created. +* `ebs_optimized` - Indicates whether the Capacity Reservation supports EBS-optimized instances. +* `end_date` - The date and time at which the Capacity Block Reservation expires. When a Capacity Block Reservation expires, the reserved capacity is released and you can no longer launch instances into it. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) +* `end_date_type` - Indicates the way in which the Capacity Reservation ends. +* `id` - The ID of the Capacity Block Reservation. +* `instance_count` - The number of instances for which to reserve capacity. +* `instance_type` - The instance type for which to reserve capacity. +* `outpost_arn` - The ARN of the Outpost on which to create the Capacity Block Reservation. +* `placement_group_arn` - The ARN of the placement group in which to create the Capacity Block Reservation. +* `reservation_type` - The type of Capacity Reservation. +* `start_date` - The date and time at which the Capacity Block Reservation starts. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) +* `tenancy` - Indicates the tenancy of the Capacity Block Reservation. Specify either `default` or `dedicated`. 
+* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_capacity_reservation.html.markdown b/website/docs/cdktf/python/r/ec2_capacity_reservation.html.markdown index 6b56d1de741..80d63abc83c 100644 --- a/website/docs/cdktf/python/r/ec2_capacity_reservation.html.markdown +++ b/website/docs/cdktf/python/r/ec2_capacity_reservation.html.markdown @@ -61,6 +61,14 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - The ARN of the Capacity Reservation. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `10m`) +- `update` - (Default `10m`) +- `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Capacity Reservations using the `id`. For example: @@ -86,4 +94,4 @@ Using `terraform import`, import Capacity Reservations using the `id`. 
For examp % terraform import aws_ec2_capacity_reservation.web cr-0123456789abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_fleet.html.markdown b/website/docs/cdktf/python/r/ec2_fleet.html.markdown index f53d2a6c3fa..15bdc26800e 100644 --- a/website/docs/cdktf/python/r/ec2_fleet.html.markdown +++ b/website/docs/cdktf/python/r/ec2_fleet.html.markdown @@ -151,6 +151,7 @@ This configuration block supports the following: * `instance_generations` - (Optional) Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Valid values are `current` and `previous`. Default is `current` and `previous` generation instance types. * `local_storage` - (Optional) Indicate whether instance types with local storage volumes are `included`, `excluded`, or `required`. Default is `included`. * `local_storage_types` - (Optional) List of local storage type names. Valid values are `hdd` and `ssd`. Default any storage type. +* `max_spot_price_as_percentage_of_optimal_on_demand_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spot_max_price_percentage_over_lowest_price` * `memory_gib_per_vcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. * `min` - (Optional) The minimum amount of memory per vCPU, in GiB. To specify no minimum limit, omit this parameter. 
* `max` - (Optional) The maximum amount of memory per vCPU, in GiB. To specify no maximum limit, omit this parameter. @@ -168,7 +169,7 @@ This configuration block supports the following: If you set `target_capacity_unit_type` to `vcpu` or `memory-mib`, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. * `require_hibernate_support` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. -* `spot_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. +* `spot_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `max_spot_price_as_percentage_of_optimal_on_demand_price` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. 
@@ -269,4 +270,4 @@ Using `terraform import`, import `aws_ec2_fleet` using the Fleet identifier. For % terraform import aws_ec2_fleet.example fleet-b9b55d27-c5fc-41ac-a6f3-48fcc91f080c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_network_insights_path.html.markdown b/website/docs/cdktf/python/r/ec2_network_insights_path.html.markdown index 5f6f4108e0b..170fc0c025a 100644 --- a/website/docs/cdktf/python/r/ec2_network_insights_path.html.markdown +++ b/website/docs/cdktf/python/r/ec2_network_insights_path.html.markdown @@ -38,7 +38,7 @@ class MyConvertedCode(TerraformStack): The following arguments are required: * `source` - (Required) ID or ARN of the resource which is the source of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. -* `destination` - (Required) ID or ARN of the resource which is the destination of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. +* `destination` - (Optional) ID or ARN of the resource which is the destination of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. * `protocol` - (Required) Protocol to use for analysis. Valid options are `tcp` or `udp`. The following arguments are optional: @@ -83,4 +83,4 @@ Using `terraform import`, import Network Insights Paths using the `id`. 
For exam % terraform import aws_ec2_network_insights_path.test nip-00edfba169923aefd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment.html.markdown b/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment.html.markdown index 7b98e70f984..050c5d2c2a3 100644 --- a/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment.html.markdown +++ b/website/docs/cdktf/python/r/ec2_transit_gateway_peering_attachment.html.markdown @@ -75,9 +75,16 @@ This resource supports the following arguments: * `peer_account_id` - (Optional) Account ID of EC2 Transit Gateway to peer with. Defaults to the account ID the [AWS provider][1] is currently connected to. * `peer_region` - (Required) Region of EC2 Transit Gateway to peer with. * `peer_transit_gateway_id` - (Required) Identifier of EC2 Transit Gateway to peer with. +* `options` - (Optional) Describes whether dynamic routing is enabled or disabled for the transit gateway peering request. See [options](#options) below for more details! * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Peering Attachment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `transit_gateway_id` - (Required) Identifier of EC2 Transit Gateway. +### options + +The `options` block supports the following: + +* `dynamic_routing` - (Optional) Indicates whether dynamic routing is enabled or disabled.. Supports `enable` and `disable`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -112,4 +119,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_peering_attachment` us [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/eks_cluster.html.markdown b/website/docs/cdktf/python/r/eks_cluster.html.markdown index e3a1ad87172..922109026e6 100644 --- a/website/docs/cdktf/python/r/eks_cluster.html.markdown +++ b/website/docs/cdktf/python/r/eks_cluster.html.markdown @@ -131,7 +131,7 @@ class MyConvertedCode(TerraformStack): ### Enabling IAM Roles for Service Accounts -Only available on Kubernetes version 1.13 and 1.14 clusters created or upgraded on or after September 3, 2019. For more information about this feature, see the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). +For more information about this feature, see the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). ```python # DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -290,6 +290,7 @@ The following arguments are required: The following arguments are optional: * `access_config` - (Optional) Configuration block for the access config associated with your cluster, see [Amazon EKS Access Entries](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html). +* `bootstrap_self_managed_addons` - (Optional) Install default unmanaged add-ons, such as `aws-cni`, `kube-proxy`, and CoreDNS during cluster creation. If `false`, you must manually install desired add-ons. Changing this value will force a new cluster to be created. Defaults to `true`. * `enabled_cluster_log_types` - (Optional) List of the desired control plane logging to enable. 
For more information, see [Amazon EKS Control Plane Logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). * `encryption_config` - (Optional) Configuration block with encryption configuration for the cluster. Only available on Kubernetes 1.13 and above clusters created after March 6, 2020. Detailed below. * `kubernetes_network_config` - (Optional) Configuration block with kubernetes network configuration for the cluster. Detailed below. If removed, Terraform will only perform drift detection if a configuration value is provided. @@ -302,7 +303,7 @@ The following arguments are optional: The `access_config` configuration block supports the following arguments: * `authentication_mode` - (Optional) The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` -* `bootstrap_cluster_creator_admin_permissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `true`. +* `bootstrap_cluster_creator_admin_permissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `false`. ### encryption_config @@ -427,4 +428,4 @@ Using `terraform import`, import EKS Clusters using the `name`. For example: % terraform import aws_eks_cluster.my_cluster my_cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elastictranscoder_pipeline.html.markdown b/website/docs/cdktf/python/r/elastictranscoder_pipeline.html.markdown index 24d4bcc69a6..84995920569 100644 --- a/website/docs/cdktf/python/r/elastictranscoder_pipeline.html.markdown +++ b/website/docs/cdktf/python/r/elastictranscoder_pipeline.html.markdown @@ -45,7 +45,7 @@ class MyConvertedCode(TerraformStack): See ["Create Pipeline"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-pipeline.html) in the AWS docs for reference. 
-This argument supports the following arguments: +This resource supports the following arguments: * `aws_kms_key_arn` - (Optional) The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. * `content_config` - (Optional) The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below) @@ -136,4 +136,4 @@ Using `terraform import`, import Elastic Transcoder pipelines using the `id`. Fo % terraform import aws_elastictranscoder_pipeline.basic_pipeline 1407981661351-cttk8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/elastictranscoder_preset.html.markdown b/website/docs/cdktf/python/r/elastictranscoder_preset.html.markdown index 4290b98b4e3..be1381436ff 100644 --- a/website/docs/cdktf/python/r/elastictranscoder_preset.html.markdown +++ b/website/docs/cdktf/python/r/elastictranscoder_preset.html.markdown @@ -88,7 +88,7 @@ class MyConvertedCode(TerraformStack): See ["Create Preset"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-preset.html) in the AWS docs for reference. -This argument supports the following arguments: +This resource supports the following arguments: * `audio` - (Optional, Forces new resource) Audio parameters object (documented below). * `audio_codec_options` - (Optional, Forces new resource) Codec options for the audio parameters (documented below) @@ -198,4 +198,4 @@ Using `terraform import`, import Elastic Transcoder presets using the `id`. 
For % terraform import aws_elastictranscoder_preset.basic_preset 1407981661351-cttk8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/flow_log.html.markdown b/website/docs/cdktf/python/r/flow_log.html.markdown index fe448115553..288c4797253 100644 --- a/website/docs/cdktf/python/r/flow_log.html.markdown +++ b/website/docs/cdktf/python/r/flow_log.html.markdown @@ -232,7 +232,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** One of `eni_id`, `subnet_id`, `transit_gateway_id`, `transit_gateway_attachment_id`, or `vpc_id` must be specified. -This argument supports the following arguments: +This resource supports the following arguments: * `traffic_type` - (Required) The type of traffic to capture. Valid values: `ACCEPT`,`REJECT`, `ALL`. * `deliver_cross_account_role` - (Optional) ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. @@ -294,4 +294,4 @@ Using `terraform import`, import Flow Logs using the `id`. For example: % terraform import aws_flow_log.test_flow_log fl-1a2b3c4d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fms_resource_set.html.markdown b/website/docs/cdktf/python/r/fms_resource_set.html.markdown new file mode 100644 index 00000000000..2566298aa5f --- /dev/null +++ b/website/docs/cdktf/python/r/fms_resource_set.html.markdown @@ -0,0 +1,94 @@ +--- +subcategory: "FMS (Firewall Manager)" +layout: "aws" +page_title: "AWS: aws_fms_resource_set" +description: |- + Terraform resource for managing an AWS FMS (Firewall Manager) Resource Set. +--- + + + +# Resource: aws_fms_resource_set + +Terraform resource for managing an AWS FMS (Firewall Manager) Resource Set. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.fms_resource_set import FmsResourceSet +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + FmsResourceSet(self, "example", + resource_set=[FmsResourceSetResourceSet( + name="testing", + resource_type_list=["AWS::NetworkFirewall::Firewall"] + ) + ] + ) +``` + +## Argument Reference + +The following arguments are required: + +* `resource_set` - (Required) Details about the resource set to be created or updated. See [`resource_set` Attribute Reference](#resource_set-attribute-reference) below. + +### `resource_set` Attribute Reference + +* `name` - (Required) Descriptive name of the resource set. You can't change the name of a resource set after you create it. +* `resource_type_list` - (Required) Determines the resources that can be associated to the resource set. Depending on your setting for max results and the number of resource sets, a single call might not return the full list. +* `description` - (Optional) Description of the resource set. +* `last_update_time` - (Optional) Last time that the resource set was changed. +* `resource_set_status` - (Optional) Indicates whether the resource set is in or out of the admin's Region scope. Valid values are `ACTIVE` (Admin can manage and delete the resource set) or `OUT_OF_ADMIN_SCOPE` (Admin can view the resource set, but they can't edit or delete the resource set.) + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Resource Set. +* `id` - Unique identifier for the resource set. It's returned in the responses to create and list commands. You provide it to operations like update and delete. 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import FMS (Firewall Manager) Resource Set using the `id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.fms_resource_set import FmsResourceSet +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + FmsResourceSet.generate_config_for_import(self, "example", "resource_set-id-12345678") +``` + +Using `terraform import`, import FMS (Firewall Manager) Resource Set using the `id`. For example: + +```console +% terraform import aws_fms_resource_set.example resource_set-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/fsx_ontap_file_system.html.markdown b/website/docs/cdktf/python/r/fsx_ontap_file_system.html.markdown index 28023de5eea..4fefd2fa17e 100644 --- a/website/docs/cdktf/python/r/fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/python/r/fsx_ontap_file_system.html.markdown @@ -49,11 +49,56 @@ class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) FsxOntapFileSystem(self, "testhapairs", - deployment_type="SINGLE_AZ_2", + deployment_type="SINGLE_AZ_1", + ha_pairs=2, preferred_subnet_id=test1.id, storage_capacity=2048, subnet_ids=[test1.id], - throughput_capacity_per_ha_pair=3072 + throughput_capacity_per_ha_pair=128 + ) +``` + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.fsx_ontap_file_system import FsxOntapFileSystem +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + FsxOntapFileSystem(self, "testsingleazgen2", + deployment_type="SINGLE_AZ_2", + ha_pairs=4, + preferred_subnet_id=test1.id, + storage_capacity=4096, + subnet_ids=[test1.id], + throughput_capacity_per_ha_pair=384 + ) +``` + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.fsx_ontap_file_system import FsxOntapFileSystem +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + FsxOntapFileSystem(self, "testmultiazgen2", + deployment_type="MULTI_AZ_2", + ha_pairs=1, + preferred_subnet_id=test1.id, + storage_capacity=1024, + subnet_ids=[test1.id, test2.id], + throughput_capacity_per_ha_pair=384 ) ``` @@ -61,24 +106,24 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `1024` and `196608` for file systems with deployment_type `SINGLE_AZ_1` and `MULTI_AZ_1`. Valid values between `2048` (`1024` per ha pair) and `1048576` for file systems with deployment_type `SINGLE_AZ_2`. +* `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `1024` and `196608` for file systems with deployment_type `SINGLE_AZ_1` and `MULTI_AZ_1`. 
Valid values are between `1024` and `524288` for `MULTI_AZ_2`. Valid values between `1024` (`1024` per ha pair) and `1048576` for file systems with deployment_type `SINGLE_AZ_2`. For `SINGLE_AZ_2`, the `1048576` (1PB) maximum is only supported when using 2 or more ha_pairs, the maximum is `524288` (512TB) when using 1 ha_pair. * `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. Up to 2 subnets can be provided. * `preferred_subnet_id` - (Required) The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. * `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. -* `deployment_type` - (Optional) - The filesystem deployment type. Supports `MULTI_AZ_1`, `SINGLE_AZ_1`, and `SINGLE_AZ_2`. +* `deployment_type` - (Optional) - The filesystem deployment type. Supports `MULTI_AZ_1`, `MULTI_AZ_2`, `SINGLE_AZ_1`, and `SINGLE_AZ_2`. * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set. * `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. 
See [Disk Iops Configuration](#disk-iops-configuration) below. * `endpoint_ip_address_range` - (Optional) Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range. -* `ha_pairs` - (Optional) - The number of ha_pairs to deploy for the file system. Valid values are 1 through 12. Value of 2 or greater required for `SINGLE_AZ_2`. Only value of 1 is supported with `SINGLE_AZ_1` or `MULTI_AZ_1` but not required. +* `ha_pairs` - (Optional) - The number of ha_pairs to deploy for the file system. Valid value is 1 for `SINGLE_AZ_1` or `MULTI_AZ_1` and `MULTI_AZ_2`. Valid values are 1 through 12 for `SINGLE_AZ_2`. * `storage_type` - (Optional) - The filesystem storage type. defaults to `SSD`. * `fsx_admin_password` - (Optional) The ONTAP administrative password for the fsxadmin user that you can use to administer your file system using the ONTAP CLI and REST API. * `route_table_ids` - (Optional) Specifies the VPC route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `throughput_capacity` - (Optional) Sets the throughput capacity (in MBps) for the file system that you're creating. Valid values are `128`, `256`, `512`, `1024`, `2048`, and `4096`. This parameter is only supported when not using the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. 
-* `throughput_capacity_per_ha_pair` - (Optional) Sets the throughput capacity (in MBps) for the file system that you're creating. Valid value when using 1 ha_pair are `128`, `256`, `512`, `1024`, `2048`, and `4096`. Valid values when using 2 or more ha_pairs are `3072`,`6144`. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. +* `throughput_capacity_per_ha_pair` - (Optional) Sets the per-HA-pair throughput capacity (in MBps) for the file system that you're creating, as opposed to `throughput_capacity` which specifies the total throughput capacity for the file system. Valid values for `MULTI_AZ_1` and `SINGLE_AZ_1` are `128`, `256`, `512`, `1024`, `2048`, and `4096`. Valid values for deployment type `MULTI_AZ_2` and `SINGLE_AZ_2` are `384`,`768`,`1536`,`3072`,`6144` where `ha_pairs` is `1`. Valid values for deployment type `SINGLE_AZ_2` are `1536`, `3072`, and `6144` where `ha_pairs` is greater than 1. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. ### Disk Iops Configuration @@ -90,7 +135,9 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name of the file system. -* `dns_name` - DNS name for the file system, e.g., `fs-12345678.fsx.us-west-2.amazonaws.com` +* `dns_name` - DNS name for the file system. + + **Note:** This attribute does not apply to FSx for ONTAP file systems and is consequently not set. You can access your FSx for ONTAP file system and volumes via a [Storage Virtual Machine (SVM)](fsx_ontap_storage_virtual_machine.html) using its DNS name or IP address. * `endpoints` - The endpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. See [Endpoints](#endpoints) below.
* `id` - Identifier of the file system, e.g., `fs-12345678` * `network_interface_ids` - Set of Elastic Network Interface identifiers from which the file system is accessible The first network interface returned is the primary network interface. @@ -168,4 +215,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/globalaccelerator_cross_account_attachment.html.markdown b/website/docs/cdktf/python/r/globalaccelerator_cross_account_attachment.html.markdown index aa5256cdb1f..43238b01a0b 100644 --- a/website/docs/cdktf/python/r/globalaccelerator_cross_account_attachment.html.markdown +++ b/website/docs/cdktf/python/r/globalaccelerator_cross_account_attachment.html.markdown @@ -68,6 +68,7 @@ The following arguments are optional: * `principals` - (Optional) List of AWS account IDs that are allowed to associate resources with the accelerator. * `resource` - (Optional) List of resources to be associated with the accelerator. + * `cidr_block` - (Optional) IP address range, in CIDR format, that is specified as resource. * `endpoint_id` - (Optional) The endpoint ID for the endpoint that is specified as a AWS resource. * `region` - (Optional) The AWS Region where a shared endpoint resource is located. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -115,4 +116,4 @@ Using `terraform import`, import Global Accelerator Cross Account Attachment usi % terraform import aws_globalaccelerator_cross_account_attachment.example arn:aws:globalaccelerator::012345678910:attachment/01234567-abcd-8910-efgh-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_catalog_table.html.markdown b/website/docs/cdktf/python/r/glue_catalog_table.html.markdown index 96cc61c679a..21fb12e69d3 100644 --- a/website/docs/cdktf/python/r/glue_catalog_table.html.markdown +++ b/website/docs/cdktf/python/r/glue_catalog_table.html.markdown @@ -146,6 +146,7 @@ To add an index to an existing table, see the [`glue_partition_index` resource]( ### storage_descriptor +* `additional_locations` - (Optional) List of locations that point to the path where a Delta table is located. * `bucket_columns` - (Optional) List of reducer grouping columns, clustering columns, and bucketing columns in the table. * `columns` - (Optional) Configuration block for columns in the table. See [`columns`](#columns) below. * `compressed` - (Optional) Whether the data in the table is compressed. @@ -235,4 +236,4 @@ Using `terraform import`, import Glue Tables using the catalog ID (usually AWS a % terraform import aws_glue_catalog_table.MyTable 123456789012:MyDatabase:MyTable ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_connection.html.markdown b/website/docs/cdktf/python/r/glue_connection.html.markdown index 0c1baa7b8bb..7d00f417b5f 100644 --- a/website/docs/cdktf/python/r/glue_connection.html.markdown +++ b/website/docs/cdktf/python/r/glue_connection.html.markdown @@ -48,12 +48,12 @@ from cdktf import Token, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. 
import DataAwsSecretmanagerSecret +from imports.aws.data_aws_secretsmanager_secret import DataAwsSecretsmanagerSecret from imports.aws.glue_connection import GlueConnection class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - example = DataAwsSecretmanagerSecret(self, "example", + example = DataAwsSecretsmanagerSecret(self, "example", name="example-secret" ) aws_glue_connection_example = GlueConnection(self, "example_1", @@ -108,7 +108,7 @@ from cdktf import Token, TerraformStack # Provider bindings are generated by running `cdktf get`. # See https://cdk.tf/provider-generation for more details. # -from imports.aws. import DataAwsSecretmanagerSecret +from imports.aws.data_aws_secretsmanager_secret import DataAwsSecretsmanagerSecret from imports.aws.glue_connection import GlueConnection class MyConvertedCode(TerraformStack): def __init__(self, scope, name): @@ -124,7 +124,7 @@ class MyConvertedCode(TerraformStack): match_criteria=["template-connection"], name="example_connector" ) - example = DataAwsSecretmanagerSecret(self, "example", + example = DataAwsSecretsmanagerSecret(self, "example", name="example-secret" ) GlueConnection(self, "example_connection", @@ -193,4 +193,4 @@ Using `terraform import`, import Glue Connections using the `CATALOG-ID` (AWS ac % terraform import aws_glue_connection.MyConnection 123456789012:MyConnection ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_crawler.html.markdown b/website/docs/cdktf/python/r/glue_crawler.html.markdown index dadbc203347..474d2ac2b56 100644 --- a/website/docs/cdktf/python/r/glue_crawler.html.markdown +++ b/website/docs/cdktf/python/r/glue_crawler.html.markdown @@ -189,7 +189,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** Must specify at least one of `dynamodb_target`, `jdbc_target`, `s3_target`, `mongodb_target` or `catalog_target`. 
-This argument supports the following arguments: +This resource supports the following arguments: * `database_name` (Required) Glue database where results are written. * `name` (Required) Name of the crawler. @@ -326,4 +326,4 @@ Using `terraform import`, import Glue Crawlers using `name`. For example: % terraform import aws_glue_crawler.MyJob MyJob ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/glue_job.html.markdown b/website/docs/cdktf/python/r/glue_job.html.markdown index d373aa0a5ce..f0cdc904019 100644 --- a/website/docs/cdktf/python/r/glue_job.html.markdown +++ b/website/docs/cdktf/python/r/glue_job.html.markdown @@ -163,6 +163,7 @@ This resource supports the following arguments: * `execution_property` – (Optional) Execution property of the job. Defined below. * `glue_version` - (Optional) The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). * `execution_class` - (Optional) Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. +* `maintenance_window` – (Optional) Specifies the day of the week and hour for the maintenance window for streaming jobs. * `max_capacity` – (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. `Required` when `pythonshell` is set, accept either `0.0625` or `1.0`. Use `number_of_workers` and `worker_type` arguments instead with `glue_version` `2.0` and above. * `max_retries` – (Optional) The maximum number of times to retry this job if it fails. * `name` – (Required) The name you assign to this job. It must be unique in your account. 
@@ -229,4 +230,4 @@ Using `terraform import`, import Glue Jobs using `name`. For example: % terraform import aws_glue_job.MyJob MyJob ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_workspace_service_account.html.markdown b/website/docs/cdktf/python/r/grafana_workspace_service_account.html.markdown new file mode 100644 index 00000000000..ee7e3e49ea9 --- /dev/null +++ b/website/docs/cdktf/python/r/grafana_workspace_service_account.html.markdown @@ -0,0 +1,80 @@ +--- +subcategory: "Managed Grafana" +layout: "aws" +page_title: "AWS: aws_grafana_workspace_service_account" +description: |- + Terraform resource for managing an Amazon Managed Grafana Workspace Service Account. +--- + + + +# Resource: aws_grafana_workspace_service_account + +-> **Note:** You cannot update a service account. If you change any attribute, Terraform +will delete the current and create a new one. + +Read about Service Accounts in the [Amazon Managed Grafana user guide](https://docs.aws.amazon.com/grafana/latest/userguide/service-accounts.html). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.grafana_workspace_service_account import GrafanaWorkspaceServiceAccount +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + GrafanaWorkspaceServiceAccount(self, "example", + grafana_role="ADMIN", + name="example-admin", + workspace_id=aws_grafana_workspace_example.id + ) +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the service account. The name must be unique within the workspace, as it determines the ID associated with the service account.
+* `grafana_role` - (Required) The permission level to use for this service account. For more information about the roles and the permissions each has, see the [User roles](https://docs.aws.amazon.com/grafana/latest/userguide/Grafana-user-roles.html) documentation. +* `workspace_id` - (Required) The Grafana workspace with which the service account is associated. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `service_account_id` - Identifier of the service account in the given Grafana workspace + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Managed Grafana Workspace Service Account using the `workspace_id` and `service_account_id` separated by a comma (`,`). For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.grafana_workspace_service_account import GrafanaWorkspaceServiceAccount +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + GrafanaWorkspaceServiceAccount.generate_config_for_import(self, "example", "g-abc12345,1") +``` + +Using `terraform import`, import Managed Grafana Workspace Service Account using the `workspace_id` and `service_account_id` separated by a comma (`,`).
For example: + +```console +% terraform import aws_grafana_workspace_service_account.example g-abc12345,1 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/grafana_workspace_service_account_token.html.markdown b/website/docs/cdktf/python/r/grafana_workspace_service_account_token.html.markdown new file mode 100644 index 00000000000..0036c578961 --- /dev/null +++ b/website/docs/cdktf/python/r/grafana_workspace_service_account_token.html.markdown @@ -0,0 +1,68 @@ +--- +subcategory: "Managed Grafana" +layout: "aws" +page_title: "AWS: aws_grafana_workspace_service_account_token" +description: |- + Terraform resource for managing an Amazon Managed Grafana Workspace Service Account Token. +--- + + + +# Resource: aws_grafana_workspace_service_account_token + +-> **Note:** You cannot update a service account token. If you change any attribute, Terraform +will delete the current and create a new one. + +Read about Service Accounts Tokens in the [Amazon Managed Grafana user guide](https://docs.aws.amazon.com/grafana/latest/userguide/service-accounts.html#service-account-tokens). + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws. 
import GrafanaWorkspaceServiceAccount, GrafanaWorkspaceServiceAccountToken +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = GrafanaWorkspaceServiceAccount(self, "example", + grafana_role="ADMIN", + name="example-admin", + workspace_id=aws_grafana_workspace_example.id + ) + aws_grafana_workspace_service_account_token_example = + GrafanaWorkspaceServiceAccountToken(self, "example_1", + name="example-key", + seconds_to_live=3600, + service_account_id=example.service_account_id, + workspace_id=aws_grafana_workspace_example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_grafana_workspace_service_account_token_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the token to create. The name must be unique within the workspace. +* `seconds_to_live` - (Required) Sets how long the token will be valid, in seconds. You can set the time up to 30 days in the future. +* `service_account_id` - (Required) The ID of the service account for which to create a token. +* `workspace_id` - (Required) The Grafana workspace with which the service account token is associated. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `service_account_token_id` - Identifier of the service account token in the given Grafana workspace. +* `created_at` - Specifies when the service account token was created. +* `expires_at` - Specifies when the service account token will expire. +* `key` - The key for the service account token. Used when making calls to the Grafana HTTP APIs to authenticate and authorize the requests. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_detector_feature.html.markdown b/website/docs/cdktf/python/r/guardduty_detector_feature.html.markdown index 641b397bbb6..dcc5b8f880d 100644 --- a/website/docs/cdktf/python/r/guardduty_detector_feature.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_detector_feature.html.markdown @@ -49,19 +49,19 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: * `detector_id` - (Required) Amazon GuardDuty detector ID. -* `name` - (Required) The name of the detector feature. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. +* `name` - (Required) The name of the detector feature. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. Only one of two features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING` can be added, adding both features will cause an error. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. * `status` - (Required) The status of the detector feature. Valid values: `ENABLED`, `DISABLED`. -* `additional_configuration` - (Optional) Additional feature configuration block. See [below](#additional-configuration). +* `additional_configuration` - (Optional) Additional feature configuration block for features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING`. See [below](#additional-configuration). ### Additional Configuration The `additional_configuration` block supports the following: -* `name` - (Required) The name of the additional configuration.
Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorAdditionalConfiguration.html) for the current list of supported values. +* `name` - (Required) The name of the additional configuration for a feature. Valid values: `EKS_ADDON_MANAGEMENT`, `ECS_FARGATE_AGENT_MANAGEMENT`, `EC2_AGENT_MANAGEMENT`. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorAdditionalConfiguration.html) for the current list of supported values. * `status` - (Required) The status of the additional configuration. Valid values: `ENABLED`, `DISABLED`. ## Attribute Reference This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_malware_protection_plan.html.markdown b/website/docs/cdktf/python/r/guardduty_malware_protection_plan.html.markdown new file mode 100644 index 00000000000..8491bffe8fc --- /dev/null +++ b/website/docs/cdktf/python/r/guardduty_malware_protection_plan.html.markdown @@ -0,0 +1,112 @@ +--- +subcategory: "GuardDuty" +layout: "aws" +page_title: "AWS: aws_guardduty_malware_protection_plan" +description: |- + Provides a resource to manage a GuardDuty Malware Protection Plan +--- + + + +# Resource: aws_guardduty_malware_protection_plan + +Provides a resource to manage a GuardDuty malware protection plan. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.guardduty_malware_protection_plan import GuarddutyMalwareProtectionPlan +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + GuarddutyMalwareProtectionPlan(self, "example", + actions=[GuarddutyMalwareProtectionPlanActions( + tagging=[GuarddutyMalwareProtectionPlanActionsTagging( + status="ENABLED" + ) + ] + ) + ], + protected_resource=[GuarddutyMalwareProtectionPlanProtectedResource( + s3_bucket=[GuarddutyMalwareProtectionPlanProtectedResourceS3Bucket( + bucket_name=Token.as_string(aws_s3_bucket_example.id), + object_prefixes=["example1", "example2"] + ) + ] + ) + ], + role=Token.as_string(aws_iam_role_example.arn), + tags={ + "Name": "example" + } + ) +``` + +## Argument Reference + +This resource supports the following arguments: + +* `actions` - (Optional) Information about whether the tags will be added to the S3 object after scanning. See [`actions`](#actions-argument-reference) below. +* `protected_resource` - (Required) Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource. See [`protected_resource`](#protected_resource-argument-reference) below. +* `role` - (Required) The IAM role that includes the permissions required to scan and add tags to the associated protected resource. +* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### `actions` argument reference + +* `tagging` - (Required) Indicates whether the scanned S3 object will have tags about the scan result. See [`tagging`](#tagging-argument-reference) below. 
+ +#### `tagging` argument reference + +* `status` - (Required) Indicates whether or not the tags will be added. Valid values are `DISABLED` and `ENABLED`. Defaults to `DISABLED` + +### `protected_resource` argument reference + +* `s3_bucket` - (Required) Information about the protected S3 bucket resource. See [`s3_bucket`](#s3_bucket-argument-reference) below. + +#### `s3_bucket` argument reference + +* `bucket_name` - (Required, Forces new resource) Name of the S3 bucket. +* `object_prefixes` - (Optional) The list of object prefixes that specify the S3 objects that will be scanned. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The ARN of the GuardDuty malware protection plan +* `created_at` - The timestamp when the Malware Protection plan resource was created. +* `id` - The ID of the GuardDuty malware protection plan +* `status` - The GuardDuty malware protection plan status. Valid values are `ACTIVE`, `WARNING`, and `ERROR`. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import GuardDuty malware protection plans using their IDs. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.guardduty_malware_protection_plan import GuarddutyMalwareProtectionPlan +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + GuarddutyMalwareProtectionPlan.generate_config_for_import(self, "example", "1234567890abcdef0123") +``` + +Using `terraform import`, import GuardDuty malware protection plans using their IDs.
For example: + +```console +% terraform import aws_guardduty_malware_protection_plan.example 1234567890abcdef0123 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_organization_configuration.html.markdown b/website/docs/cdktf/python/r/guardduty_organization_configuration.html.markdown index 58407a09539..41d43872af3 100644 --- a/website/docs/cdktf/python/r/guardduty_organization_configuration.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_organization_configuration.html.markdown @@ -62,7 +62,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** One of `auto_enable` or `auto_enable_organization_members` must be specified. -This argument supports the following arguments: +This resource supports the following arguments: * `auto_enable` - (Optional) *Deprecated:* Use `auto_enable_organization_members` instead. When this setting is enabled, all new accounts that are created in, or added to, the organization are added as a member accounts of the organization’s GuardDuty delegated administrator and GuardDuty is enabled in that AWS Region. * `auto_enable_organization_members` - (Optional) Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. Valid values are `ALL`, `NEW`, `NONE`. 
@@ -147,4 +147,4 @@ Using `terraform import`, import GuardDuty Organization Configurations using the % terraform import aws_guardduty_organization_configuration.example 00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/guardduty_organization_configuration_feature.html.markdown b/website/docs/cdktf/python/r/guardduty_organization_configuration_feature.html.markdown index 31c13e2e9e7..dad87a91329 100644 --- a/website/docs/cdktf/python/r/guardduty_organization_configuration_feature.html.markdown +++ b/website/docs/cdktf/python/r/guardduty_organization_configuration_feature.html.markdown @@ -50,18 +50,18 @@ This resource supports the following arguments: * `auto_enable` - (Required) The status of the feature that is configured for the member accounts within the organization. Valid values: `NEW`, `ALL`, `NONE`. * `detector_id` - (Required) The ID of the detector that configures the delegated administrator. -* `name` - (Required) The name of the feature that will be configured for the organization. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. -* `additional_configuration` - (Optional) The additional information that will be configured for the organization See [below](#additional-configuration). +* `name` - (Required) The name of the feature that will be configured for the organization. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. Only one of two features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING` can be added, adding both features will cause an error. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. 
+* `additional_configuration` - (Optional) Additional feature configuration block for features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING`. See [below](#additional-configuration). ### Additional Configuration The `additional_configuration` block supports the following: * `auto_enable` - (Required) The status of the additional configuration that will be configured for the organization. Valid values: `NEW`, `ALL`, `NONE`. -* `name` - (Required) The name of the additional configuration that will be configured for the organization. Valid values: `EKS_ADDON_MANAGEMENT`, `ECS_FARGATE_AGENT_MANAGEMENT`, `EC2_AGENT_MANAGEMENT`. +* `name` - (Required) The name of the additional configuration for a feature that will be configured for the organization. Valid values: `EKS_ADDON_MANAGEMENT`, `ECS_FARGATE_AGENT_MANAGEMENT`, `EC2_AGENT_MANAGEMENT`. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorAdditionalConfiguration.html) for the current list of supported values. ## Attribute Reference This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iam_server_certificate.html.markdown b/website/docs/cdktf/python/r/iam_server_certificate.html.markdown index e35c76f5d2a..dfe4dd39606 100644 --- a/website/docs/cdktf/python/r/iam_server_certificate.html.markdown +++ b/website/docs/cdktf/python/r/iam_server_certificate.html.markdown @@ -147,6 +147,12 @@ This resource exports the following attributes in addition to the arguments abov * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `upload_date` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) when the server certificate was uploaded. 
+## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `delete` - (Default `15m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Server Certificates using the `name`. For example: @@ -176,4 +182,4 @@ Using `terraform import`, import IAM Server Certificates using the `name`. For e [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingServerCerts.html [lifecycle]: /docs/configuration/resources.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/imagebuilder_image_pipeline.html.markdown b/website/docs/cdktf/python/r/imagebuilder_image_pipeline.html.markdown index d5f23b66da4..4552bcbbc11 100644 --- a/website/docs/cdktf/python/r/imagebuilder_image_pipeline.html.markdown +++ b/website/docs/cdktf/python/r/imagebuilder_image_pipeline.html.markdown @@ -49,11 +49,13 @@ The following arguments are optional: * `description` - (Optional) Description of the image pipeline. * `distribution_configuration_arn` - (Optional) Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. * `enhanced_image_metadata_enabled` - (Optional) Whether additional information about the image being created is collected. Defaults to `true`. +* `execution_role` - (Optional) Amazon Resource Name (ARN) of the service-linked role to be used by Image Builder to [execute workflows](https://docs.aws.amazon.com/imagebuilder/latest/userguide/manage-image-workflows.html). * `image_recipe_arn` - (Optional) Amazon Resource Name (ARN) of the image recipe. * `image_scanning_configuration` - (Optional) Configuration block with image scanning configuration. Detailed below. * `image_tests_configuration` - (Optional) Configuration block with image tests configuration. Detailed below. * `schedule` - (Optional) Configuration block with schedule settings. Detailed below. 
* `status` - (Optional) Status of the image pipeline. Valid values are `DISABLED` and `ENABLED`. Defaults to `ENABLED`. +* `workflow` - (Optional) Configuration block with the workflow configuration. Detailed below. * `tags` - (Optional) Key-value map of resource tags for the image pipeline. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### image_scanning_configuration @@ -89,6 +91,25 @@ The following arguments are optional: * `timezone` - (Optional) The timezone that applies to the scheduling expression. For example, "Etc/UTC", "America/Los_Angeles" in the [IANA timezone format](https://www.joda.org/joda-time/timezones.html). If not specified this defaults to UTC. +### workflow + +The following arguments are required: + +* `workflow_arn` - (Required) Amazon Resource Name (ARN) of the Image Builder Workflow. + +The following arguments are optional: + +* `on_failure` - (Optional) The action to take if the workflow fails. Must be one of `CONTINUE` or `ABORT`. +* `parallel_group` - (Optional) The parallel group in which to run a test Workflow. +* `parameter` - (Optional) Configuration block for the workflow parameters. Detailed below. + +### parameter + +The following arguments are required: + +* `name` - (Required) The name of the Workflow parameter. +* `value` - (Required) The value of the Workflow parameter. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -126,4 +147,4 @@ Using `terraform import`, import `aws_imagebuilder_image_pipeline` resources usi % terraform import aws_imagebuilder_image_pipeline.example arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/iot_authorizer.html.markdown b/website/docs/cdktf/python/r/iot_authorizer.html.markdown index 49e818fecc1..d97b22b1c21 100644 --- a/website/docs/cdktf/python/r/iot_authorizer.html.markdown +++ b/website/docs/cdktf/python/r/iot_authorizer.html.markdown @@ -31,6 +31,9 @@ class MyConvertedCode(TerraformStack): name="example", signing_disabled=False, status="ACTIVE", + tags={ + "Name": "example" + }, token_key_name="Token-Header", token_signing_public_keys={ "Key1": Token.as_string( @@ -46,6 +49,7 @@ class MyConvertedCode(TerraformStack): * `name` - (Required) The name of the authorizer. * `signing_disabled` - (Optional) Specifies whether AWS IoT validates the token signature in an authorization request. Default: `false`. * `status` - (Optional) The status of Authorizer request at creation. Valid values: `ACTIVE`, `INACTIVE`. Default: `ACTIVE`. +* `tags` - (Optional) Map of tags to assign to this resource. If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `token_key_name` - (Optional) The name of the token key used to extract the token from the HTTP headers. This value is required if signing is enabled in your authorizer. * `token_signing_public_keys` - (Optional) The public keys used to verify the digital signature returned by your custom authentication service. This value is required if signing is enabled in your authorizer. 
@@ -54,6 +58,7 @@ class MyConvertedCode(TerraformStack):
 This resource exports the following attributes in addition to the arguments above:
 
 * `arn` - The ARN of the authorizer.
+* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block).
 
 ## Import
 
@@ -80,4 +85,4 @@ Using `terraform import`, import IOT Authorizers using the name. For example:
 % terraform import aws_iot_authorizer.example example
 ```
 
- \ No newline at end of file
+ \ No newline at end of file
diff --git a/website/docs/cdktf/python/r/iot_topic_rule.html.markdown b/website/docs/cdktf/python/r/iot_topic_rule.html.markdown
index 9c05c3a08fd..c1bb93d31ad 100644
--- a/website/docs/cdktf/python/r/iot_topic_rule.html.markdown
+++ b/website/docs/cdktf/python/r/iot_topic_rule.html.markdown
@@ -108,6 +108,7 @@ The `cloudwatch_alarm` object takes the following arguments:
 
 The `cloudwatch_logs` object takes the following arguments:
 
+* `batch_mode` - (Optional) Whether to batch the records: when `true`, the payload, which contains a JSON array of records, will be sent to CloudWatch via a single batch call.
 * `log_group_name` - (Required) The CloudWatch log group name.
 * `role_arn` - (Required) The IAM role ARN that allows access to the CloudWatch alarm.
 
@@ -275,4 +276,4 @@ Using `terraform import`, import IoT Topic Rules using the `name`.
For example: % terraform import aws_iot_topic_rule.rule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/kinesisanalyticsv2_application.html.markdown b/website/docs/cdktf/python/r/kinesisanalyticsv2_application.html.markdown index 4372fdd9d21..3156a113756 100644 --- a/website/docs/cdktf/python/r/kinesisanalyticsv2_application.html.markdown +++ b/website/docs/cdktf/python/r/kinesisanalyticsv2_application.html.markdown @@ -277,7 +277,7 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: * `name` - (Required) The name of the application. -* `runtime_environment` - (Required) The runtime environment for the application. Valid values: `SQL-1_0`, `FLINK-1_6`, `FLINK-1_8`, `FLINK-1_11`, `FLINK-1_13`, `FLINK-1_15`, `FLINK-1_18`. +* `runtime_environment` - (Required) The runtime environment for the application. Valid values: `SQL-1_0`, `FLINK-1_6`, `FLINK-1_8`, `FLINK-1_11`, `FLINK-1_13`, `FLINK-1_15`, `FLINK-1_18`, `FLINK-1_19`. * `service_execution_role` - (Required) The ARN of the [IAM role](/docs/providers/aws/r/iam_role.html) used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources. * `application_configuration` - (Optional) The application's configuration * `application_mode` - (Optional) The application's mode. Valid values are `STREAMING`, `INTERACTIVE`. 
@@ -538,4 +538,4 @@ Using `terraform import`, import `aws_kinesisanalyticsv2_application` using the % terraform import aws_kinesisanalyticsv2_application.example arn:aws:kinesisanalytics:us-west-2:123456789012:application/example-sql-application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lakeformation_data_lake_settings.html.markdown b/website/docs/cdktf/python/r/lakeformation_data_lake_settings.html.markdown index b4e1f2d0576..de8dac00846 100644 --- a/website/docs/cdktf/python/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/cdktf/python/r/lakeformation_data_lake_settings.html.markdown @@ -81,6 +81,7 @@ class MyConvertedCode(TerraformStack): LakeformationDataLakeSettings(self, "example", admins=[test.arn, Token.as_string(aws_iam_role_test.arn)], allow_external_data_filtering=True, + allow_full_table_external_data_access=True, authorized_session_tag_value_list=["Amazon EMR"], create_database_default_permissions=[LakeformationDataLakeSettingsCreateDatabaseDefaultPermissions( permissions=["SELECT", "ALTER", "DROP"], @@ -112,6 +113,7 @@ The following arguments are optional: * `allow_external_data_filtering` - (Optional) Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `external_data_filtering_allow_list` - (Optional) A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `authorized_session_tag_value_list` - (Optional) Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. +* `allow_full_table_external_data_access` - (Optional) Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. 
~> **NOTE:** Although optional, not including `admins`, `create_database_default_permissions`, `create_table_default_permissions`, and/or `trusted_resource_owners` results in the setting being cleared. @@ -133,4 +135,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lambda_event_source_mapping.html.markdown b/website/docs/cdktf/python/r/lambda_event_source_mapping.html.markdown index 30dd44f71dd..200a8adfbc0 100644 --- a/website/docs/cdktf/python/r/lambda_event_source_mapping.html.markdown +++ b/website/docs/cdktf/python/r/lambda_event_source_mapping.html.markdown @@ -286,7 +286,7 @@ class MyConvertedCode(TerraformStack): ### scaling_config Configuration Block -* `maximum_concurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between `2` and `1000`. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). +* `maximum_concurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to `2`. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase the concurrency beyond 1000. 
### self_managed_event_source Configuration Block @@ -340,4 +340,4 @@ Using `terraform import`, import Lambda event source mappings using the `UUID` ( % terraform import aws_lambda_event_source_mapping.event_source_mapping 12345kxodurf3443 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/launch_template.html.markdown b/website/docs/cdktf/python/r/launch_template.html.markdown index 870e3e8665b..ac5316abc53 100644 --- a/website/docs/cdktf/python/r/launch_template.html.markdown +++ b/website/docs/cdktf/python/r/launch_template.html.markdown @@ -122,7 +122,7 @@ This resource supports the following arguments: * `hibernation_options` - (Optional) The hibernation options for the instance. See [Hibernation Options](#hibernation-options) below for more details. * `iam_instance_profile` - (Optional) The IAM Instance Profile to launch the instance with. See [Instance Profile](#instance-profile) below for more details. -* `image_id` - (Optional) The AMI from which to launch the instance. +* `image_id` - (Optional) The AMI from which to launch the instance or use a Systems Manager parameter convention e.g. `resolve:ssm:parameter-name`. See [docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id) for more details. * `instance_initiated_shutdown_behavior` - (Optional) Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`). * `instance_market_options` - (Optional) The market (purchasing) option for the instance. See [Market Options](#market-options) @@ -224,7 +224,7 @@ Attach an elastic GPU the instance. 
The `elastic_gpu_specifications` block supports the following: -* `type` - The [Elastic GPU Type](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-gpus.html#elastic-gpus-basics) +* `type` - The [Elastic GPU Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-graphics.html#elastic-graphics-basics) ### Elastic Inference Accelerator @@ -343,6 +343,7 @@ This configuration block supports the following: * ssd - solid state drive ``` +* `max_spot_price_as_percentage_of_optimal_on_demand_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spot_max_price_percentage_over_lowest_price` * `memory_gib_per_vcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. * `min` - (Optional) Minimum. May be a decimal number, e.g. `0.5`. * `max` - (Optional) Maximum. May be a decimal number, e.g. `0.5`. @@ -359,7 +360,7 @@ This configuration block supports the following: If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. * `require_hibernate_support` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. -* `spot_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for Spot Instances. 
This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. +* `spot_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `max_spot_price_as_percentage_of_optimal_on_demand_price` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. * `total_local_storage_gb` - (Optional) Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. @@ -518,4 +519,4 @@ Using `terraform import`, import Launch Templates using the `id`. 
For example: % terraform import aws_launch_template.web lt-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lb_target_group.html.markdown b/website/docs/cdktf/python/r/lb_target_group.html.markdown index 8f416b846e8..d6365ee1dc2 100644 --- a/website/docs/cdktf/python/r/lb_target_group.html.markdown +++ b/website/docs/cdktf/python/r/lb_target_group.html.markdown @@ -138,6 +138,41 @@ class MyConvertedCode(TerraformStack): ) ``` +### Target group with health requirements + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.lb_target_group import LbTargetGroup +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + LbTargetGroup(self, "tcp-example", + name="tf-example-lb-nlb-tg", + port=80, + protocol="TCP", + target_group_health=[{ + "dns_failover": [{ + "minimum_healthy_targets_count": "1", + "minimum_healthy_targets_percentage": "off" + } + ], + "unhealthy_state_routing": [{ + "minimum_healthy_targets_count": "1", + "minimum_healthy_targets_percentage": "off" + } + ] + } + ], + vpc_id=main.id + ) +``` + ## Argument Reference This resource supports the following arguments: @@ -164,6 +199,7 @@ This resource supports the following arguments: * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `target_failover` - (Optional) Target failover block. Only applicable for Gateway Load Balancer target groups. 
See [target_failover](#target_failover) for more information. * `target_health_state` - (Optional) Target health state block. Only applicable for Network Load Balancer target groups when `protocol` is `TCP` or `TLS`. See [target_health_state](#target_health_state) for more information. +* `target_group_health` - (Optional) Target health requirements block. See [target_group_health](#target_group_health) for more information. * `target_type` - (Optional, Forces new resource) Type of target that you must specify when registering targets with this target group. See [doc](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateTargetGroup.html) for supported values. The default is `instance`. @@ -228,6 +264,29 @@ This resource supports the following arguments: * `enable_unhealthy_connection_termination` - (Optional) Indicates whether the load balancer terminates connections to unhealthy targets. Possible values are `true` or `false`. Default: `true`. +### target_group_health + +~> **NOTE:** This block is only supported by Application Load Balancers and Network Load Balancers. + +The `target_group_health` block supports the following: + +* `dns_failover` - (Optional) Block to configure DNS Failover requirements. See [DNS Failover](#dns_failover) below for details on attributes. +* `unhealthy_state_routing` - (Optional) Block to configure Unhealthy State Routing requirements. See [Unhealthy State Routing](#unhealthy_state_routing) below for details on attributes. + +### dns_failover + +The `dns_failover` block supports the following: + +* `minimum_healthy_targets_count` - (Optional) The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from `1` to the maximum number of targets. The default is `off`. 
+* `minimum_healthy_targets_percentage` - (Optional) The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from `1` to `100`. The default is `off`. + +### unhealthy_state_routing + +The `unhealthy_state_routing` block supports the following: + +* `minimum_healthy_targets_count` - (Optional) The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `1` to the maximum number of targets. The default is `1`. +* `minimum_healthy_targets_percentage` - (Optional) The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from `1` to `100`. The default is `off`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -264,4 +323,4 @@ Using `terraform import`, import Target Groups using their ARN. For example: % terraform import aws_lb_target_group.app_front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:targetgroup/app-front-end/20cfe21448b66314 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/lightsail_container_service.html.markdown b/website/docs/cdktf/python/r/lightsail_container_service.html.markdown index 8eb45711500..6ea0783ee6a 100644 --- a/website/docs/cdktf/python/r/lightsail_container_service.html.markdown +++ b/website/docs/cdktf/python/r/lightsail_container_service.html.markdown @@ -131,7 +131,7 @@ class MyConvertedCode(TerraformStack): container service. 
For more information, see [Enabling and managing custom domains for your Amazon Lightsail container services](https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-container-services-certificates). -This argument supports the following arguments: +This resource supports the following arguments: * `name` - (Required) The name for the container service. Names must be of length 1 to 63, and be unique within each AWS Region in your Lightsail account. @@ -218,4 +218,4 @@ Using `terraform import`, import Lightsail Container Service using the `name`. F % terraform import aws_lightsail_container_service.my_container_service container-service-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/medialive_channel.html.markdown b/website/docs/cdktf/python/r/medialive_channel.html.markdown index f7234f72403..a3a34212312 100644 --- a/website/docs/cdktf/python/r/medialive_channel.html.markdown +++ b/website/docs/cdktf/python/r/medialive_channel.html.markdown @@ -152,8 +152,8 @@ The following arguments are optional: ### Input Settings -* `audio_selectors` - (Optional) Used to select the audio stream to decode for inputs that have multiple. See [Audio Selectors](#audio-selectors) for more details. -* `caption_selectors` - (Optional) Used to select the caption input to use for inputs that have multiple available. See [Caption Selectors](#caption-selectors) for more details. +* `audio_selector` - (Optional) Used to select the audio stream to decode for inputs that have multiple. See [Audio Selectors](#audio-selectors) for more details. +* `caption_selector` - (Optional) Used to select the caption input to use for inputs that have multiple available. See [Caption Selectors](#caption-selectors) for more details. * `deblock_filter` - (Optional) Enable or disable the deblock filter when filtering. * `denoise_filter` - (Optional) Enable or disable the denoise filter when filtering. 
* `filter_strength` - (Optional) Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest). @@ -811,4 +811,4 @@ Using `terraform import`, import MediaLive Channel using the `channel_id`. For e % terraform import aws_medialive_channel.example 1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mskconnect_connector.html.markdown b/website/docs/cdktf/python/r/mskconnect_connector.html.markdown index a9fb55448ce..24f2e9facde 100644 --- a/website/docs/cdktf/python/r/mskconnect_connector.html.markdown +++ b/website/docs/cdktf/python/r/mskconnect_connector.html.markdown @@ -77,105 +77,145 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `capacity` - (Required) Information about the capacity allocated to the connector. See below. +* `capacity` - (Required) Information about the capacity allocated to the connector. See [`capacity` Block](#capacity-block) for details. * `connector_configuration` - (Required) A map of keys to values that represent the configuration for the connector. -* `description` - (Optional) A summary description of the connector. -* `kafka_cluster` - (Required) Specifies which Apache Kafka cluster to connect to. See below. -* `kafka_cluster_client_authentication` - (Required) Details of the client authentication used by the Apache Kafka cluster. See below. -* `kafka_cluster_encryption_in_transit` - (Required) Details of encryption in transit to the Apache Kafka cluster. See below. +* `kafka_cluster` - (Required) Specifies which Apache Kafka cluster to connect to. See [`kafka_cluster` Block](#kafka_cluster-block) for details. +* `kafka_cluster_client_authentication` - (Required) Details of the client authentication used by the Apache Kafka cluster. See [`kafka_cluster_client_authentication` Block](#kafka_cluster_client_authentication-block) for details. 
+* `kafka_cluster_encryption_in_transit` - (Required) Details of encryption in transit to the Apache Kafka cluster. See [`kafka_cluster_encryption_in_transit` Block](#kafka_cluster_encryption_in_transit-block) for details. * `kafkaconnect_version` - (Required) The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins. -* `log_delivery` - (Optional) Details about log delivery. See below. * `name` - (Required) The name of the connector. -* `plugin` - (Required) Specifies which plugins to use for the connector. See below. +* `plugin` - (Required) Specifies which plugins to use for the connector. See [`plugin` Block](#plugin-block) for details. * `service_execution_role_arn` - (Required) The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket. -* `worker_configuration` - (Optional) Specifies which worker configuration to use with the connector. See below. -### capacity Configuration Block +The following arguments are optional: + +* `description` - (Optional) A summary description of the connector. +* `log_delivery` - (Optional) Details about log delivery. See [`log_delivery` Block](#log_delivery-block) for details. +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `worker_configuration` - (Optional) Specifies which worker configuration to use with the connector. See [`worker_configuration` Block](#worker_configuration-block) for details. 
+ +### `capacity` Block + +The `capacity` configuration block supports the following arguments: + +* `autoscaling` - (Optional) Information about the auto scaling parameters for the connector. See [`autoscaling` Block](#autoscaling-block) for details. +* `provisioned_capacity` - (Optional) Details about a fixed capacity allocated to a connector. See [`provisioned_capacity` Block](#provisioned_capacity-block) for details. -* `autoscaling` - (Optional) Information about the auto scaling parameters for the connector. See below. -* `provisioned_capacity` - (Optional) Details about a fixed capacity allocated to a connector. See below. +### `autoscaling` Block -### autoscaling Configuration Block +The `autoscaling` configuration block supports the following arguments: * `max_worker_count` - (Required) The maximum number of workers allocated to the connector. * `mcu_count` - (Optional) The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: `1`, `2`, `4`, `8`. The default value is `1`. * `min_worker_count` - (Required) The minimum number of workers allocated to the connector. -* `scale_in_policy` - (Optional) The scale-in policy for the connector. See below. -* `scale_out_policy` - (Optional) The scale-out policy for the connector. See below. +* `scale_in_policy` - (Optional) The scale-in policy for the connector. See [`scale_in_policy` Block](#scale_in_policy-block) for details. +* `scale_out_policy` - (Optional) The scale-out policy for the connector. See [`scale_out_policy` Block](#scale_out_policy-block) for details. -### scale_in_policy Configuration Block +### `scale_in_policy` Block + +The `scale_in_policy` configuration block supports the following arguments: * `cpu_utilization_percentage` - (Required) Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered. 
-### scale_out_policy Configuration Block +### `scale_out_policy` Block + +The `scale_out_policy` configuration block supports the following arguments: * `cpu_utilization_percentage` - (Required) The CPU utilization percentage threshold at which you want connector scale out to be triggered. -### provisioned_capacity Configuration Block +### `provisioned_capacity` Block + +The `provisioned_capacity` configuration block supports the following arguments: * `mcu_count` - (Optional) The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: `1`, `2`, `4`, `8`. The default value is `1`. * `worker_count` - (Required) The number of workers that are allocated to the connector. -### kafka_cluster Configuration Block +### `kafka_cluster` Block + +The `kafka_cluster` configuration block supports the following arguments: + +* `apache_kafka_cluster` - (Required) The Apache Kafka cluster to which the connector is connected. See [`apache_kafka_cluster` Block](#apache_kafka_cluster-block) for details. -* `apache_kafka_cluster` - (Required) The Apache Kafka cluster to which the connector is connected. +### `apache_kafka_cluster` Block -### apache_kafka_cluster Configuration Block +The `apache_kafka_cluster` configuration block supports the following arguments: * `bootstrap_servers` - (Required) The bootstrap servers of the cluster. -* `vpc` - (Required) Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. +* `vpc` - (Required) Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. See [`vpc` Block](#vpc-block) for details. -### vpc Configuration Block +### `vpc` Block + +The `vpc` configuration block supports the following arguments: * `security_groups` - (Required) The security groups for the connector. * `subnets` - (Required) The subnets for the connector. 
+* `s3` - (Optional) Details about delivering logs to Amazon S3. See [`s3` Block](#s3-block) for details.
+The `plugin` configuration block supports the following arguments:
+* `name` - (Required, Forces new resource) The name of the custom plugin.
+* `s3` - (Required, Forces new resource) Information of the plugin file stored in Amazon S3. See [`s3` Block](#s3-block) for details.
* `state` - the state of the custom plugin. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts @@ -116,4 +122,4 @@ Using `terraform import`, import MSK Connect Custom Plugin using the plugin's `a % terraform import aws_mskconnect_custom_plugin.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:custom-plugin/debezium-example/abcdefgh-1234-5678-9abc-defghijklmno-4' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mskconnect_worker_configuration.html.markdown b/website/docs/cdktf/python/r/mskconnect_worker_configuration.html.markdown index dd7f1891c93..9c180cc43ee 100644 --- a/website/docs/cdktf/python/r/mskconnect_worker_configuration.html.markdown +++ b/website/docs/cdktf/python/r/mskconnect_worker_configuration.html.markdown @@ -38,12 +38,13 @@ class MyConvertedCode(TerraformStack): The following arguments are required: -* `name` - (Required) The name of the worker configuration. -* `properties_file_content` - (Required) Contents of connect-distributed.properties file. The value can be either base64 encoded or in raw format. +* `name` - (Required, Forces new resource) The name of the worker configuration. +* `properties_file_content` - (Required, Forces new resource) Contents of connect-distributed.properties file. The value can be either base64 encoded or in raw format. The following arguments are optional: -* `description` - (Optional) A summary description of the worker configuration. +* `description` - (Optional, Forces new resource) A summary description of the worker configuration. +* `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ## Attribute Reference @@ -51,6 +52,13 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - the Amazon Resource Name (ARN) of the worker configuration. * `latest_revision` - an ID of the latest successfully created revision of the worker configuration. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `delete` - (Default `10m`) ## Import @@ -77,4 +85,4 @@ Using `terraform import`, import MSK Connect Worker Configuration using the plug % terraform import aws_mskconnect_worker_configuration.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:worker-configuration/example/8848493b-7fcc-478c-a646-4a52634e3378-4' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/mwaa_environment.html.markdown b/website/docs/cdktf/python/r/mwaa_environment.html.markdown index 5c1c6189380..21b5e6155c9 100644 --- a/website/docs/cdktf/python/r/mwaa_environment.html.markdown +++ b/website/docs/cdktf/python/r/mwaa_environment.html.markdown @@ -159,6 +159,7 @@ This resource supports the following arguments: * `airflow_configuration_options` - (Optional) The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options. 
* `airflow_version` - (Optional) Airflow version of your environment, will be set by default to the latest version that MWAA supports. * `dag_s3_path` - (Required) The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). +* `endpoint_management` - (Optional) Defines whether the VPC endpoints configured for the environment are created and managed by the customer or by AWS. If set to `SERVICE`, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to `CUSTOMER`, you must create, and manage, the VPC endpoints for your VPC. Defaults to `SERVICE` if not set. * `environment_class` - (Optional) Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes. * `execution_role_arn` - (Required) The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification. * `kms_key` - (Optional) The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information. @@ -250,4 +251,4 @@ Using `terraform import`, import MWAA Environment using `Name`. 
For example: % terraform import aws_mwaa_environment.example MyAirflowEnvironment ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkfirewall_tls_inspection_configuration.html.markdown b/website/docs/cdktf/python/r/networkfirewall_tls_inspection_configuration.html.markdown new file mode 100644 index 00000000000..37fcf35aea5 --- /dev/null +++ b/website/docs/cdktf/python/r/networkfirewall_tls_inspection_configuration.html.markdown @@ -0,0 +1,473 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_tls_inspection_configuration" +description: |- + Terraform resource for managing an AWS Network Firewall TLS Inspection Configuration. +--- + + + +# Resource: aws_networkfirewall_tls_inspection_configuration + +Terraform resource for managing an AWS Network Firewall TLS Inspection Configuration. + +## Example Usage + +~> **NOTE:** You must configure either inbound inspection, outbound inspection, or both. + +### Basic inbound/ingress inspection + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws. 
+### Basic outbound/egress inspection
+from imports.aws.networkfirewall_tls_inspection_configuration import NetworkfirewallTlsInspectionConfiguration
+from imports.aws.networkfirewall_tls_inspection_configuration import NetworkfirewallTlsInspectionConfiguration
+~> **NOTE:** To check the certificate revocation status, you must also specify a `certificate_authority_arn` in `server_certificate_configuration`.
+
+* `revoked_status_action` - (Optional) How Network Firewall processes traffic when it determines that the certificate presented by the server in the SSL/TLS connection has a revoked status. See [Checking certificate revocation status](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html#tls-inspection-check-certificate-revocation-status) for details. Valid values: `PASS`, `DROP`, `REJECT`.
+* `unknown_status_action` - (Optional) How Network Firewall processes traffic when it determines that the certificate presented by the server in the SSL/TLS connection has an unknown status, or a status that cannot be determined for any other reason, including when the service is unable to connect to the OCSP and CRL endpoints for the certificate. See [Checking certificate revocation status](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html#tls-inspection-check-certificate-revocation-status) for details. Valid values: `PASS`, `DROP`, `REJECT`.
+* `from_port` - (Required) The lower limit of the port range. This must be less than or equal to the `to_port`.
+* `to_port` - (Optional) The upper limit of the port range. This must be greater than or equal to the `from_port`.
+from imports.aws.networkfirewall_tls_inspection_configuration import NetworkfirewallTlsInspectionConfiguration
For example: + +```console +% terraform import aws_networkfirewall_tls_inspection_configuration.example arn:aws:network-firewall::::tls-configuration/example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmonitor_monitor.html.markdown b/website/docs/cdktf/python/r/networkmonitor_monitor.html.markdown new file mode 100644 index 00000000000..ec8d2ceee22 --- /dev/null +++ b/website/docs/cdktf/python/r/networkmonitor_monitor.html.markdown @@ -0,0 +1,80 @@ +--- +subcategory: "CloudWatch Network Monitor" +layout: "aws" +page_title: "AWS: aws_networkmonitor_monitor" +description: |- + Terraform resource for managing an Amazon Network Monitor Monitor. +--- + + + +# Resource: aws_networkmonitor_monitor + +Terraform resource for managing an AWS Network Monitor Monitor. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkmonitor_monitor import NetworkmonitorMonitor +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkmonitorMonitor(self, "example", + aggregation_period=30, + monitor_name="example" + ) +``` + +## Argument Reference + +The following arguments are required: + +- `monitor_name` - (Required) The name of the monitor. + +The following arguments are optional: + +- `aggregation_period` - (Optional) The time, in seconds, that metrics are aggregated and sent to Amazon CloudWatch. Valid values are either 30 or 60. +- `tags` - (Optional) Key-value tags for the monitor. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +- `arn` - The ARN of the monitor. +- `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmonitor_monitor` using the monitor name. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkmonitor_monitor import NetworkmonitorMonitor +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkmonitorMonitor.generate_config_for_import(self, "example", "monitor-7786087912324693644") +``` + +Using `terraform import`, import `aws_networkmonitor_monitor` using the monitor name. 
For example: + +```console +% terraform import aws_networkmonitor_monitor.example monitor-7786087912324693644 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/networkmonitor_probe.html.markdown b/website/docs/cdktf/python/r/networkmonitor_probe.html.markdown new file mode 100644 index 00000000000..75e75077711 --- /dev/null +++ b/website/docs/cdktf/python/r/networkmonitor_probe.html.markdown @@ -0,0 +1,96 @@ +--- +subcategory: "CloudWatch Network Monitor" +layout: "aws" +page_title: "AWS: aws_networkmonitor_probe" +description: |- + Terraform resource for managing an Amazon Network Monitor Probe. +--- + + + +# Resource: aws_networkmonitor_probe + +Terraform resource for managing an AWS Network Monitor Probe. + +## Example Usage + +### Basic Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkmonitor_monitor import NetworkmonitorMonitor +from imports.aws.networkmonitor_probe import NetworkmonitorProbe +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = NetworkmonitorMonitor(self, "example", + aggregation_period=30, + monitor_name="example" + ) + aws_networkmonitor_probe_example = NetworkmonitorProbe(self, "example_1", + destination="127.0.0.1", + destination_port=80, + monitor_name=example.monitor_name, + packet_size=200, + protocol="TCP", + source_arn=Token.as_string(aws_subnet_example.arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_networkmonitor_probe_example.override_logical_id("example") +``` + +## Argument Reference + +The following arguments are required: + +- `destination` - (Required) The destination IP address. This must be either IPV4 or IPV6. +- `destination_port` - (Optional) The port associated with the destination. This is required only if the protocol is TCP and must be a number between 1 and 65536. +- `monitor_name` - (Required) The name of the monitor. +- `protocol` - (Required) The protocol used for the network traffic between the source and destination. This must be either TCP or ICMP. +- `source_arn` - (Required) The ARN of the subnet. +- `packet_size` - (Optional) The size of the packets sent between the source and destination. This must be a number between 56 and 8500. + +The following arguments are optional: + +- `tags` - (Optional) Key-value tags for the monitor. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +- `arn` - The ARN of the attachment. +- `source_arn` - The ARN of the subnet. +- `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmonitor_probe` using the monitor name and probe id. For example: + +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.networkmonitor_probe import NetworkmonitorProbe +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + NetworkmonitorProbe.generate_config_for_import(self, "example", "monitor-7786087912324693644,probe-3qm8p693i4fi1h8lqylzkbp42e") +``` + +Using `terraform import`, import `aws_networkmonitor_probe` using the monitor name and probe id. For example: + +```console +% terraform import aws_networkmonitor_probe.example monitor-7786087912324693644,probe-3qm8p693i4fi1h8lqylzkbp42e +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/oam_link.html.markdown b/website/docs/cdktf/python/r/oam_link.html.markdown index 592d8b4d770..4d11a696951 100644 --- a/website/docs/cdktf/python/r/oam_link.html.markdown +++ b/website/docs/cdktf/python/r/oam_link.html.markdown @@ -38,6 +38,58 @@ class MyConvertedCode(TerraformStack): ) ``` +### Log Group Filtering + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.oam_link import OamLink +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OamLink(self, "example", + label_template="$AccountName", + link_configuration=OamLinkLinkConfiguration( + log_group_configuration=OamLinkLinkConfigurationLogGroupConfiguration( + filter="LogGroupName LIKE 'aws/lambda/%' OR LogGroupName LIKE 'AWSLogs%'" + ) + ), + resource_types=["AWS::Logs::LogGroup"], + sink_identifier=test.id + ) +``` + +### Metric Filtering + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.oam_link import OamLink +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + OamLink(self, "example", + label_template="$AccountName", + link_configuration=OamLinkLinkConfiguration( + metric_configuration=OamLinkLinkConfigurationMetricConfiguration( + filter="Namespace IN ('AWS/EC2', 'AWS/ELB', 'AWS/S3')" + ) + ), + resource_types=["AWS::CloudWatch::Metric"], + sink_identifier=test.id + ) +``` + ## Argument Reference The following arguments are required: @@ -48,13 +100,34 @@ The following arguments are required: The following arguments are optional: +* `link_configuration` - (Optional) Configuration for creating filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account. See [`link_configuration` Block](#link_configuration-block) for details. * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +### `link_configuration` Block + +The `link_configuration` configuration block supports the following arguments: + +* `log_group_configuration` - (Optional) Configuration for filtering which log groups are to send log events from the source account to the monitoring account. See [`log_group_configuration` Block](#log_group_configuration-block) for details. +* `metric_configuration` - (Optional) Configuration for filtering which metric namespaces are to be shared from the source account to the monitoring account. See [`metric_configuration` Block](#metric_configuration-block) for details. + +### `log_group_configuration` Block + +The `log_group_configuration` configuration block supports the following arguments: + +* `filter` - (Required) Filter string that specifies which log groups are to share their log events with the monitoring account. See [LogGroupConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_LogGroupConfiguration.html) for details. + +### `metric_configuration` Block + +The `metric_configuration` configuration block supports the following arguments: + +* `filter` - (Required) Filter string that specifies which metrics are to be shared with the monitoring account. See [MetricConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_MetricConfiguration.html) for details. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the link. +* `id` - ARN of the link. * `label` - Label that is assigned to this link. * `link_id` - ID string that AWS generated as part of the link ARN. * `sink_arn` - ARN of the sink that is used for this link. 
@@ -92,4 +165,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Link us % terraform import aws_oam_link.example arn:aws:oam:us-west-2:123456789012:link/link-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/oam_sink.html.markdown b/website/docs/cdktf/python/r/oam_sink.html.markdown index df144bc07b3..2ee2f1c021c 100644 --- a/website/docs/cdktf/python/r/oam_sink.html.markdown +++ b/website/docs/cdktf/python/r/oam_sink.html.markdown @@ -51,6 +51,7 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Sink. +* `id` - ARN of the Sink. * `sink_id` - ID string that AWS generated as part of the sink ARN. ## Timeouts @@ -86,4 +87,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Sink us % terraform import aws_oam_sink.example arn:aws:oam:us-west-2:123456789012:sink/sink-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/oam_sink_policy.html.markdown b/website/docs/cdktf/python/r/oam_sink_policy.html.markdown index 132a53f87d9..c93c22a4396 100644 --- a/website/docs/cdktf/python/r/oam_sink_policy.html.markdown +++ b/website/docs/cdktf/python/r/oam_sink_policy.html.markdown @@ -70,6 +70,7 @@ The following arguments are required: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Sink. +* `id` - ARN of the sink to attach this policy to. * `sink_id` - ID string that AWS generated as part of the sink ARN. 
## Timeouts @@ -104,4 +105,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Sink Po % terraform import aws_oam_sink_policy.example arn:aws:oam:us-west-2:123456789012:sink/sink-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/opensearch_domain.html.markdown b/website/docs/cdktf/python/r/opensearch_domain.html.markdown index 3e11652c455..819e8d9006e 100644 --- a/website/docs/cdktf/python/r/opensearch_domain.html.markdown +++ b/website/docs/cdktf/python/r/opensearch_domain.html.markdown @@ -395,6 +395,7 @@ The following arguments are optional: * `engine_version` - (Optional) Either `Elasticsearch_X.Y` or `OpenSearch_X.Y` to specify the engine version for the Amazon OpenSearch Service domain. For example, `OpenSearch_1.0` or `Elasticsearch_7.9`. See [Creating and managing Amazon OpenSearch Service domains](http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomains). Defaults to the lastest version of OpenSearch. +* `ip_address_type` - (Optional) The IP address type for the endpoint. Valid values are `ipv4` and `dualstack`. * `encrypt_at_rest` - (Optional) Configuration block for encrypt at rest options. Only available for [certain instance types](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/encryption-at-rest.html). Detailed below. * `log_publishing_options` - (Optional) Configuration block for publishing slow and application logs to CloudWatch Logs. This block can be declared multiple times, for each log_type, within the same resource. Detailed below. * `node_to_node_encryption` - (Optional) Configuration block for node-to-node encryption options. Detailed below. @@ -580,4 +581,4 @@ Using `terraform import`, import OpenSearch domains using the `domain_name`. 
For % terraform import aws_opensearch_domain.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/quicksight_account_subscription.html.markdown b/website/docs/cdktf/python/r/quicksight_account_subscription.html.markdown index a7a5815eac0..766054e91f2 100644 --- a/website/docs/cdktf/python/r/quicksight_account_subscription.html.markdown +++ b/website/docs/cdktf/python/r/quicksight_account_subscription.html.markdown @@ -53,6 +53,7 @@ The following arguments are optional: * `directory_id` - (Optional) Active Directory ID that is associated with your Amazon QuickSight account. * `email_address` - (Optional) Email address of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `first_name` - (Optional) First name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. +* `iam_identity_center_instance_arn` - (Optional) The Amazon Resource Name (ARN) for the IAM Identity Center instance. * `last_name` - (Optional) Last name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `reader_group` - (Optional) Reader group associated with your Active Direcrtory. * `realm` - (Optional) Realm of the Active Directory that is associated with your Amazon QuickSight account. @@ -74,4 +75,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import this resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_certificate.html.markdown b/website/docs/cdktf/python/r/rds_certificate.html.markdown new file mode 100644 index 00000000000..9f5c6227491 --- /dev/null +++ b/website/docs/cdktf/python/r/rds_certificate.html.markdown @@ -0,0 +1,71 @@ +--- +subcategory: "RDS (Relational Database)" +layout: "aws" +page_title: "AWS: aws_rds_certificate" +description: |- + Terraform resource for managing an AWS RDS (Relational Database) Certificate. +--- + + + +# Resource: aws_rds_certificate + +Provides a resource to override the system-default Secure Sockets Layer/Transport Layer Security (SSL/TLS) certificate for Amazon RDS for new DB instances in the current AWS region. + +~> **NOTE:** Removing this Terraform resource removes the override. New DB instances will use the system-default certificate for the current AWS region. + +## Example Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.rds_certificate import RdsCertificate +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + RdsCertificate(self, "example", + certificate_identifier="rds-ca-rsa4096-g1" + ) +``` + +## Argument Reference + +The following arguments are required: + +* `certificate_identifier` - (Required) Certificate identifier. For example, `rds-ca-rsa4096-g1`. Refer to [AWS RDS (Relational Database) Certificate Identifier](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.CertificateIdentifier) for more information. + +## Attribute Reference + +This resource exports no additional attributes. 
+ +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the RDS certificate override. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.rds_certificate import RdsCertificate +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + RdsCertificate.generate_config_for_import(self, "example", "${default}") +``` + +Using `terraform import`, import the RDS certificate override. For example: + +```console +% terraform import aws_rds_certificate.example default +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster.html.markdown b/website/docs/cdktf/python/r/rds_cluster.html.markdown index bb3355238c5..67cdf535cfd 100644 --- a/website/docs/cdktf/python/r/rds_cluster.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster.html.markdown @@ -18,6 +18,8 @@ Changes to an RDS Cluster can occur when you manually change a parameter, such a ~> **Note:** Multi-AZ DB clusters are supported only for the MySQL and PostgreSQL DB engines. +~> **Note:** `ca_certificate_identifier` is only supported for Multi-AZ DB clusters. + ~> **Note:** using `apply_immediately` can result in a brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][4] for more information. ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. 
@@ -291,7 +293,7 @@ the AWS official documentation : * [create-db-cluster](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-cluster.html) * [modify-db-cluster](https://docs.aws.amazon.com/cli/latest/reference/rds/modify-db-cluster.html) -This argument supports the following arguments: +This resource supports the following arguments: * `allocated_storage` - (Optional, Required for Multi-AZ DB cluster) The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. * `allow_major_version_upgrade` - (Optional) Enable to allow major engine version upgrades when changing engine versions. Defaults to `false`. @@ -302,6 +304,7 @@ This argument supports the following arguments: A maximum of 3 AZs can be configured. * `backtrack_window` - (Optional) Target backtrack window, in seconds. Only available for `aurora` and `aurora-mysql` engines currently. To disable backtracking, set this value to `0`. Defaults to `0`. Must be between `0` and `259200` (72 hours) * `backup_retention_period` - (Optional) Days to retain backups for. Default `1` +* `ca_certificate_identifier` - (Optional) The CA certificate identifier to use for the DB cluster's server certificate. * `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifier`. * `cluster_identifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `copy_tags_to_snapshot` – (Optional, boolean) Copy all Cluster `tags` to snapshots. Default is `false`. 
@@ -501,6 +504,8 @@ This resource exports the following attributes in addition to the arguments abov * `cluster_members` – List of RDS Instances that are a part of this cluster * `availability_zones` - Availability zone of the instance * `backup_retention_period` - Backup retention period +* `ca_certificate_identifier` - CA identifier of the CA certificate used for the DB instance's server certificate +* `ca_certificate_valid_till` - Expiration date of the DB instance’s server certificate * `preferred_backup_window` - Daily time range during which the backups happen * `preferred_maintenance_window` - Maintenance window * `endpoint` - DNS address of the RDS instance @@ -565,4 +570,4 @@ Using `terraform import`, import RDS Clusters using the `cluster_identifier`. Fo % terraform import aws_rds_cluster.aurora_cluster aurora-prod-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_activity_stream.html.markdown b/website/docs/cdktf/python/r/rds_cluster_activity_stream.html.markdown index 8b095a7a7c0..5f61060eaeb 100644 --- a/website/docs/cdktf/python/r/rds_cluster_activity_stream.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_activity_stream.html.markdown @@ -74,7 +74,7 @@ class MyConvertedCode(TerraformStack): For more detailed documentation about each argument, refer to the [AWS official documentation][3]. -This argument supports the following arguments: +This resource supports the following arguments: * `resource_arn` - (Required, Forces new resources) The Amazon Resource Name (ARN) of the DB cluster. * `mode` - (Required, Forces new resources) Specifies the mode of the database activity stream. Database events such as a change or access generate an activity stream event. The database session can handle these events either synchronously or asynchronously. One of: `sync`, `async`. 
@@ -117,4 +117,4 @@ Using `terraform import`, import RDS Aurora Cluster Database Activity Streams us [2]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartActivityStream.html [3]: https://docs.aws.amazon.com/cli/latest/reference/rds/start-activity-stream.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_endpoint.html.markdown b/website/docs/cdktf/python/r/rds_cluster_endpoint.html.markdown index e230b5f40d7..562199bb35f 100644 --- a/website/docs/cdktf/python/r/rds_cluster_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_endpoint.html.markdown @@ -10,7 +10,7 @@ description: |- # Resource: aws_rds_cluster_endpoint -Manages an RDS Aurora Cluster Endpoint. +Manages an RDS Aurora Cluster Custom Endpoint. You can refer to the [User Guide][1]. ## Example Usage @@ -82,7 +82,7 @@ class MyConvertedCode(TerraformStack): For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-cluster-endpoint.html). -This argument supports the following arguments: +This resource supports the following arguments: * `cluster_identifier` - (Required, Forces new resources) The cluster identifier. * `cluster_endpoint_identifier` - (Required, Forces new resources) The identifier to use for the new endpoint. This parameter is stored as a lowercase string. 
@@ -127,4 +127,4 @@ Using `terraform import`, import RDS Clusters Endpoint using the `cluster_endpoi [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html#Aurora.Endpoints.Cluster - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/rds_cluster_instance.html.markdown b/website/docs/cdktf/python/r/rds_cluster_instance.html.markdown index 8c59073a643..a32adb8da45 100644 --- a/website/docs/cdktf/python/r/rds_cluster_instance.html.markdown +++ b/website/docs/cdktf/python/r/rds_cluster_instance.html.markdown @@ -70,7 +70,7 @@ class MyConvertedCode(TerraformStack): For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html). -This argument supports the following arguments: +This resource supports the following arguments: * `apply_immediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. * `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Default `true`. @@ -160,4 +160,4 @@ Using `terraform import`, import RDS Cluster Instances using the `identifier`. 
F % terraform import aws_rds_cluster_instance.prod_instance_1 aurora-cluster-instance-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53_resolver_firewall_rule.html.markdown b/website/docs/cdktf/python/r/route53_resolver_firewall_rule.html.markdown index 8156eca6c85..1693861e28e 100644 --- a/website/docs/cdktf/python/r/route53_resolver_firewall_rule.html.markdown +++ b/website/docs/cdktf/python/r/route53_resolver_firewall_rule.html.markdown @@ -67,8 +67,10 @@ This resource supports the following arguments: * `block_override_ttl` - (Required if `block_response` is `OVERRIDE`) The recommended amount of time, in seconds, for the DNS resolver or web browser to cache the provided override record. Minimum value of 0. Maximum value of 604800. * `block_response` - (Required if `action` is `BLOCK`) The way that you want DNS Firewall to block the request. Valid values: `NODATA`, `NXDOMAIN`, `OVERRIDE`. * `firewall_domain_list_id` - (Required) The ID of the domain list that you want to use in the rule. +* `firewall_domain_redirection_action` - (Optional) Evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, or ALIAS. Valid values are `INSPECT_REDIRECTION_DOMAIN` and `TRUST_REDIRECTION_DOMAIN`. Default value is `INSPECT_REDIRECTION_DOMAIN`. * `firewall_rule_group_id` - (Required) The unique identifier of the firewall rule group where you want to create the rule. * `priority` - (Required) The setting that determines the processing order of the rule in the rule group. DNS Firewall processes the rules in a rule group by order of priority, starting from the lowest setting. +* `q_type` - (Optional) The query type you want the rule to evaluate. 
Additional details can be found [here](https://en.wikipedia.org/wiki/List_of_DNS_record_types) ## Attribute Reference @@ -101,4 +103,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall rules using the % terraform import aws_route53_resolver_firewall_rule.example rslvr-frg-0123456789abcdef:rslvr-fdl-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53domains_delegation_signer_record.html.markdown b/website/docs/cdktf/python/r/route53domains_delegation_signer_record.html.markdown index 5c7a24d4f6c..580572942fc 100644 --- a/website/docs/cdktf/python/r/route53domains_delegation_signer_record.html.markdown +++ b/website/docs/cdktf/python/r/route53domains_delegation_signer_record.html.markdown @@ -120,7 +120,7 @@ class MyConvertedCode(TerraformStack): ## Argument Reference -This argument supports the following arguments: +This resource supports the following arguments: * `domain_name` - (Required) The name of the domain that will have its parent DNS zone updated with the Delegation Signer record. * `signing_attributes` - (Required) The information about a key, including the algorithm, public key-value, and flags. 
@@ -166,4 +166,4 @@ Using `terraform import`, import delegation signer records using the domain name % terraform import aws_route53domains_delegation_signer_record.example example.com,40DE3534F5324DBDAC598ACEDB5B1E26A5368732D9C791D1347E4FBDDF6FC343 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route53domains_registered_domain.html.markdown b/website/docs/cdktf/python/r/route53domains_registered_domain.html.markdown index 7b908e0d242..176bf4a1ec6 100644 --- a/website/docs/cdktf/python/r/route53domains_registered_domain.html.markdown +++ b/website/docs/cdktf/python/r/route53domains_registered_domain.html.markdown @@ -48,7 +48,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** You must specify the same privacy setting for `admin_privacy`, `registrant_privacy` and `tech_privacy`. -This argument supports the following arguments: +This resource supports the following arguments: * `admin_contact` - (Optional) Details about the domain administrative contact. See [Contact Blocks](#contact-blocks) for more details. * `admin_privacy` - (Optional) Whether domain administrative contact information is concealed from WHOIS queries. Default: `true`. @@ -139,4 +139,4 @@ Using `terraform import`, import domains using the domain name. For example: % terraform import aws_route53domains_registered_domain.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/route_table_association.html.markdown b/website/docs/cdktf/python/r/route_table_association.html.markdown index 0522acf9a26..54d0dae8ea9 100644 --- a/website/docs/cdktf/python/r/route_table_association.html.markdown +++ b/website/docs/cdktf/python/r/route_table_association.html.markdown @@ -55,7 +55,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** Please note that one of either `subnet_id` or `gateway_id` is required. 
-This argument supports the following arguments: +This resource supports the following arguments: * `subnet_id` - (Optional) The subnet ID to create an association. Conflicts with `gateway_id`. * `gateway_id` - (Optional) The gateway ID to create an association. Conflicts with `subnet_id`. @@ -129,4 +129,4 @@ With EC2 Internet Gateways: % terraform import aws_route_table_association.assoc igw-01b3a60780f8d034a/rtb-656c65616e6f72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/s3_bucket_object_lock_configuration.html.markdown b/website/docs/cdktf/python/r/s3_bucket_object_lock_configuration.html.markdown index a529cd64c6f..c98029f9e4e 100644 --- a/website/docs/cdktf/python/r/s3_bucket_object_lock_configuration.html.markdown +++ b/website/docs/cdktf/python/r/s3_bucket_object_lock_configuration.html.markdown @@ -67,8 +67,8 @@ This resource supports the following arguments: * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `object_lock_enabled` - (Optional, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. Defaults to `Enabled`. Valid values: `Enabled`. * `rule` - (Optional) Configuration block for specifying the Object Lock rule for the specified object. [See below](#rule). -* `token` - (Optional) Token to allow Object Lock to be enabled for an existing bucket. You must contact AWS support for the bucket's "Object Lock token". -The token is generated in the back-end when [versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/manage-versioning-examples.html) is enabled on a bucket. For more details on versioning, see the [`aws_s3_bucket_versioning` resource](s3_bucket_versioning.html.markdown). +* `token` - (Optional, Deprecated) This argument is deprecated and no longer needed to enable Object Lock. 
+To enable Object Lock for an existing bucket, you must first enable versioning on the bucket and then enable Object Lock. For more details on versioning, see the [`aws_s3_bucket_versioning` resource](s3_bucket_versioning.html.markdown). ### rule @@ -132,4 +132,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_object_lock_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_domain.html.markdown b/website/docs/cdktf/python/r/sagemaker_domain.html.markdown index 748c096aaf2..3ee39fc1adf 100644 --- a/website/docs/cdktf/python/r/sagemaker_domain.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_domain.html.markdown @@ -81,9 +81,10 @@ class MyConvertedCode(TerraformStack): example = SagemakerAppImageConfig(self, "example", app_image_config_name="example", kernel_gateway_image_config=SagemakerAppImageConfigKernelGatewayImageConfig( - kernel_spec=SagemakerAppImageConfigKernelGatewayImageConfigKernelSpec( + kernel_spec=[SagemakerAppImageConfigKernelGatewayImageConfigKernelSpec( name="example" ) + ] ) ) aws_sagemaker_image_example = SagemakerImage(self, "example_1", @@ -185,6 +186,7 @@ The following arguments are optional: #### `canvas_app_settings` Block * `direct_deploy_settings` - (Optional) The model deployment settings for the SageMaker Canvas application. See [`direct_deploy_settings` Block](#direct_deploy_settings-block) below. +* `generative_ai_settings` - (Optional) The generative AI settings for the SageMaker Canvas application. See [`generative_ai_settings` Block](#generative_ai_settings-block) below. * `identity_provider_oauth_settings` - (Optional) The settings for connecting to an external data source with OAuth. See [`identity_provider_oauth_settings` Block](#identity_provider_oauth_settings-block) below. * `kendra_settings` - (Optional) The settings for document querying. 
See [`kendra_settings` Block](#kendra_settings-block) below. * `model_register_settings` - (Optional) The model registry settings for the SageMaker Canvas application. See [`model_register_settings` Block](#model_register_settings-block) below. @@ -199,7 +201,11 @@ The following arguments are optional: ##### `direct_deploy_settings` Block -* `status` - (Optional)Describes whether model deployment permissions are enabled or disabled in the Canvas application. Valid values are `ENABLED` and `DISABLED`. +* `status` - (Optional) Describes whether model deployment permissions are enabled or disabled in the Canvas application. Valid values are `ENABLED` and `DISABLED`. + +##### `generative_ai_settings` Block + +* `amazon_bedrock_role_arn` - (Optional) The ARN of an Amazon Web Services IAM role that allows fine-tuning of large language models (LLMs) in Amazon Bedrock. The IAM role should have Amazon S3 read and write permissions, as well as a trust relationship that establishes bedrock.amazonaws.com as a service principal. ##### `kendra_settings` Block @@ -253,6 +259,7 @@ The following arguments are optional: * `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [`default_resource_spec` Block](#default_resource_spec-block) below. * `lifecycle_config_arns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. +* `custom_image` - (Optional) A list of custom SageMaker images that are configured to run as a CodeEditor app. see [`custom_image` Block](#custom_image-block) below. ##### `code_repository` Block @@ -342,4 +349,4 @@ Using `terraform import`, import SageMaker Domains using the `id`. 
For example: % terraform import aws_sagemaker_domain.test_domain d-8jgsjtilstu8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_endpoint_configuration.html.markdown b/website/docs/cdktf/python/r/sagemaker_endpoint_configuration.html.markdown index e15b31d09c7..8c38e5fab59 100644 --- a/website/docs/cdktf/python/r/sagemaker_endpoint_configuration.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_endpoint_configuration.html.markdown @@ -62,6 +62,7 @@ This resource supports the following arguments: * `container_startup_health_check_timeout_in_seconds` - (Optional) The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. For more information about health check, see [How Your Container Should Respond to Health Check (Ping) Requests](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html#your-algorithms-inference-algo-ping-requests). Valid values between `60` and `3600`. * `core_dump_config` - (Optional) Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. * `enable_ssm_access` - (Optional) You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoints. +* `inference_ami_version` - (Optional) Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. * `initial_instance_count` - (Optional) Initial number of instances used for auto-scaling. * `instance_type` - (Optional) The type of instance to start. 
* `initial_variant_weight` - (Optional) Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to `1.0`. @@ -98,7 +99,7 @@ This resource supports the following arguments: #### capture_options -* `capture_mode` - (Required) Specifies the data to be captured. Should be one of `Input` or `Output`. +* `capture_mode` - (Required) Specifies the data to be captured. Should be one of `Input`, `Output` or `InputAndOutput`. #### capture_content_type_header @@ -160,4 +161,4 @@ Using `terraform import`, import endpoint configurations using the `name`. For e % terraform import aws_sagemaker_endpoint_configuration.test_endpoint_config endpoint-config-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_user_profile.html.markdown b/website/docs/cdktf/python/r/sagemaker_user_profile.html.markdown index 2b86374b85b..ba2f8d3e25c 100644 --- a/website/docs/cdktf/python/r/sagemaker_user_profile.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_user_profile.html.markdown @@ -105,6 +105,7 @@ This resource supports the following arguments: * `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default_resource_spec) below. * `lifecycle_config_arns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. +* `custom_image` - (Optional) A list of custom SageMaker images that are configured to run as a CodeEditor app. see [Custom Image](#custom_image) below. #### r_session_app_settings @@ -144,6 +145,7 @@ This resource supports the following arguments: #### canvas_app_settings * `direct_deploy_settings` - (Optional)The model deployment settings for the SageMaker Canvas application. See [Direct Deploy Settings](#direct_deploy_settings) below. 
+* `generative_ai_settings` - (Optional) The generative AI settings for the SageMaker Canvas application. See [Generative AI Settings](#generative_ai_settings) below. * `identity_provider_oauth_settings` - (Optional) The settings for connecting to an external data source with OAuth. See [Identity Provider OAuth Settings](#identity_provider_oauth_settings) below. * `kendra_settings` - (Optional) The settings for document querying. See [Kendra Settings](#kendra_settings) below. * `model_register_settings` - (Optional) The model registry settings for the SageMaker Canvas application. See [Model Register Settings](#model_register_settings) below. @@ -158,7 +160,11 @@ This resource supports the following arguments: ##### direct_deploy_settings -* `status` - (Optional)Describes whether model deployment permissions are enabled or disabled in the Canvas application. Valid values are `ENABLED` and `DISABLED`. +* `status` - (Optional) Describes whether model deployment permissions are enabled or disabled in the Canvas application. Valid values are `ENABLED` and `DISABLED`. + +##### generative_ai_settings + +* `amazon_bedrock_role_arn` - (Optional) The ARN of an Amazon Web Services IAM role that allows fine-tuning of large language models (LLMs) in Amazon Bedrock. The IAM role should have Amazon S3 read and write permissions, as well as a trust relationship that establishes bedrock.amazonaws.com as a service principal. ##### kendra_settings @@ -232,4 +238,4 @@ Using `terraform import`, import SageMaker User Profiles using the `arn`. 
For ex % terraform import aws_sagemaker_user_profile.test_user_profile arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_workforce.html.markdown b/website/docs/cdktf/python/r/sagemaker_workforce.html.markdown index 581d068cc7d..3bcb12ec04e 100644 --- a/website/docs/cdktf/python/r/sagemaker_workforce.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_workforce.html.markdown @@ -104,12 +104,14 @@ This resource supports the following arguments: ### Oidc Config +* `authentication_request_extra_params` - (Optional) A string to string map of identifiers specific to the custom identity provider (IdP) being used. * `authorization_endpoint` - (Required) The OIDC IdP authorization endpoint used to configure your private workforce. * `client_id` - (Required) The OIDC IdP client ID used to configure your private workforce. * `client_secret` - (Required) The OIDC IdP client secret used to configure your private workforce. * `issuer` - (Required) The OIDC IdP issuer used to configure your private workforce. * `jwks_uri` - (Required) The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. * `logout_endpoint` - (Required) The OIDC IdP logout endpoint used to configure your private workforce. +* `scope` - (Optional) An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access. * `token_endpoint` - (Required) The OIDC IdP token endpoint used to configure your private workforce. * `user_info_endpoint` - (Required) The OIDC IdP user information endpoint used to configure your private workforce. 
@@ -157,4 +159,4 @@ Using `terraform import`, import SageMaker Workforces using the `workforce_name` % terraform import aws_sagemaker_workforce.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sagemaker_workteam.html.markdown b/website/docs/cdktf/python/r/sagemaker_workteam.html.markdown index a7360c85433..df8db179ad9 100644 --- a/website/docs/cdktf/python/r/sagemaker_workteam.html.markdown +++ b/website/docs/cdktf/python/r/sagemaker_workteam.html.markdown @@ -79,6 +79,7 @@ This resource supports the following arguments: * `workteam_name` - (Required) The name of the workforce. * `member_definition` - (Required) A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see [Member Definition](#member-definition) details below. * `notification_configuration` - (Optional) Configures notification of workers regarding available or expiring work items. see [Notification Configuration](#notification-configuration) details below. +* `worker_access_configuration` - (Optional) Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL. see [Worker Access Configuration](#worker-access-configuration) details below. * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Member Definition @@ -100,6 +101,19 @@ This resource supports the following arguments: * `notification_topic_arn` - (Required) The ARN for the SNS topic to which notifications should be published. +### Worker Access Configuration + +* `s3_presign` - (Required) Defines any Amazon S3 resource constraints. see [S3 Presign](#s3-presign) details below. + +#### S3 Presign + +* `iam_policy_constraints` - (Required) Use this parameter to specify the allowed request source. Possible sources are either SourceIp or VpcSourceIp. see [IAM Policy Constraints](#iam-policy-constraints) details below. + +##### IAM Policy Constraints + +* `source_ip` - (Optional) When SourceIp is Enabled the worker's IP address when a task is rendered in the worker portal is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. This IP address is checked by Amazon S3 and must match in order for the Amazon S3 resource to be rendered in the worker portal. Valid values are `Enabled` or `Disabled` +* `vpc_source_ip` - (Optional) When VpcSourceIp is Enabled the worker's IP address when a task is rendered in private worker portal inside the VPC is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. To render the task successfully Amazon S3 checks that the presigned URL is being accessed over an Amazon S3 VPC Endpoint, and that the worker's IP address matches the IP address in the IAM policy. To learn more about configuring private worker portal, see [Use Amazon VPC mode from a private worker portal](https://docs.aws.amazon.com/sagemaker/latest/dg/samurai-vpc-worker-portal.html). 
Valid values are `Enabled` or `Disabled` + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -134,4 +148,4 @@ Using `terraform import`, import SageMaker Workteams using the `workteam_name`. % terraform import aws_sagemaker_workteam.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/service_discovery_service.html.markdown b/website/docs/cdktf/python/r/service_discovery_service.html.markdown index e66427f6b5f..68e9a229ed7 100644 --- a/website/docs/cdktf/python/r/service_discovery_service.html.markdown +++ b/website/docs/cdktf/python/r/service_discovery_service.html.markdown @@ -101,44 +101,44 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: -* `name` - (Required, ForceNew) The name of the service. +* `name` - (Required, Forces new resource) The name of the service. * `description` - (Optional) The description of the service. -* `dns_config` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. -* `health_check_config` - (Optional) A complex type that contains settings for an optional health check. Only for Public DNS namespaces. -* `force_destroy` - (Optional, Default:false ) A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. -* `health_check_custom_config` - (Optional, ForceNew) A complex type that contains settings for ECS managed health checks. +* `dns_config` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. See [`dns_config` Block](#dns_config-block) for details. +* `health_check_config` - (Optional) A complex type that contains settings for an optional health check. 
Only for Public DNS namespaces. See [`health_check_config` Block](#health_check_config-block) for details. +* `force_destroy` - (Optional) A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. Defaults to `false`. +* `health_check_custom_config` - (Optional, Forces new resource) A complex type that contains settings for ECS managed health checks. See [`health_check_custom_config` Block](#health_check_custom_config-block) for details. * `namespace_id` - (Optional) The ID of the namespace that you want to use to create the service. * `type` - (Optional) If present, specifies that the service instances are only discoverable using the `DiscoverInstances` API operation. No DNS records is registered for the service instances. The only valid value is `HTTP`. * `tags` - (Optional) A map of tags to assign to the service. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### dns_config +### `dns_config` Block -This argument supports the following arguments: +The `dns_config` configuration block supports the following arguments: -* `namespace_id` - (Required, ForceNew) The ID of the namespace to use for DNS configuration. -* `dns_records` - (Required) An array that contains one DnsRecord object for each resource record set. +* `namespace_id` - (Required, Forces new resource) The ID of the namespace to use for DNS configuration. +* `dns_records` - (Required) An array that contains one DnsRecord object for each resource record set. See [`dns_records` Block](#dns_records-block) for details. * `routing_policy` - (Optional) The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. 
Valid Values: MULTIVALUE, WEIGHTED -#### dns_records +#### `dns_records` Block -This argument supports the following arguments: +The `dns_records` configuration block supports the following arguments: * `ttl` - (Required) The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. -* `type` - (Required, ForceNew) The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME +* `type` - (Required, Forces new resource) The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME -### health_check_config +### `health_check_config` Block -This argument supports the following arguments: +The `health_check_config` configuration block supports the following arguments: * `failure_threshold` - (Optional) The number of consecutive health checks. Maximum value of 10. * `resource_path` - (Optional) The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. -* `type` - (Optional, ForceNew) The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP +* `type` - (Optional, Forces new resource) The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP -### health_check_custom_config +### `health_check_custom_config` Block -This argument supports the following arguments: +The `health_check_custom_config` configuration block supports the following arguments: -* `failure_threshold` - (Optional, ForceNew) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. 
+* `failure_threshold` - (Optional, Forces new resource) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. ## Attribute Reference @@ -173,4 +173,4 @@ Using `terraform import`, import Service Discovery Service using the service ID. % terraform import aws_service_discovery_service.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_product.html.markdown b/website/docs/cdktf/python/r/servicecatalog_product.html.markdown index c65d03999f0..e3c8516fb43 100644 --- a/website/docs/cdktf/python/r/servicecatalog_product.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_product.html.markdown @@ -51,7 +51,7 @@ The following arguments are required: * `name` - (Required) Name of the product. * `owner` - (Required) Owner of the product. -* `provisioning_artifact_parameters` - (Required) Configuration block for provisioning artifact (i.e., version) parameters. Detailed below. +* `provisioning_artifact_parameters` - (Required) Configuration block for provisioning artifact (i.e., version) parameters. See [`provisioning_artifact_parameters` Block](#provisioning_artifact_parameters-block) for details. * `type` - (Required) Type of product. See [AWS Docs](https://docs.aws.amazon.com/servicecatalog/latest/dg/API_CreateProduct.html#API_CreateProduct_RequestSyntax) for valid list of values. The following arguments are optional: @@ -64,9 +64,9 @@ The following arguments are optional: * `support_url` - (Optional) Contact URL for product support. * `tags` - (Optional) Tags to apply to the product. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
-### provisioning_artifact_parameters +### `provisioning_artifact_parameters` Block -This argument supports the following arguments: +The `provisioning_artifact_parameters` configuration block supports the following arguments: * `description` - (Optional) Description of the provisioning artifact (i.e., version), including how it differs from the previous provisioning artifact. * `disable_template_validation` - (Optional) Whether AWS Service Catalog stops validating the specified provisioning artifact template even if it is invalid. @@ -120,4 +120,4 @@ Using `terraform import`, import `aws_servicecatalog_product` using the product % terraform import aws_servicecatalog_product.example prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/servicecatalog_provisioned_product.html.markdown b/website/docs/cdktf/python/r/servicecatalog_provisioned_product.html.markdown index 8002d4bf99b..f64dcbc6cf3 100644 --- a/website/docs/cdktf/python/r/servicecatalog_provisioned_product.html.markdown +++ b/website/docs/cdktf/python/r/servicecatalog_provisioned_product.html.markdown @@ -68,24 +68,24 @@ The following arguments are optional: * `product_name` - (Optional) Name of the product. You must provide `product_id` or `product_name`, but not both. * `provisioning_artifact_id` - (Optional) Identifier of the provisioning artifact. For example, `pa-4abcdjnxjj6ne`. You must provide the `provisioning_artifact_id` or `provisioning_artifact_name`, but not both. * `provisioning_artifact_name` - (Optional) Name of the provisioning artifact. You must provide the `provisioning_artifact_id` or `provisioning_artifact_name`, but not both. -* `provisioning_parameters` - (Optional) Configuration block with parameters specified by the administrator that are required for provisioning the product. See details below. 
+* `provisioning_parameters` - (Optional) Configuration block with parameters specified by the administrator that are required for provisioning the product. See [`provisioning_parameters` Block](#provisioning_parameters-block) for details. * `retain_physical_resources` - (Optional) _Only applies to deleting._ Whether to delete the Service Catalog provisioned product but leave the CloudFormation stack, stack set, or the underlying resources of the deleted provisioned product. The default value is `false`. -* `stack_set_provisioning_preferences` - (Optional) Configuration block with information about the provisioning preferences for a stack set. See details below. +* `stack_set_provisioning_preferences` - (Optional) Configuration block with information about the provisioning preferences for a stack set. See [`stack_set_provisioning_preferences` Block](#stack_set_provisioning_preferences-block) for details. * `tags` - (Optional) Tags to apply to the provisioned product. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### provisioning_parameters +### `provisioning_parameters` Block -This argument supports the following arguments: +The `provisioning_parameters` configuration block supports the following arguments: * `key` - (Required) Parameter key. * `use_previous_value` - (Optional) Whether to ignore `value` and keep the previous parameter value. Ignored when initially provisioning a product. * `value` - (Optional) Parameter value. -### stack_set_provisioning_preferences +### `stack_set_provisioning_preferences` Block All of the `stack_set_provisioning_preferences` are only applicable to a `CFN_STACKSET` provisioned product type. 
-This argument supports the following arguments: +The `stack_set_provisioning_preferences` configuration block supports the following arguments: * `accounts` - (Optional) One or more AWS accounts that will have access to the provisioned product. The AWS accounts specified should be within the list of accounts in the STACKSET constraint. To get the list of accounts in the STACKSET constraint, use the `aws_servicecatalog_provisioning_parameters` data source. If no values are specified, the default value is all accounts from the STACKSET constraint. * `failure_tolerance_count` - (Optional) Number of accounts, per region, for which this operation can fail before AWS Service Catalog stops the operation in that region. If the operation is stopped in a region, AWS Service Catalog doesn't attempt the operation in any subsequent regions. You must specify either `failure_tolerance_count` or `failure_tolerance_percentage`, but not both. The default value is 0 if no value is specified. @@ -157,4 +157,4 @@ Using `terraform import`, import `aws_servicecatalog_provisioned_product` using % terraform import aws_servicecatalog_provisioned_product.example pp-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/sesv2_configuration_set.html.markdown b/website/docs/cdktf/python/r/sesv2_configuration_set.html.markdown index 202b59450d6..cf1c48d02d6 100644 --- a/website/docs/cdktf/python/r/sesv2_configuration_set.html.markdown +++ b/website/docs/cdktf/python/r/sesv2_configuration_set.html.markdown @@ -53,51 +53,61 @@ class MyConvertedCode(TerraformStack): This resource supports the following arguments: * `configuration_set_name` - (Required) The name of the configuration set. -* `delivery_options` - (Optional) An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. 
-* `reputation_options` - (Optional) An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. -* `sending_options` - (Optional) An object that defines whether or not Amazon SES can send email that you send using the configuration set. -* `suppression_options` - (Optional) An object that contains information about the suppression list preferences for your account. +* `delivery_options` - (Optional) An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. See [`delivery_options` Block](#delivery_options-block) for details. +* `reputation_options` - (Optional) An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. See [`reputation_options` Block](#reputation_options-block) for details. +* `sending_options` - (Optional) An object that defines whether or not Amazon SES can send email that you send using the configuration set. See [`sending_options` Block](#sending_options-block) for details. +* `suppression_options` - (Optional) An object that contains information about the suppression list preferences for your account. See [`suppression_options` Block](#suppression_options-block) for details. * `tags` - (Optional) A map of tags to assign to the service. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `tracking_options` - (Optional) An object that defines the open and click tracking options for emails that you send using the configuration set. -* `vdm_options` - (Optional) An object that defines the VDM settings that apply to emails that you send using the configuration set. 
+* `tracking_options` - (Optional) An object that defines the open and click tracking options for emails that you send using the configuration set. See [`tracking_options` Block](#tracking_options-block) for details. +* `vdm_options` - (Optional) An object that defines the VDM settings that apply to emails that you send using the configuration set. See [`vdm_options` Block](#vdm_options-block) for details. -### delivery_options +### `delivery_options` Block -This argument supports the following arguments: +The `delivery_options` configuration block supports the following arguments: * `sending_pool_name` - (Optional) The name of the dedicated IP pool to associate with the configuration set. * `tls_policy` - (Optional) Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Valid values: `REQUIRE`, `OPTIONAL`. -### reputation_options +### `reputation_options` Block -This argument supports the following arguments: +The `reputation_options` configuration block supports the following arguments: * `reputation_metrics_enabled` - (Optional) If `true`, tracking of reputation metrics is enabled for the configuration set. If `false`, tracking of reputation metrics is disabled for the configuration set. -### sending_options +### `sending_options` Block -This argument supports the following arguments: +The `sending_options` configuration block supports the following arguments: * `sending_enabled` - (Optional) If `true`, email sending is enabled for the configuration set. If `false`, email sending is disabled for the configuration set. -### suppression_options +### `suppression_options` Block + +The `suppression_options` configuration block supports the following arguments: * `suppressed_reasons` - (Optional) A list that contains the reasons that email addresses are automatically added to the suppression list for your account. Valid values: `BOUNCE`, `COMPLAINT`. 
-### tracking_options +### `tracking_options` Block + +The `tracking_options` configuration block supports the following arguments: * `custom_redirect_domain` - (Required) The domain to use for tracking open and click events. -### vdm_options +### `vdm_options` Block + +The `vdm_options` configuration block supports the following arguments: -* `dashboard_options` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Dashboard. -* `guardian_options` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Guardian. +* `dashboard_options` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Dashboard. See [`dashboard_options` Block](#dashboard_options-block) for details. +* `guardian_options` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Guardian. See [`guardian_options` Block](#guardian_options-block) for details. -### dashboard_options +### `dashboard_options` Block + +The `dashboard_options` configuration block supports the following arguments: * `engagement_metrics` - (Optional) Specifies the status of your VDM engagement metrics collection. Valid values: `ENABLED`, `DISABLED`. -### guardian_options +### `guardian_options` Block + +The `guardian_options` configuration block supports the following arguments: * `optimized_shared_delivery` - (Optional) Specifies the status of your VDM optimized shared delivery. Valid values: `ENABLED`, `DISABLED`. 
@@ -134,4 +144,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Configuration Set using % terraform import aws_sesv2_configuration_set.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssm_association.html.markdown b/website/docs/cdktf/python/r/ssm_association.html.markdown index 077b468c65b..62d36b94ec3 100644 --- a/website/docs/cdktf/python/r/ssm_association.html.markdown +++ b/website/docs/cdktf/python/r/ssm_association.html.markdown @@ -134,6 +134,7 @@ This resource supports the following arguments: * `parameters` - (Optional) A block of arbitrary string parameters to pass to the SSM document. * `schedule_expression` - (Optional) A [cron or rate expression](https://docs.aws.amazon.com/systems-manager/latest/userguide/reference-cron-and-rate-expressions.html) that specifies when the association runs. * `sync_compliance` - (Optional) The mode for generating association compliance. You can specify `AUTO` or `MANUAL`. +* `tags` - (Optional) A map of tags to assign to the object. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `targets` - (Optional) A block containing the targets of the SSM association. Targets are documented below. AWS currently supports a maximum of 5 targets. * `wait_for_success_timeout_seconds` - (Optional) The number of seconds to wait for the association status to be `Success`. If `Success` status is not reached within the given time, create opration will fail. @@ -157,6 +158,7 @@ This resource exports the following attributes in addition to the arguments abov * `instance_id` - The instance id that the SSM document was applied to. * `name` - The name of the SSM document to apply. * `parameters` - Additional parameters passed to the SSM document. 
+* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -183,4 +185,4 @@ Using `terraform import`, import SSM associations using the `association_id`. Fo % terraform import aws_ssm_association.test-association 10abcdef-0abc-1234-5678-90abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/ssoadmin_account_assignment.html.markdown b/website/docs/cdktf/python/r/ssoadmin_account_assignment.html.markdown index 69141694278..710012394c2 100644 --- a/website/docs/cdktf/python/r/ssoadmin_account_assignment.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_account_assignment.html.markdown @@ -85,28 +85,27 @@ from imports.aws.ssoadmin_permission_set import SsoadminPermissionSet class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - example = IdentitystoreGroup(self, "example", + example = DataAwsSsoadminInstances(self, "example") + aws_identitystore_group_example = IdentitystoreGroup(self, "example_1", description="Admin Group", display_name="Admin", identity_store_id=Token.as_string( - Fn.lookup_nested(Fn.tolist(sso_instance.identity_store_ids), ["0"])) + Fn.lookup_nested(Fn.tolist(example.identity_store_ids), ["0"])) ) - data_aws_ssoadmin_instances_example = DataAwsSsoadminInstances(self, "example_1") # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
- data_aws_ssoadmin_instances_example.override_logical_id("example") + aws_identitystore_group_example.override_logical_id("example") aws_ssoadmin_permission_set_example = SsoadminPermissionSet(self, "example_2", instance_arn=Token.as_string( - Fn.lookup_nested(Fn.tolist(data_aws_ssoadmin_instances_example.arns), ["0" - ])), + Fn.lookup_nested(Fn.tolist(example.arns), ["0"])), name="Example" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. aws_ssoadmin_permission_set_example.override_logical_id("example") SsoadminAccountAssignment(self, "account_assignment", instance_arn=Token.as_string( - Fn.lookup_nested(Fn.tolist(data_aws_ssoadmin_instances_example.arns), ["0"])), + Fn.lookup_nested(Fn.tolist(example.arns), ["0"])), permission_set_arn=Token.as_string(aws_ssoadmin_permission_set_example.arn), - principal_id=example.group_id, + principal_id=Token.as_string(aws_identitystore_group_example.group_id), principal_type="GROUP", target_id="123456789012", target_type="AWS_ACCOUNT" @@ -115,8 +114,7 @@ class MyConvertedCode(TerraformStack): SsoadminManagedPolicyAttachment(self, "example_4", depends_on=[aws_ssoadmin_account_assignment_example], instance_arn=Token.as_string( - Fn.lookup_nested(Fn.tolist(data_aws_ssoadmin_instances_example.arns), ["0" - ])), + Fn.lookup_nested(Fn.tolist(example.arns), ["0"])), managed_policy_arn="arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup", permission_set_arn=Token.as_string(aws_ssoadmin_permission_set_example.arn) ) @@ -173,4 +171,4 @@ Using `terraform import`, import SSO Account Assignments using the `principal_id % terraform import aws_ssoadmin_account_assignment.example f81d4fae-7dec-11d0-a765-00a0c91e6bf6,GROUP,1234567890,AWS_ACCOUNT,arn:aws:sso:::permissionSet/ssoins-0123456789abcdef/ps-0123456789abcdef,arn:aws:sso:::instance/ssoins-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git 
a/website/docs/cdktf/python/r/ssoadmin_managed_policy_attachment.html.markdown b/website/docs/cdktf/python/r/ssoadmin_managed_policy_attachment.html.markdown index de6efdcbf6b..41337f2f65f 100644 --- a/website/docs/cdktf/python/r/ssoadmin_managed_policy_attachment.html.markdown +++ b/website/docs/cdktf/python/r/ssoadmin_managed_policy_attachment.html.markdown @@ -71,28 +71,27 @@ from imports.aws.ssoadmin_permission_set import SsoadminPermissionSet class MyConvertedCode(TerraformStack): def __init__(self, scope, name): super().__init__(scope, name) - example = IdentitystoreGroup(self, "example", + example = DataAwsSsoadminInstances(self, "example") + aws_identitystore_group_example = IdentitystoreGroup(self, "example_1", description="Admin Group", display_name="Admin", identity_store_id=Token.as_string( - Fn.lookup_nested(Fn.tolist(sso_instance.identity_store_ids), ["0"])) + Fn.lookup_nested(Fn.tolist(example.identity_store_ids), ["0"])) ) - data_aws_ssoadmin_instances_example = DataAwsSsoadminInstances(self, "example_1") # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. - data_aws_ssoadmin_instances_example.override_logical_id("example") + aws_identitystore_group_example.override_logical_id("example") aws_ssoadmin_permission_set_example = SsoadminPermissionSet(self, "example_2", instance_arn=Token.as_string( - Fn.lookup_nested(Fn.tolist(data_aws_ssoadmin_instances_example.arns), ["0" - ])), + Fn.lookup_nested(Fn.tolist(example.arns), ["0"])), name="Example" ) # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
aws_ssoadmin_permission_set_example.override_logical_id("example") SsoadminAccountAssignment(self, "account_assignment", instance_arn=Token.as_string( - Fn.lookup_nested(Fn.tolist(data_aws_ssoadmin_instances_example.arns), ["0"])), + Fn.lookup_nested(Fn.tolist(example.arns), ["0"])), permission_set_arn=Token.as_string(aws_ssoadmin_permission_set_example.arn), - principal_id=example.group_id, + principal_id=Token.as_string(aws_identitystore_group_example.group_id), principal_type="GROUP", target_id="123456789012", target_type="AWS_ACCOUNT" @@ -101,8 +100,7 @@ class MyConvertedCode(TerraformStack): SsoadminManagedPolicyAttachment(self, "example_4", depends_on=[aws_ssoadmin_account_assignment_example], instance_arn=Token.as_string( - Fn.lookup_nested(Fn.tolist(data_aws_ssoadmin_instances_example.arns), ["0" - ])), + Fn.lookup_nested(Fn.tolist(example.arns), ["0"])), managed_policy_arn="arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup", permission_set_arn=Token.as_string(aws_ssoadmin_permission_set_example.arn) ) @@ -157,4 +155,4 @@ Using `terraform import`, import SSO Managed Policy Attachments using the `manag % terraform import aws_ssoadmin_managed_policy_attachment.example arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup,arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/storagegateway_gateway.html.markdown b/website/docs/cdktf/python/r/storagegateway_gateway.html.markdown index f863e33daf6..9b7484e60b4 100644 --- a/website/docs/cdktf/python/r/storagegateway_gateway.html.markdown +++ b/website/docs/cdktf/python/r/storagegateway_gateway.html.markdown @@ -173,7 +173,7 @@ class MyConvertedCode(TerraformStack): ~> **NOTE:** One of `activation_key` or `gateway_ip_address` must be provided for resource creation (gateway activation). Neither is required for resource import. 
If using `gateway_ip_address`, Terraform must be able to make an HTTP (port 80) GET request to the specified IP address from where it is running. -This argument supports the following arguments: +This resource supports the following arguments: * `gateway_name` - (Required) Name of the gateway. * `gateway_timezone` - (Required) Time zone for the gateway. The time zone is of the format "GMT", "GMT-hr:mm", or "GMT+hr:mm". For example, `GMT-4:00` indicates the time is 4 hours behind GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule. @@ -290,4 +290,4 @@ class MyConvertedCode(TerraformStack): ) ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/verifiedpermissions_identity_source.html.markdown b/website/docs/cdktf/python/r/verifiedpermissions_identity_source.html.markdown new file mode 100644 index 00000000000..4a03d2cd47e --- /dev/null +++ b/website/docs/cdktf/python/r/verifiedpermissions_identity_source.html.markdown @@ -0,0 +1,199 @@ +--- +subcategory: "Verified Permissions" +layout: "aws" +page_title: "AWS: aws_verifiedpermissions_identity_source" +description: |- + Terraform resource for managing an AWS Verified Permissions Identity Source. +--- + + + +# Resource: aws_verifiedpermissions_identity_source + +Terraform resource for managing an AWS Verified Permissions Identity Source. + +## Example Usage + +### Cognito User Pool Configuration Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. 
+# +from imports.aws.cognito_user_pool import CognitoUserPool +from imports.aws.cognito_user_pool_client import CognitoUserPoolClient +from imports.aws.verifiedpermissions_identity_source import VerifiedpermissionsIdentitySource +from imports.aws.verifiedpermissions_policy_store import VerifiedpermissionsPolicyStore +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = CognitoUserPool(self, "example", + name="example" + ) + aws_cognito_user_pool_client_example = CognitoUserPoolClient(self, "example_1", + explicit_auth_flows=["ADMIN_NO_SRP_AUTH"], + name="example", + user_pool_id=example.id + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_cognito_user_pool_client_example.override_logical_id("example") + aws_verifiedpermissions_policy_store_example = + VerifiedpermissionsPolicyStore(self, "example_2", + validation_settings=[VerifiedpermissionsPolicyStoreValidationSettings( + mode="STRICT" + ) + ] + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_verifiedpermissions_policy_store_example.override_logical_id("example") + aws_verifiedpermissions_identity_source_example = + VerifiedpermissionsIdentitySource(self, "example_3", + configuration=[VerifiedpermissionsIdentitySourceConfiguration( + cognito_user_pool_configuration=[VerifiedpermissionsIdentitySourceConfigurationCognitoUserPoolConfiguration( + client_ids=[Token.as_string(aws_cognito_user_pool_client_example.id)], + user_pool_arn=example.arn + ) + ] + ) + ], + policy_store_id=Token.as_string(aws_verifiedpermissions_policy_store_example.id) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_verifiedpermissions_identity_source_example.override_logical_id("example") +``` + +### OpenID Connect Configuration Usage + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.verifiedpermissions_identity_source import VerifiedpermissionsIdentitySource +from imports.aws.verifiedpermissions_policy_store import VerifiedpermissionsPolicyStore +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = VerifiedpermissionsPolicyStore(self, "example", + validation_settings=[VerifiedpermissionsPolicyStoreValidationSettings( + mode="STRICT" + ) + ] + ) + aws_verifiedpermissions_identity_source_example = + VerifiedpermissionsIdentitySource(self, "example_1", + configuration=[VerifiedpermissionsIdentitySourceConfiguration( + open_id_connect_configuration=[VerifiedpermissionsIdentitySourceConfigurationOpenIdConnectConfiguration( + entity_id_prefix="MyOIDCProvider", + group_configuration=[VerifiedpermissionsIdentitySourceConfigurationOpenIdConnectConfigurationGroupConfiguration( + group_claim="groups", + group_entity_type="MyCorp::UserGroup" + ) + ], + issuer="https://auth.example.com", + token_selection=[VerifiedpermissionsIdentitySourceConfigurationOpenIdConnectConfigurationTokenSelection( + access_token_only=[VerifiedpermissionsIdentitySourceConfigurationOpenIdConnectConfigurationTokenSelectionAccessTokenOnly( + audiences=["https://myapp.example.com"], + principal_id_claim="sub" + ) + ] + ) + ] + ) + ] + ) + ], + policy_store_id=example.id, + principal_entity_type="MyCorp::User" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_verifiedpermissions_identity_source_example.override_logical_id("example") +``` + +## Argument Reference + +* `policy_store_id` - (Required) Specifies the ID of the policy store in which you want to store this identity source. +* `configuration` - (Required) Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. See [Configuration](#configuration) below. +* `principal_entity_type` - (Optional) Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source. + +### Configuration + +* `cognito_user_pool_configuration` - (Required) Specifies the configuration details of an Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. See [Cognito User Pool Configuration](#cognito-user-pool-configuration) below. +* `open_id_connect_configuration` - (Required) Specifies the configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. See [Open ID Connect Configuration](#open-id-connect-configuration) below. + +#### Cognito User Pool Configuration + +* `user_pool_arn` - (Required) The Amazon Resource Name (ARN) of the Amazon Cognito user pool that contains the identities to be authorized. +* `client_ids` - (Optional) The unique application client IDs that are associated with the specified Amazon Cognito user pool. +* `group_configuration` - (Optional) The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source. See [Group Configuration](#group-configuration) below. + +#### Group Configuration + +* `group_entity_type` - (Required) The name of the schema entity type that's mapped to the user pool group. Defaults to `AWS::CognitoGroup`. + +#### Open ID Connect Configuration + +* `issuer` - (Required) The issuer URL of an OIDC identity provider.
This URL must have an OIDC discovery endpoint at the path `.well-known/openid-configuration`. +* `token_selection` - (Required) The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source. See [Token Selection](#token-selection) below. +* `entity_id_prefix` - (Optional) A descriptive string that you want to prefix to user entities from your OIDC identity provider. +* `group_configuration` - (Optional) The type of entity that a policy store maps to groups from an OIDC identity source. See [Group Configuration](#open-id-group-configuration) below. + +#### Token Selection + +* `access_token_only` - (Optional) The OIDC configuration for processing access tokens. See [Access Token Only](#access-token-only) below. +* `identity_token_only` - (Optional) The OIDC configuration for processing identity (ID) tokens. See [Identity Token Only](#identity-token-only) below. + +#### Access Token Only + +* `audiences` - (Optional) The access token aud claim values that you want to accept in your policy store. +* `principal_id_claim` - (Optional) The claim that determines the principal in OIDC access tokens. + +#### Identity Token Only + +* `client_ids` - (Optional) The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. +* `principal_id_claim` - (Optional) The claim that determines the principal in OIDC identity (ID) tokens. + +#### Open ID Group Configuration + +* `group_claim` - (Required) The token claim that you want Verified Permissions to interpret as group membership. For example, `groups`. +* `group_entity_type` - (Required) The policy store entity type that you want to map your users' group claim to. For example, `MyCorp::UserGroup`. A group entity type is an entity that can have a user entity type as a member.
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `policy_id` - The Policy ID of the policy. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Verified Permissions Identity Source using the `policy_store_id:identity_source_id`. For example: + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.verifiedpermissions_identity_source import VerifiedpermissionsIdentitySource +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + VerifiedpermissionsIdentitySource.generate_config_for_import(self, "example", "policy-store-id-12345678:identity-source-id-12345678") +``` + +Using `terraform import`, import Verified Permissions Identity Source using the `policy_store_id:identity_source_id`. For example: + +```console +% terraform import aws_verifiedpermissions_identity_source.example policy-store-id-12345678:identity-source-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_endpoint.html.markdown b/website/docs/cdktf/python/r/vpc_endpoint.html.markdown index 553e72c5e69..882fc5e5caf 100644 --- a/website/docs/cdktf/python/r/vpc_endpoint.html.markdown +++ b/website/docs/cdktf/python/r/vpc_endpoint.html.markdown @@ -88,6 +88,36 @@ class MyConvertedCode(TerraformStack): ) ``` +### Interface Endpoint Type with User-Defined IP Address + +```python +# DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from constructs import Construct +from cdktf import TerraformStack +# +# Provider bindings are generated by running `cdktf get`. 
+# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.vpc_endpoint import VpcEndpoint +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + VpcEndpoint(self, "ec2", + service_name="com.amazonaws.us-west-2.ec2", + subnet_configuration=[VpcEndpointSubnetConfiguration( + ipv4="10.0.1.10", + subnet_id=example1.id + ), VpcEndpointSubnetConfiguration( + ipv4="10.0.2.10", + subnet_id=example2.id + ) + ], + subnet_ids=[example1.id, example2.id], + vpc_endpoint_type="Interface", + vpc_id=example.id + ) +``` + ### Gateway Load Balancer Endpoint Type ```python @@ -178,6 +208,7 @@ Defaults to `false`. * `dns_options` - (Optional) The DNS options for the endpoint. See dns_options below. * `ip_address_type` - (Optional) The IP address type for the endpoint. Valid values are `ipv4`, `dualstack`, and `ipv6`. * `route_table_ids` - (Optional) One or more route table IDs. Applicable for endpoints of type `Gateway`. +* `subnet_configuration` - (Optional) Subnet configuration for the endpoint, used to select specific IPv4 and/or IPv6 addresses to the endpoint. See subnet_configuration below. * `subnet_ids` - (Optional) The ID of one or more subnets in which to create a network interface for the endpoint. Applicable for endpoints of type `GatewayLoadBalancer` and `Interface`. Interface type endpoints cannot function without being assigned to a subnet. * `security_group_ids` - (Optional) The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type `Interface`. If no security groups are specified, the VPC's [default security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#DefaultSecurityGroup) is associated with the endpoint. @@ -189,6 +220,12 @@ If no security groups are specified, the VPC's [default security group](https:// * `dns_record_ip_type` - (Optional) The DNS records created for the endpoint. 
Valid values are `ipv4`, `dualstack`, `service-defined`, and `ipv6`. * `private_dns_only_for_inbound_resolver_endpoint` - (Optional) Indicates whether to enable private DNS only for inbound endpoints. This option is available only for services that support both gateway and interface endpoints. It routes traffic that originates from the VPC to the gateway endpoint and traffic that originates from on-premises to the interface endpoint. Default is `false`. Can only be specified if private_dns_enabled is `true`. +### subnet_configuration + +* `ipv4` - (Optional) The IPv4 address to assign to the endpoint network interface in the subnet. You must provide an IPv4 address if the VPC endpoint supports IPv4. +* `ipv6` - (Optional) The IPv6 address to assign to the endpoint network interface in the subnet. You must provide an IPv6 address if the VPC endpoint supports IPv6. +* `subnet_id` - (Optional) The ID of the subnet. Must have a corresponding subnet in the `subnet_ids` argument. + ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): @@ -242,4 +279,4 @@ Using `terraform import`, import VPC Endpoints using the VPC endpoint `id`. For % terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_peering_connection.html.markdown b/website/docs/cdktf/python/r/vpc_peering_connection.html.markdown index 04fd74bd4a9..ddbe42d2806 100644 --- a/website/docs/cdktf/python/r/vpc_peering_connection.html.markdown +++ b/website/docs/cdktf/python/r/vpc_peering_connection.html.markdown @@ -148,7 +148,7 @@ can be done using the [`auto_accept`](vpc_peering_connection.html#auto_accept) a Connection has to be made active manually using other means. See [notes](vpc_peering_connection.html#notes) below for more information.
-This argument supports the following arguments: +This resource supports the following arguments: * `peer_owner_id` - (Optional) The AWS account ID of the target peer VPC. Defaults to the account ID the [AWS provider][1] is currently connected to, so must be managed if connecting cross-account. @@ -220,4 +220,4 @@ Using `terraform import`, import VPC Peering resources using the VPC peering `id [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/vpc_security_group_egress_rule.html.markdown b/website/docs/cdktf/python/r/vpc_security_group_egress_rule.html.markdown index a4edacd05e5..84736dd7850 100644 --- a/website/docs/cdktf/python/r/vpc_security_group_egress_rule.html.markdown +++ b/website/docs/cdktf/python/r/vpc_security_group_egress_rule.html.markdown @@ -46,7 +46,7 @@ class MyConvertedCode(TerraformStack): ~> **Note** Although `cidr_ipv4`, `cidr_ipv6`, `prefix_list_id`, and `referenced_security_group_id` are all marked as optional, you *must* provide one of them in order to configure the destination of the traffic. The `from_port` and `to_port` arguments are required unless `ip_protocol` is set to `-1` or `icmpv6`. -This argument supports the following arguments: +This resource supports the following arguments: * `cidr_ipv4` - (Optional) The destination IPv4 CIDR range. * `cidr_ipv6` - (Optional) The destination IPv6 CIDR range. 
@@ -92,4 +92,4 @@ Using `terraform import`, import security group egress rules using the `security % terraform import aws_vpc_security_group_egress_rule.example sgr-02108b27edd666983 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_web_acl.html.markdown b/website/docs/cdktf/python/r/wafv2_web_acl.html.markdown index f79dbd2f8ca..9f2430aef93 100644 --- a/website/docs/cdktf/python/r/wafv2_web_acl.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_web_acl.html.markdown @@ -954,7 +954,7 @@ Inspect a single header. Provide the name of the header to inspect, for example, The `single_header` block supports the following arguments: -* `name` - (Optional) Name of the query header to inspect. This setting must be provided as lower case characters. +* `name` - (Required) Name of the query header to inspect. This setting must be provided as lower case characters. ### `single_query_argument` Block @@ -962,7 +962,7 @@ Inspect a single query argument. Provide the name of the query argument to inspe The `single_query_argument` block supports the following arguments: -* `name` - (Optional) Name of the query header to inspect. This setting must be provided as lower case characters. +* `name` - (Required) Name of the query header to inspect. This setting must be provided as lower case characters. ### `body` Block @@ -1173,4 +1173,4 @@ Using `terraform import`, import WAFv2 Web ACLs using `ID/Name/Scope`. 
For examp % terraform import aws_wafv2_web_acl.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_web_acl_association.html.markdown b/website/docs/cdktf/python/r/wafv2_web_acl_association.html.markdown index 843619e6406..2a060c4b518 100644 --- a/website/docs/cdktf/python/r/wafv2_web_acl_association.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_web_acl_association.html.markdown @@ -19,68 +19,85 @@ Creates a WAFv2 Web ACL Association. ## Example Usage -```terraform -resource "aws_api_gateway_rest_api" "example" { - body = jsonencode({ - openapi = "3.0.1" - info = { - title = "example" - version = "1.0" - } - paths = { - "/path1" = { - get = { - x-amazon-apigateway-integration = { - httpMethod = "GET" - payloadFormatVersion = "1.0" - type = "HTTP_PROXY" - uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" - } - } - } - } - }) - - name = "example" -} - -resource "aws_api_gateway_deployment" "example" { - rest_api_id = aws_api_gateway_rest_api.example.id - - triggers = { - redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) - } - - lifecycle { - create_before_destroy = true - } -} - -resource "aws_api_gateway_stage" "example" { - deployment_id = aws_api_gateway_deployment.example.id - rest_api_id = aws_api_gateway_rest_api.example.id - stage_name = "example" -} - -resource "aws_wafv2_web_acl" "example" { - name = "web-acl-association-example" - scope = "REGIONAL" - - default_action { - allow {} - } - - visibility_config { - cloudwatch_metrics_enabled = false - metric_name = "friendly-metric-name" - sampled_requests_enabled = false - } -} - -resource "aws_wafv2_web_acl_association" "example" { - resource_arn = aws_api_gateway_stage.example.arn - web_acl_arn = aws_wafv2_web_acl.example.arn -} +```python +# DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +from cdktf import TerraformResourceLifecycle +from constructs import Construct +from cdktf import Fn, Token, TerraformStack +# +# Provider bindings are generated by running `cdktf get`. +# See https://cdk.tf/provider-generation for more details. +# +from imports.aws.api_gateway_deployment import ApiGatewayDeployment +from imports.aws.api_gateway_rest_api import ApiGatewayRestApi +from imports.aws.api_gateway_stage import ApiGatewayStage +from imports.aws.wafv2_web_acl import Wafv2WebAcl +from imports.aws.wafv2_web_acl_association import Wafv2WebAclAssociation +class MyConvertedCode(TerraformStack): + def __init__(self, scope, name): + super().__init__(scope, name) + example = ApiGatewayRestApi(self, "example", + body=Token.as_string( + Fn.jsonencode({ + "info": { + "title": "example", + "version": "1.0" + }, + "openapi": "3.0.1", + "paths": { + "/path1": { + "get": { + "x-amazon-apigateway-integration": { + "http_method": "GET", + "payload_format_version": "1.0", + "type": "HTTP_PROXY", + "uri": "https://ip-ranges.amazonaws.com/ip-ranges.json" + } + } + } + } + })), + name="example" + ) + aws_wafv2_web_acl_example = Wafv2WebAcl(self, "example_1", + default_action=Wafv2WebAclDefaultAction( + allow=Wafv2WebAclDefaultActionAllow() + ), + name="web-acl-association-example", + scope="REGIONAL", + visibility_config=Wafv2WebAclVisibilityConfig( + cloudwatch_metrics_enabled=False, + metric_name="friendly-metric-name", + sampled_requests_enabled=False + ) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_wafv2_web_acl_example.override_logical_id("example") + aws_api_gateway_deployment_example = ApiGatewayDeployment(self, "example_2", + lifecycle=TerraformResourceLifecycle( + create_before_destroy=True + ), + rest_api_id=example.id, + triggers={ + "redeployment": Token.as_string( + Fn.sha1(Token.as_string(Fn.jsonencode(example.body)))) + } + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_api_gateway_deployment_example.override_logical_id("example") + aws_api_gateway_stage_example = ApiGatewayStage(self, "example_3", + deployment_id=Token.as_string(aws_api_gateway_deployment_example.id), + rest_api_id=example.id, + stage_name="example" + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. + aws_api_gateway_stage_example.override_logical_id("example") + aws_wafv2_web_acl_association_example = Wafv2WebAclAssociation(self, "example_4", + resource_arn=Token.as_string(aws_api_gateway_stage_example.arn), + web_acl_arn=Token.as_string(aws_wafv2_web_acl_example.arn) + ) + # This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match. 
+ aws_wafv2_web_acl_association_example.override_logical_id("example") ``` ## Argument Reference @@ -125,4 +142,4 @@ Using `terraform import`, import WAFv2 Web ACL Association using `WEB_ACL_ARN,RE % terraform import aws_wafv2_web_acl_association.example arn:aws:wafv2:...7ce849ea,arn:aws:apigateway:...ages/name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/python/r/wafv2_web_acl_logging_configuration.html.markdown b/website/docs/cdktf/python/r/wafv2_web_acl_logging_configuration.html.markdown index 38f8ab577e0..e12679cf4d1 100644 --- a/website/docs/cdktf/python/r/wafv2_web_acl_logging_configuration.html.markdown +++ b/website/docs/cdktf/python/r/wafv2_web_acl_logging_configuration.html.markdown @@ -224,7 +224,7 @@ To redact a single header, provide the name of the header to be redacted. For ex The `single_header` block supports the following arguments: -* `name` - (Optional) Name of the query header to redact. This setting must be provided in lowercase characters. +* `name` - (Required) Name of the query header to redact. This setting must be provided in lowercase characters. ## Attribute Reference @@ -257,4 +257,4 @@ Using `terraform import`, import WAFv2 Web ACL Logging Configurations using the % terraform import aws_wafv2_web_acl_logging_configuration.example arn:aws:wafv2:us-west-2:123456789012:regional/webacl/test-logs/a1b2c3d4-5678-90ab-cdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/apigatewayv2_api.html.markdown b/website/docs/cdktf/typescript/d/apigatewayv2_api.html.markdown index 2d58586fc08..b5f933ca0c3 100644 --- a/website/docs/cdktf/typescript/d/apigatewayv2_api.html.markdown +++ b/website/docs/cdktf/typescript/d/apigatewayv2_api.html.markdown @@ -39,7 +39,7 @@ class MyConvertedCode extends TerraformStack { The arguments of this data source act as filters for querying the available APIs in the current region. 
The given filters must match exactly one API whose data will be exported as attributes. -This argument supports the following arguments: +This data source supports the following arguments: * `apiId` - (Required) API identifier. @@ -73,4 +73,4 @@ The `corsConfiguration` object supports the following: * `exposeHeaders` - Set of exposed HTTP headers. * `maxAge` - Number of seconds that the browser should cache preflight request results. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/appstream_image.html.markdown b/website/docs/cdktf/typescript/d/appstream_image.html.markdown new file mode 100644 index 00000000000..b626fc5c9b3 --- /dev/null +++ b/website/docs/cdktf/typescript/d/appstream_image.html.markdown @@ -0,0 +1,91 @@ +--- +subcategory: "AppStream 2.0" +layout: "aws" +page_title: "AWS: aws_appstream_image" +description: |- + Terraform data source for describing an AWS AppStream 2.0 Appstream Image. +--- + + + +# Data Source: aws_appstream_image + +Terraform data source for managing an AWS AppStream 2.0 Image. + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsAppstreamImage } from "./.gen/providers/aws/data-aws-appstream-image"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsAppstreamImage(this, "test", { + mostRecent: true, + name: "AppStream-WinServer2019-06-17-2024", + type: "PUBLIC", + }); + } +} + +``` + +## Argument Reference + +The following arguments are optional: + +* `name` - Name of the image being searched for. Cannot be used with name_regex or arn. 
+* `nameRegex` - Regular expression name of the image being searched for. Cannot be used with arn or name. +* `arn` - Arn of the image being searched for. Cannot be used with name_regex or name. +* `type` - The type of image which must be (PUBLIC, PRIVATE, or SHARED). +* `mostRecent` - Boolean that if it is set to true and there are multiple images returned the most recent will be returned. If it is set to false and there are multiple images returned, the data source will error. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `application` - An application object that contains the following: + * `app_block_arn` - The app block ARN of the application. + * `createdTime` - The time at which the application was created within the app block. + * `description` - The description of the application. + * `displayName` - The application name to display. + * `enabled` - Bool based on if the application is enabled. + * `icon_s3_location` - A list named icon_s3_location that contains the following: + * `s3Bucket` - S3 bucket of the S3 object. + * `s3Key` - S3 key of the S3 object. + * `iconUrl` - URL of the application icon. This URL may be time-limited. + * `instance_families` - List of the instance families of the application. + * `launch_parameters` - Arguments that are passed to the application at its launch. + * `launchPath` - Path to the application's executable in the instance. + * `metadata` - String to string map that contains additional attributes used to describe the application. + * `Name` - Name of the application. + * `platforms` - Array of strings describing the platforms on which the application can run. + Values will be from: WINDOWS | WINDOWS_SERVER_2016 | WINDOWS_SERVER_2019 | WINDOWS_SERVER_2022 | AMAZON_LINUX2 + * `workingDirectory` - Working directory for the application. 
+* `appstreamAgentVersion` - Version of the AppStream 2.0 agent to use for instances that are launched from this image. Has a maximum length of 100 characters. +* `arn` - ARN of the image. +* `baseImageArn` - ARN of the image from which the image was created. +* `createdTime` - Time at which this image was created. +* `description` - Description of image. +* `displayName` - Image name to display. +* `imageBuilderName` - The name of the image builder that was used to create the private image. If the image is shared, then the value is null. +* `imageBuilderSupported` - Boolean to indicate whether an image builder can be launched from this image. +* `image error` - Resource error object that describes the error containing the following: + * `errorCode` - Error code of the image. Values will be from: IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION | IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION | IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION | NETWORK_INTERFACE_LIMIT_EXCEEDED | INTERNAL_SERVICE_ERROR | IAM_SERVICE_ROLE_IS_MISSING | MACHINE_ROLE_IS_MISSING | STS_DISABLED_IN_REGION | SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES | IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION | SUBNET_NOT_FOUND | IMAGE_NOT_FOUND | INVALID_SUBNET_CONFIGURATION | SECURITY_GROUPS_NOT_FOUND | IGW_NOT_ATTACHED | IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION | FLEET_STOPPED | FLEET_INSTANCE_PROVISIONING_FAILURE | DOMAIN_JOIN_ERROR_FILE_NOT_FOUND | DOMAIN_JOIN_ERROR_ACCESS_DENIED | DOMAIN_JOIN_ERROR_LOGON_FAILURE | DOMAIN_JOIN_ERROR_INVALID_PARAMETER | DOMAIN_JOIN_ERROR_MORE_DATA | DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN | DOMAIN_JOIN_ERROR_NOT_SUPPORTED | DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME | DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED | DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED | DOMAIN_JOIN_NERR_PASSWORD_EXPIRED | DOMAIN_JOIN_INTERNAL_SERVICE_ERROR as the values. + * `errorMessage` - Error message of the image. + * `error_timestamp` - Time when the error occurred. 
+* `imagePermissions` - List of strings describing the image permissions containing the following: + * `allow_fleet` - Boolean indicating if the image can be used for a fleet. + * `allow_image_builder` - Indicates whether the image can be used for an image builder. +* `platform` - Operating system platform of the image. Values will be from: WINDOWS | WINDOWS_SERVER_2016 | WINDOWS_SERVER_2019 | WINDOWS_SERVER_2022 | AMAZON_LINUX2 +* `public_image_released_date` - Release date of base image if public. For private images, it is the release date of the base image that it was created from. +* `state` - Current state of image. Image starts in PENDING state which changes to AVAILABLE if creation passes and FAILED if it fails. Values will be from: PENDING | AVAILABLE | FAILED | COPYING | DELETING | CREATING | IMPORTING. +* `visibility` - Visibility type enum indicating whether the image is PUBLIC, PRIVATE, or SHARED. Valid values include: PUBLIC | PRIVATE | SHARED. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/autoscaling_group.html.markdown b/website/docs/cdktf/typescript/d/autoscaling_group.html.markdown index 559f541a705..8c0ef27629b 100644 --- a/website/docs/cdktf/typescript/d/autoscaling_group.html.markdown +++ b/website/docs/cdktf/typescript/d/autoscaling_group.html.markdown @@ -96,6 +96,7 @@ interpolation. * `instanceGenerations` - List of instance generation names. * `localStorage` - Indicates whether instance types with instance store volumes are included, excluded, or required. * `localStorageTypes` - List of local storage type names. + * `maxSpotPriceAsPercentageOfOptimalOnDemandPrice` - Price protection threshold for Spot Instances. * `memoryGibPerVcpu` - List of objects describing the minimum and maximum amount of memory (GiB) per vCPU. * `min` - Minimum. * `max` - Maximum. @@ -147,4 +148,4 @@ interpolation. * `poolState` - Instance state to transition to after the lifecycle actions are complete. 
* `warmPoolSize` - Current size of the warm pool. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/backup_plan.html.markdown b/website/docs/cdktf/typescript/d/backup_plan.html.markdown index 20e86c5a1b1..2f1aa3ff2f5 100644 --- a/website/docs/cdktf/typescript/d/backup_plan.html.markdown +++ b/website/docs/cdktf/typescript/d/backup_plan.html.markdown @@ -46,7 +46,8 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the backup plan. * `name` - Display name of a backup plan. +* `rule` - Rules of a backup plan. * `tags` - Metadata that you can assign to help organize the plans you create. * `version` - Unique, randomly generated, Unicode, UTF-8 encoded string that serves as the version ID of the backup plan. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_custom_model.html.markdown b/website/docs/cdktf/typescript/d/bedrock_custom_model.html.markdown index 484b9afa76a..4127525b46f 100644 --- a/website/docs/cdktf/typescript/d/bedrock_custom_model.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_custom_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_model" description: |- @@ -65,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `validationMetrics` - The loss metric for each validator that you provided. * `validation_loss` - The validation loss associated with the validator. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_custom_models.html.markdown b/website/docs/cdktf/typescript/d/bedrock_custom_models.html.markdown index 87bfe79e222..437469692ef 100644 --- a/website/docs/cdktf/typescript/d/bedrock_custom_models.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_custom_models.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_models" description: |- @@ -45,4 +45,4 @@ This data source exports the following attributes in addition to the arguments a * `modelArn` - The ARN of the custom model. * `modelName` - The name of the custom model. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_foundation_model.html.markdown b/website/docs/cdktf/typescript/d/bedrock_foundation_model.html.markdown index fc345090889..dc5e3ab107b 100644 --- a/website/docs/cdktf/typescript/d/bedrock_foundation_model.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_foundation_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_foundation_model" description: |- @@ -65,4 +65,4 @@ This data source exports the following attributes in addition to the arguments a * `providerName` - Model provider name. * `responseStreamingSupported` - Indicates whether the model supports streaming. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/bedrock_foundation_models.html.markdown b/website/docs/cdktf/typescript/d/bedrock_foundation_models.html.markdown index 26e1f0e248f..210f872ee6f 100644 --- a/website/docs/cdktf/typescript/d/bedrock_foundation_models.html.markdown +++ b/website/docs/cdktf/typescript/d/bedrock_foundation_models.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_foundation_models" description: |- @@ -84,4 +84,4 @@ This data source exports the following attributes in addition to the arguments a * `providerName` - Model provider name. * `responseStreamingSupported` - Indicates whether the model supports streaming. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cloudfront_origin_access_control.html.markdown b/website/docs/cdktf/typescript/d/cloudfront_origin_access_control.html.markdown new file mode 100644 index 00000000000..c66011b0423 --- /dev/null +++ b/website/docs/cdktf/typescript/d/cloudfront_origin_access_control.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_origin_access_control" +description: |- + Use this data source to retrieve information for an Amazon CloudFront origin access control config. +--- + + + +# Data Source: aws_cloudfront_origin_access_control + +Use this data source to retrieve information for an Amazon CloudFront origin access control config. + +## Example Usage + +The below example retrieves a CloudFront origin access control config. + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsCloudfrontOriginAccessIdentity } from "./.gen/providers/aws/data-aws-cloudfront-origin-access-identity"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsCloudfrontOriginAccessIdentity(this, "example", { + id: "E2T5VTFBZJ3BJB", + }); + } +} + +``` + +## Argument Reference + +* `id` (Required) - The identifier for the origin access control settings. For example: `E2T5VTFBZJ3BJB`. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `description` - A description of the origin access control. +* `etag` - Current version of the origin access control's information. For example: `E2QWRUHAPOMQZL`. +* `name` - A name to identify the origin access control. +* `originAccessControlOriginType` - The type of origin that this origin access control is for. +* `signingBehavior` - Specifies which requests CloudFront signs. +* `signingProtocol` - The signing protocol of the origin access control, which determines how CloudFront signs (authenticates) requests. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/cognito_user_pool.html.markdown b/website/docs/cdktf/typescript/d/cognito_user_pool.html.markdown new file mode 100644 index 00000000000..9899e85046f --- /dev/null +++ b/website/docs/cdktf/typescript/d/cognito_user_pool.html.markdown @@ -0,0 +1,136 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_user_pool" +description: |- + Terraform data source for managing an AWS Cognito User Pool. +--- + + + +# Data Source: aws_cognito_user_pool + +Terraform data source for managing an AWS Cognito User Pool. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsCognitoUserPool } from "./.gen/providers/aws/data-aws-cognito-user-pool"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsCognitoUserPool(this, "example", { + userPoolId: "us-west-2_aaaaaaaaa", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `userPoolId` - (Required) The Cognito user pool ID + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the User Pool. +* [account_recovery_setting](#account-recover-setting) - The available verified method a user can use to recover their password when they call ForgotPassword. You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred over email. +* [admin_create_user_config](#admin-create-user-config) - The configuration for AdminCreateUser requests. +* `autoVerifiedAttributes` - The attributes that are auto-verified in a user pool. +* `creationDate` - The date and time, in ISO 8601 format, when the item was created. +* `customDomain` - A custom domain name that you provide to Amazon Cognito. This parameter applies only if you use a custom domain to host the sign-up and sign-in pages for your application. An example of a custom domain name might be auth.example.com. 
+* `deletionProtection` - When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. +* [device_configuration](#device-configuration) - The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool. +* `domain` - The domain prefix, if the user pool has a domain associated with it. +* [email_configuration](#email-configuration) - The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool. +* `estimatedNumberOfUsers` - A number estimating the size of the user pool. +* [lambda_config](#lambda-config) - The AWS Lambda triggers associated with the user pool. +* `lastModifiedDate` - The date and time, in ISO 8601 format, when the item was modified. +* `mfaConfiguration` - Can be one of the following values: `OFF` | `ON` | `OPTIONAL` +* `name` - The name of the user pool. +* [schema_attributes](#schema-attributes) - A list of the user attributes and their properties in your user pool. The attribute schema contains standard attributes, custom attributes with a custom: prefix, and developer attributes with a dev: prefix. For more information, see User pool attributes. +* `smsAuthenticationMessage` - The contents of the SMS authentication message. +* `sms_configuration_failure` - The reason why the SMS configuration can't send the messages to your users. +* `smsVerificationMessage` - The contents of the SMS verification message. +* `user_pool_tags` - The tags that are assigned to the user pool. A tag is a label that you can apply to user pools to categorize and manage them in different ways, such as by purpose, owner, environment, or other criteria. 
+* `usernameAttributes` - Specifies whether a user can use an email address or phone number as a username when they sign up. + +### account recover setting + +* [recovery_mechanism](#recovery-mechanism) - Details about an individual recovery mechanism. + +### recovery mechanism + +* `name` - Name of the recovery mechanism (e.g., email, phone number). +* `priority` - Priority of this mechanism in the recovery process (lower numbers are higher priority). + +### admin create user config + +* `allowAdminCreateUserOnly` - Whether only admins can create users. +* `unused_account_validity_days` - Number of days an unconfirmed user account remains valid. +* [invite_message_template](#invite-message-template) - Templates for invitation messages. + +### invite message template + +* `emailMessage` - Email message content. +* `emailSubject` - Email message subject. +* `smsMessage` - SMS message content. + +### device configuration + +* `challengeRequiredOnNewDevice` - Whether a challenge is required on new devices. +* `deviceOnlyRememberedOnUserPrompt` - Whether devices are only remembered if the user prompts it. + +### email configuration + +* `configurationSet` - Configuration set used for sending emails. +* `emailSendingAccount` - Email sending account. +* `from` - Email sender address. +* `replyToEmailAddress` - Reply-to email address. +* `sourceArn` - Source Amazon Resource Name (ARN) for emails. + +### lambda config + +* [custom_email_sender](#lambda-function) - Configuration for a custom email sender Lambda function. +* [custom_sms_sender](#lambda-function) - Configuration for a custom SMS sender Lambda function +* [pre_token_generation_config](#lambda-function) - Configuration for a Lambda function that executes before token generation. + +### lambda function + +* `lambdaArn` - ARN of the Lambda function. +* `lambdaVersion` - Version of the Lambda function. + +### schema attributes + +* `attributeDataType` - Data type of the attribute (e.g., string, number). 
+* `developerOnlyAttribute` - Whether the attribute is for developer use only. +* `mutable` - Whether the attribute can be changed after user creation. +* `name` - Name of the attribute. +* `required` - Whether the attribute is required during user registration. +* [number_attribute_constraints](#number-attribute-constraints) - Constraints for numeric attributes. +* [string_attribute_constraints](#string-attribute-constraints) - Constraints for string attributes. + +### number attribute constraints + +* `maxValue` - Maximum allowed value. +* `minValue` - Minimum allowed value. + +### string attribute constraints + +* `maxLength` - Maximum allowed length. +* `minLength` - Minimum allowed length. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_contact_flow.html.markdown b/website/docs/cdktf/typescript/d/connect_contact_flow.html.markdown index 110209c4a35..f63a1b5d216 100644 --- a/website/docs/cdktf/typescript/d/connect_contact_flow.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_contact_flow.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `contactFlowId` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `contactFlowId` - (Optional) Returns information on a specific Contact Flow by contact flow id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance @@ -80,4 +80,4 @@ This data source exports the following attributes in addition to the arguments a * `tags` - Tags to assign to the Contact Flow. * `type` - Type of Contact Flow. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_contact_flow_module.html.markdown b/website/docs/cdktf/typescript/d/connect_contact_flow_module.html.markdown index 658dfaff8e6..bcabc19ff53 100644 --- a/website/docs/cdktf/typescript/d/connect_contact_flow_module.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_contact_flow_module.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `contactFlowModuleId` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `contactFlowModuleId` - (Optional) Returns information on a specific Contact Flow Module by contact flow module id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance @@ -81,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `state` - Type of Contact Flow Module Module. Values are either `ACTIVE` or `ARCHIVED`. * `status` - Status of the Contact Flow Module Module. Values are either `PUBLISHED` or `SAVED`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_hours_of_operation.html.markdown b/website/docs/cdktf/typescript/d/connect_hours_of_operation.html.markdown index b3bd48c559c..1239eeba4f1 100644 --- a/website/docs/cdktf/typescript/d/connect_hours_of_operation.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_hours_of_operation.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `hoursOfOperationId` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `hoursOfOperationId` - (Optional) Returns information on a specific Hours of Operation by hours of operation id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance @@ -99,4 +99,4 @@ A `startTime` block supports the following arguments: * `hours` - Hour of opening. * `minutes` - Minute of opening. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_instance.html.markdown b/website/docs/cdktf/typescript/d/connect_instance.html.markdown index 232c93ebbe7..98e4c52fb5d 100644 --- a/website/docs/cdktf/typescript/d/connect_instance.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_instance.html.markdown @@ -62,7 +62,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** One of either `instanceId` or `instanceAlias` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instanceId` - (Optional) Returns information on a specific connect instance by id @@ -86,4 +86,4 @@ This data source exports the following attributes in addition to the arguments a * `status` - State of the instance. * `serviceRole` - Service role of the instance. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_queue.html.markdown b/website/docs/cdktf/typescript/d/connect_queue.html.markdown index e49cf13e7c0..accf57683c6 100644 --- a/website/docs/cdktf/typescript/d/connect_queue.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_queue.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `queueId` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `queueId` - (Optional) Returns information on a specific Queue by Queue id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance @@ -90,4 +90,4 @@ A `outboundCallerConfig` block supports the following arguments: * `outboundCallerIdNumberId` - Specifies the caller ID number. * `outboundFlowId` - Outbound whisper flow to be used during an outbound call. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_quick_connect.html.markdown b/website/docs/cdktf/typescript/d/connect_quick_connect.html.markdown index 1e635d70a3a..2ea5c21812e 100644 --- a/website/docs/cdktf/typescript/d/connect_quick_connect.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_quick_connect.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `quickConnectId` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `quickConnectId` - (Optional) Returns information on a specific Quick Connect by Quick Connect id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance @@ -102,4 +102,4 @@ A `userConfig` block contains the following arguments: * `contactFlowId` - Identifier of the contact flow. * `userId` - Identifier for the user. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_routing_profile.html.markdown b/website/docs/cdktf/typescript/d/connect_routing_profile.html.markdown index f3b45b91157..55448c2d675 100644 --- a/website/docs/cdktf/typescript/d/connect_routing_profile.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_routing_profile.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `routingProfileId` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instanceId` - Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Routing Profile by name @@ -96,4 +96,4 @@ A `queueConfigs` block supports the following attributes: * `queueId` - Identifier for the queue. * `queueName` - Name for the queue. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_security_profile.html.markdown b/website/docs/cdktf/typescript/d/connect_security_profile.html.markdown index 2b7fa07b6da..6c624805278 100644 --- a/website/docs/cdktf/typescript/d/connect_security_profile.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_security_profile.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `securityProfileId` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `securityProfileId` - (Optional) Returns information on a specific Security Profile by Security Profile id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance @@ -81,4 +81,4 @@ This data source exports the following attributes in addition to the arguments a * `permissions` - List of permissions assigned to the security profile. 
* `tags` - Map of tags to assign to the Security Profile. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_user.html.markdown b/website/docs/cdktf/typescript/d/connect_user.html.markdown index 1a98e25b877..16d5ec55fbf 100644 --- a/website/docs/cdktf/typescript/d/connect_user.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_user.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `userId` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific User by name @@ -102,4 +102,4 @@ A `phoneConfig` block supports the following attributes: * `deskPhoneNumber` - The phone number for the user's desk phone. * `phoneType` - The phone type. Valid values are `DESK_PHONE` and `SOFT_PHONE`. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_user_hierarchy_group.html.markdown b/website/docs/cdktf/typescript/d/connect_user_hierarchy_group.html.markdown index a69c2335352..cfd4f253337 100644 --- a/website/docs/cdktf/typescript/d/connect_user_hierarchy_group.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_user_hierarchy_group.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `hierarchyGroupId` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `hierarchyGroupId` - (Optional) Returns information on a specific hierarchy group by hierarchy group id * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance @@ -94,4 +94,4 @@ A level block supports the following attributes: * `id` - The identifier of the hierarchy group. * `name` - Name of the hierarchy group. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/connect_vocabulary.html.markdown b/website/docs/cdktf/typescript/d/connect_vocabulary.html.markdown index 523f2337b01..30a2445369c 100644 --- a/website/docs/cdktf/typescript/d/connect_vocabulary.html.markdown +++ b/website/docs/cdktf/typescript/d/connect_vocabulary.html.markdown @@ -64,7 +64,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** `instanceId` and one of either `name` or `vocabularyId` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instanceId` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Vocabulary by name @@ -85,4 +85,4 @@ separated by a colon (`:`). * `tags` - A map of tags to assign to the Vocabulary. * `vocabularyId` - The identifier of the custom vocabulary. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/db_snapshot.html.markdown b/website/docs/cdktf/typescript/d/db_snapshot.html.markdown index 84bc6eeba39..774a10bfbf9 100644 --- a/website/docs/cdktf/typescript/d/db_snapshot.html.markdown +++ b/website/docs/cdktf/typescript/d/db_snapshot.html.markdown @@ -66,7 +66,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** One of either `dbInstanceIdentifier` or `dbSnapshotIdentifier` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `mostRecent` - (Optional) If more than one result is returned, use the most recent Snapshot. @@ -106,4 +106,4 @@ This data source exports the following attributes in addition to the arguments a * `snapshotCreateTime` - Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). Changes for the copy when the snapshot is copied. * `originalSnapshotCreateTime` - Provides the time when the snapshot was taken, in Universal Coordinated Time (UTC). Doesn't change when the snapshot is copied. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/default_tags.html.markdown b/website/docs/cdktf/typescript/d/default_tags.html.markdown index d81ff9b08ec..436400c60ac 100644 --- a/website/docs/cdktf/typescript/d/default_tags.html.markdown +++ b/website/docs/cdktf/typescript/d/default_tags.html.markdown @@ -98,11 +98,6 @@ This data source has no arguments. This data source exports the following attributes in addition to the arguments above: -* `tags` - Blocks of default tags set on the provider. See details below. +* `tags` - Key-value mapping of provider default tags. -### tags - -* `key` - Key name of the tag (i.e., `tags.#.key`). -* `value` - Value of the tag (i.e., `tags.#.value`). - - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_capacity_block_offering.html.markdown b/website/docs/cdktf/typescript/d/ec2_capacity_block_offering.html.markdown new file mode 100644 index 00000000000..b6ed803d788 --- /dev/null +++ b/website/docs/cdktf/typescript/d/ec2_capacity_block_offering.html.markdown @@ -0,0 +1,62 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_ec2_capacity_block_offering" +description: |- + Information about a single EC2 Capacity Block Offering. 
+--- + + + +# Data Source: aws_ec2_capacity_block_offering + +Information about a single EC2 Capacity Block Offering. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsEc2CapacityBlockOffering } from "./.gen/providers/aws/data-aws-ec2-capacity-block-offering"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsEc2CapacityBlockOffering(this, "example", { + capacityDurationHours: 24, + endDateRange: "2024-05-30T15:04:05Z", + instanceCount: 1, + instancePlatform: "Linux/UNIX", + instanceType: "p4d.24xlarge", + startDateRange: "2024-04-28T15:04:05Z", + }); + } +} + +``` + +## Argument Reference + +This data source supports the following arguments: + +* `capacityDurationHours` - (Required) The amount of time of the Capacity Block reservation in hours. +* `endDateRange` - (Optional) The date and time at which the Capacity Block Reservation expires. When a Capacity Reservation expires, the reserved capacity is released and you can no longer launch instances into it. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) +* `instanceCount` - (Required) The number of instances for which to reserve capacity. +* `instanceType` - (Required) The instance type for which to reserve capacity. +* `startDateRange` - (Optional) The date and time at which the Capacity Block Reservation starts. 
Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `availabilityZone` - The Availability Zone in which to create the Capacity Reservation. +* `currencyCode` - The currency of the payment for the Capacity Block. +* `capacityBlockOfferingId` - The Capacity Block Reservation ID. +* `upfrontFee` - The total price to be paid up front. +* `tenancy` - Indicates the tenancy of the Capacity Reservation. Specify either `default` or `dedicated`. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachments.html.markdown b/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachments.html.markdown new file mode 100644 index 00000000000..eeb3a4168f8 --- /dev/null +++ b/website/docs/cdktf/typescript/d/ec2_transit_gateway_peering_attachments.html.markdown @@ -0,0 +1,106 @@ +--- +subcategory: "Transit Gateway" +layout: "aws" +page_title: "AWS: aws_ec2_transit_gateway_peering_attachments" +description: |- + Get information on EC2 Transit Gateway Peering Attachments +--- + + + +# Data Source: aws_ec2_transit_gateway_peering_attachments + +Get information on EC2 Transit Gateway Peering Attachments. + +## Example Usage + +### All Resources + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DataAwsEc2TransitGatewayPeeringAttachments } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsEc2TransitGatewayPeeringAttachments(this, "test", {}); + } +} + +``` + +### By Filter + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformCount, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsEc2TransitGatewayPeeringAttachments } from "./.gen/providers/aws/"; +import { DataAwsEc2TransitGatewayPeeringAttachment } from "./.gen/providers/aws/data-aws-ec2-transit-gateway-peering-attachment"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const filtered = new DataAwsEc2TransitGatewayPeeringAttachments( + this, + "filtered", + { + filter: [ + { + name: "state", + values: ["pendingAcceptance"], + }, + ], + } + ); + /*In most cases loops should be handled in the programming language context and + not inside of the Terraform context. If you are looping over something external, e.g. a variable or a file input + you should consider using a for loop. If you are looping over something only known to Terraform, e.g. a result of a data source + you need to keep this like it is.*/ + const unitCount = TerraformCount.of( + Token.asNumber(Fn.lengthOf(filtered.ids)) + ); + new DataAwsEc2TransitGatewayPeeringAttachment(this, "unit", { + id: Token.asString(Fn.lookupNested(filtered.ids, [unitCount.index])), + count: unitCount, + }); + } +} + +``` + +## Argument Reference + +This data source supports the following arguments: + +* `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. 
+ +### filter Argument Reference + +* `name` - (Required) Name of the field to filter by, as defined by [the underlying AWS API][1] +* `values` - (Required) List of one or more values for the filter. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `ids` - A list of all attachment IDs matching the filter. You can retrieve more information about the attachment using the [aws_ec2_transit_gateway_peering_attachment][2] data source, searching by identifier. + +[1]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayPeeringAttachments.html +[2]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_transit_gateway_peering_attachment + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `read` - (Default `20m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/ecr_lifecycle_policy_document.html.markdown b/website/docs/cdktf/typescript/d/ecr_lifecycle_policy_document.html.markdown index b3f0b1948c4..1323d065140 100644 --- a/website/docs/cdktf/typescript/d/ecr_lifecycle_policy_document.html.markdown +++ b/website/docs/cdktf/typescript/d/ecr_lifecycle_policy_document.html.markdown @@ -69,14 +69,14 @@ Each document configuration may have one or more `rule` blocks, which each accep * `action` (Optional) - Specifies the action type. * `type` (Required) - The supported value is `expire`. * `description` (Optional) - Describes the purpose of a rule within a lifecycle policy. -* `priority` (Required) - Sets the order in which rules are evaluated, lowest to highest. When you add rules to a lifecycle policy, you must give them each a unique value for `priority`. Values do not need to be sequential across rules in a policy. A rule with a `tagStatus` value of any must have the highest value for `priority` and be evaluated last. 
+* `priority` (Required) - Sets the order in which rules are evaluated, lowest to highest. When you add rules to a lifecycle policy, you must give them each a unique value for `priority`. Values do not need to be sequential across rules in a policy. A rule with a `tagStatus` value of "any" must have the highest value for `priority` and be evaluated last. * `selection` (Required) - Collects parameters describing the selection criteria for the ECR lifecycle policy: - * `tagStatus` (Required) - Determines whether the lifecycle policy rule that you are adding specifies a tag for an image. Acceptable options are tagged, untagged, or any. If you specify any, then all images have the rule applied to them. If you specify tagged, then you must also specify a `tagPrefixList` value. If you specify untagged, then you must omit `tagPrefixList`. - * `tagPatternList` (Required if `tagStatus` is set to tagged and `tagPrefixList` isn't specified) - You must specify a comma-separated list of image tag patterns that may contain wildcards (*) on which to take action with your lifecycle policy. For example, if your images are tagged as prod, prod1, prod2, and so on, you would use the tag pattern list prod* to specify all of them. If you specify multiple tags, only the images with all specified tags are selected. There is a maximum limit of four wildcards (*) per string. For example, ["*test*1*2*3", "test*1*2*3*"] is valid but ["test*1*2*3*4*5*6"] is invalid. - * `tagPrefixList` (Required if `tagStatus` is set to tagged and `tagPatternList` isn't specified) - You must specify a comma-separated list of image tag prefixes on which to take action with your lifecycle policy. For example, if your images are tagged as prod, prod1, prod2, and so on, you would use the tag prefix prod to specify all of them. If you specify multiple tags, only images with all specified tags are selected. - * `countType` (Required) - Specify a count type to apply to the images. 
If `countType` is set to imageCountMoreThan, you also specify `countNumber` to create a rule that sets a limit on the number of images that exist in your repository. If `countType` is set to sinceImagePushed, you also specify `countUnit` and `countNumber` to specify a time limit on the images that exist in your repository. - * `countUnit` (Required if `countType` is set to sinceImagePushed) - Specify a count unit of days to indicate that as the unit of time, in addition to `countNumber`, which is the number of days. - * `countNumber` (Required) - Specify a count number. If the `countType` used is imageCountMoreThan, then the value is the maximum number of images that you want to retain in your repository. If the `countType` used is sinceImagePushed, then the value is the maximum age limit for your images. + * `tagStatus` (Required) - Determines whether the lifecycle policy rule that you are adding specifies a tag for an image. Acceptable options are "tagged", "untagged", or "any". If you specify "any", then all images have the rule applied to them. If you specify "tagged", then you must also specify a `tagPrefixList` value. If you specify "untagged", then you must omit `tagPrefixList`. + * `tagPatternList` (Required if `tagStatus` is set to "tagged" and `tagPrefixList` isn't specified) - You must specify a comma-separated list of image tag patterns that may contain wildcards (\*) on which to take action with your lifecycle policy. For example, if your images are tagged as `prod`, `prod1`, `prod2`, and so on, you would use the tag pattern list `["prod\*"]` to specify all of them. If you specify multiple tags, only the images with all specified tags are selected. There is a maximum limit of four wildcards (\*) per string. For example, `["*test*1*2*3", "test*1*2*3*"]` is valid but `["test*1*2*3*4*5*6"]` is invalid. 
+ * `tagPrefixList` (Required if `tagStatus` is set to "tagged" and `tagPatternList` isn't specified) - You must specify a comma-separated list of image tag prefixes on which to take action with your lifecycle policy. For example, if your images are tagged as `prod`, `prod1`, `prod2`, and so on, you would use the tag prefix "prod" to specify all of them. If you specify multiple tags, only images with all specified tags are selected. + * `countType` (Required) - Specify a count type to apply to the images. If `countType` is set to "imageCountMoreThan", you also specify `countNumber` to create a rule that sets a limit on the number of images that exist in your repository. If `countType` is set to "sinceImagePushed", you also specify `countUnit` and `countNumber` to specify a time limit on the images that exist in your repository. + * `countUnit` (Required if `countType` is set to "sinceImagePushed") - Specify a count unit of days to indicate that as the unit of time, in addition to `countNumber`, which is the number of days. + * `countNumber` (Required) - Specify a count number. If the `countType` used is "imageCountMoreThan", then the value is the maximum number of images that you want to retain in your repository. If the `countType` used is "sinceImagePushed", then the value is the maximum age limit for your images. ## Attribute Reference @@ -84,4 +84,4 @@ This data source exports the following attributes in addition to the arguments a * `json` - The above arguments serialized as a standard JSON policy document. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/fsx_ontap_file_system.html.markdown b/website/docs/cdktf/typescript/d/fsx_ontap_file_system.html.markdown index 9db204fddae..03703c90877 100644 --- a/website/docs/cdktf/typescript/d/fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/typescript/d/fsx_ontap_file_system.html.markdown @@ -51,7 +51,9 @@ In addition to all arguments above, the following attributes are exported: * `dailyAutomaticBackupStartTime` - The preferred time (in `HH:MM` format) to take daily automatic backups, in the UTC time zone. * `deploymentType` - The file system deployment type. * `diskIopsConfiguration` - The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system, specifying the number of provisioned IOPS and the provision mode. See [Disk IOPS](#disk-iops) Below. -* `dnsName` - DNS name for the file system (e.g. `fs-12345678.corp.example.com`). +* `dnsName` - DNS name for the file system. + + **Note:** This attribute does not apply to FSx for ONTAP file systems and is consequently not set. You can access your FSx for ONTAP file system and volumes via a [Storage Virtual Machine (SVM)](fsx_ontap_storage_virtual_machine.html) using its DNS name or IP address. * `endpointIpAddressRange` - (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system exist. * `endpoints` - The Management and Intercluster FileSystemEndpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. See [FileSystemEndpoints](#file-system-endpoints) below. * `haPairs` - The number of HA pairs for the file system. @@ -85,4 +87,4 @@ In addition to all arguments above, the following attributes are exported: * `DNSName` - The file system's DNS name. You can mount your file system using its DNS name. * `IpAddresses` - IP addresses of the file system endpoint. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/glue_catalog_table.html.markdown b/website/docs/cdktf/typescript/d/glue_catalog_table.html.markdown index 2015f4f7696..f59065ce642 100644 --- a/website/docs/cdktf/typescript/d/glue_catalog_table.html.markdown +++ b/website/docs/cdktf/typescript/d/glue_catalog_table.html.markdown @@ -76,6 +76,7 @@ This data source exports the following attributes in addition to the arguments a ### storage_descriptor +* `additional_locations` - List of locations that point to the path where a Delta table is located * `bucketColumns` - List of reducer grouping columns, clustering columns, and bucketing columns in the table. * `columns` - Configuration block for columns in the table. See [`columns`](#columns) below. * `compressed` - Whether the data in the table is compressed. @@ -133,4 +134,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - Name of the target table. * `region` - Region of the target table. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/lakeformation_data_lake_settings.html.markdown b/website/docs/cdktf/typescript/d/lakeformation_data_lake_settings.html.markdown index 154f818392d..37385d6e132 100644 --- a/website/docs/cdktf/typescript/d/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/cdktf/typescript/d/lakeformation_data_lake_settings.html.markdown @@ -52,6 +52,7 @@ This data source exports the following attributes in addition to the arguments a * `allowExternalDataFiltering` - Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `externalDataFilteringAllowList` - A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. 
* `authorizedSessionTagValueList` - Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. +* `allow_full_table_external_data_access` - Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. ### create_database_default_permissions @@ -63,4 +64,4 @@ This data source exports the following attributes in addition to the arguments a * `permissions` - List of permissions granted to the principal. * `principal` - Principal who is granted permissions. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/launch_configuration.html.markdown b/website/docs/cdktf/typescript/d/launch_configuration.html.markdown index 47a24a81c2e..09607d53386 100644 --- a/website/docs/cdktf/typescript/d/launch_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/launch_configuration.html.markdown @@ -57,6 +57,7 @@ This data source exports the following attributes in addition to the arguments a * `httpPutResponseHopLimit` - The desired HTTP PUT response hop limit for instance metadata requests. * `securityGroups` - List of associated Security Group IDS. * `associatePublicIpAddress` - Whether a Public IP address is associated with the instance. +* `primary_ipv6` - Whether the first IPv6 GUA will be made the primary IPv6 address. * `userData` - User Data of the instance. * `enableMonitoring` - Whether Detailed Monitoring is Enabled. * `ebsOptimized` - Whether the launched EC2 instance will be EBS-optimized. @@ -92,4 +93,4 @@ This data source exports the following attributes in addition to the arguments a * `deviceName` - Name of the device. * `virtualName` - Virtual Name of the device. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mskconnect_connector.html.markdown b/website/docs/cdktf/typescript/d/mskconnect_connector.html.markdown index 23d44bdb66a..50a6a42d1ba 100644 --- a/website/docs/cdktf/typescript/d/mskconnect_connector.html.markdown +++ b/website/docs/cdktf/typescript/d/mskconnect_connector.html.markdown @@ -46,6 +46,7 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the connector. * `description` - Summary description of the connector. +* `tags` - A map of tags assigned to the resource. * `version` - Current version of the connector. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mskconnect_custom_plugin.html.markdown b/website/docs/cdktf/typescript/d/mskconnect_custom_plugin.html.markdown index 9cdc67d8e01..b23ec59e16c 100644 --- a/website/docs/cdktf/typescript/d/mskconnect_custom_plugin.html.markdown +++ b/website/docs/cdktf/typescript/d/mskconnect_custom_plugin.html.markdown @@ -48,5 +48,6 @@ This data source exports the following attributes in addition to the arguments a * `description` - a summary description of the custom plugin. * `latestRevision` - an ID of the latest successfully created revision of the custom plugin. * `state` - the state of the custom plugin. +* `tags` - A map of tags assigned to the resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/mskconnect_worker_configuration.html.markdown b/website/docs/cdktf/typescript/d/mskconnect_worker_configuration.html.markdown index 060c0bb16dd..1eb18fabdbe 100644 --- a/website/docs/cdktf/typescript/d/mskconnect_worker_configuration.html.markdown +++ b/website/docs/cdktf/typescript/d/mskconnect_worker_configuration.html.markdown @@ -48,5 +48,6 @@ This data source exports the following attributes in addition to the arguments a * `description` - a summary description of the worker configuration. * `latestRevision` - an ID of the latest successfully created revision of the worker configuration. * `propertiesFileContent` - contents of connect-distributed.properties file. +* `tags` - A map of tags assigned to the resource. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/networkmanager_core_network_policy_document.html.markdown b/website/docs/cdktf/typescript/d/networkmanager_core_network_policy_document.html.markdown index 29f2337b409..1364e797c32 100644 --- a/website/docs/cdktf/typescript/d/networkmanager_core_network_policy_document.html.markdown +++ b/website/docs/cdktf/typescript/d/networkmanager_core_network_policy_document.html.markdown @@ -195,6 +195,7 @@ The following arguments are available: * `coreNetworkConfiguration` (Required) - The core network configuration section defines the Regions where a core network should operate. For AWS Regions that are defined in the policy, the core network creates a Core Network Edge where you can connect attachments. After it's created, each Core Network Edge is peered with every other defined Region and is configured with consistent segment and routing across all Regions. Regions cannot be removed until the associated attachments are deleted. Detailed below. * `segments` (Required) - Block argument that defines the different segments in the network. 
Here you can provide descriptions, change defaults, and provide explicit Regional operational and route filters. The names defined for each segment are used in the `segmentActions` and `attachmentPolicies` section. Each segment is created, and operates, as a completely separated routing domain. By default, attachments can only communicate with other attachments in the same segment. Detailed below. * `segmentActions` (Optional) - A block argument, `segmentActions` define how routing works between segments. By default, attachments can only communicate with other attachments in the same segment. Detailed below. +* `networkFunctionGroups` (Optional) - Block argument that defines the service insertion actions you want to include. Detailed below. ### `attachmentPolicies` @@ -205,15 +206,17 @@ The following arguments are available: * `conditions` (Required) - A block argument. Detailed Below. * `description` (Optional) - A user-defined description that further helps identify the rule. * `ruleNumber` (Required) - An integer from `1` to `65535` indicating the rule's order number. Rules are processed in order from the lowest numbered rule to the highest. Rules stop processing when a rule is matched. It's important to make sure that you number your rules in the exact order that you want them processed. +* `addToNetworkFunctionGroup` (Optional) - The name of the network function group to attach to the attachment policy. ### `action` The following arguments are available: -* `associationMethod` (Required) - Defines how a segment is mapped. Values can be `constant` or `tag`. `constant` statically defines the segment to associate the attachment to. `tag` uses the value of a tag to dynamically try to map to a segment.reference_policies_elements_condition_operators.html) to evaluate. +* `associationMethod` (Optional) - Defines how a segment is mapped. Values can be `constant` or `tag`. `constant` statically defines the segment to associate the attachment to. 
`tag` uses the value of a tag to dynamically try to map to a segment.reference_policies_elements_condition_operators.html) to evaluate. * `segment` (Optional) - Name of the `segment` to share as defined in the `segments` section. This is used only when the `associationMethod` is `constant`. * `tagValueOfKey` (Optional) - Maps the attachment to the value of a known key. This is used when the `associationMethod` is `tag`. For example a `tag` of `stage = “test”`, will map to a segment named `test`. The value must exactly match the name of a segment. This allows you to have many segments, but use only a single rule without having to define multiple nearly identical conditions. This prevents creating many similar conditions that all use the same keys to map to segments. * `requireAcceptance` (Optional) - Determines if this mapping should override the segment value for `requireAttachmentAcceptance`. You can only set this to `true`, indicating that this setting applies only to segments that have `requireAttachmentAcceptance` set to `false`. If the segment already has the default `requireAttachmentAcceptance`, you can set this to inherit segment’s acceptance value. +* `addToNetworkFunctionGroup` (Optional) - The name of the network function group to attach to the attachment policy.
For more details on their use see the [AWS documentation](https://docs.aws.amazon.com/vpc/latest/cloudwan/cloudwan-policies-json.html#cloudwan-segment-actions-json). ~> **NOTE:** `shareWith` and `shareWithExcept` break from the AWS API specification. The API has 1 argument `share-with` and it can accept 3 input types as valid (`"*"`, `[""]`, or `{ except: [""]}`). To emulate this behavior, `shareWith` is always a list that can accept the argument `["*"]` as valid for `"*"` and `shareWithExcept` is a list that can accept `[""]` as valid for `{ except: [""]}`. You may only specify one of: `shareWith` or `shareWithExcept`. The following arguments are available: -* `action` (Required) - Action to take for the chosen segment. Valid values `create-route` or `share`. +* `action` (Required) - Action to take for the chosen segment. Valid values: `create-route`, `share`, `send-via` and `send-to`. * `description` (Optional) - A user-defined string describing the segment action. * `destinationCidrBlocks` (Optional) - List of strings containing CIDRs. You can define the IPv4 and IPv6 CIDR notation for each AWS Region. For example, `10.1.0.0/16` or `2001:db8::/56`. This is an array of CIDR notation strings. * `destinations` (Optional) - A list of strings. Valid values include `["blackhole"]` or a list of attachment ids. -* `mode` (Optional) - String. This mode places the attachment and return routes in each of the `shareWith` segments. Valid values include: `attachment-route`. +* `mode` (Optional) - String. When `action` is `share`, a `mode` value of `attachment-route` places the attachment and return routes in each of the `shareWith` segments. When `action` is `send-via`, indicates the mode used for packets. Valid values: `attachment-route`, `single-hop`, `dual-hop`. * `segment` (Optional) - Name of the segment. * `shareWith` (Optional) - A list of strings to share with. Must be a substring of all segments. Valid values include: `["*"]` or `[""]`.
* `shareWithExcept` (Optional) - A set subtraction of segments to not share with. +* `whenSentTo` (Optional) - The destination segments for the `send-via` or `send-to` `action`. + * `segments` (Optional) - A list of strings. The list of segments that the `send-via` `action` uses. +* `via` (Optional) - The network function groups and any edge overrides associated with the action. + * `networkFunctionGroups` (Optional) - A list of strings. The network function group to use for the service insertion action. + * `withEdgeOverride` (Optional) - Any edge overrides and the preferred edge to use. + * `edgeSets` (Optional) - A list of strings. The list of edges associated with the network function group. + * `useEdge` (Optional) - The preferred edge to use. + +### `networkFunctionGroups` + +* `name` (Required) - This identifies the network function group container. +* `description` (Optional) - Optional description of the network function group. +* `requireAttachmentAcceptance` (Required) - This will be either `true`, that attachment acceptance is required, or `false`, that it is not required. ## Attribute Reference @@ -278,4 +294,4 @@ This data source exports the following attributes in addition to the arguments a * `json` - Standard JSON policy document rendered based on the arguments above. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/oam_link.html.markdown b/website/docs/cdktf/typescript/d/oam_link.html.markdown index 193219ead56..38e17e49c85 100644 --- a/website/docs/cdktf/typescript/d/oam_link.html.markdown +++ b/website/docs/cdktf/typescript/d/oam_link.html.markdown @@ -48,10 +48,31 @@ The following arguments are required: This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the link. +* `id` - ARN of the link. * `label` - Label that is assigned to this link. 
* `labelTemplate` - Human-readable name used to identify this source account when you are viewing data from it in the monitoring account. +* `linkConfiguration` - Configuration for creating filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account. See [`linkConfiguration` Block](#link_configuration-block) for details. * `linkId` - ID string that AWS generated as part of the link ARN. * `resourceTypes` - Types of data that the source account shares with the monitoring account. * `sinkArn` - ARN of the sink that is used for this link. - \ No newline at end of file +### `linkConfiguration` Block + +The `linkConfiguration` configuration block supports the following arguments: + +* `logGroupConfiguration` - Configuration for filtering which log groups are to send log events from the source account to the monitoring account. See [`logGroupConfiguration` Block](#log_group_configuration-block) for details. +* `metricConfiguration` - Configuration for filtering which metric namespaces are to be shared from the source account to the monitoring account. See [`metricConfiguration` Block](#metric_configuration-block) for details. + +### `logGroupConfiguration` Block + +The `logGroupConfiguration` configuration block supports the following arguments: + +* `filter` - Filter string that specifies which log groups are to share their log events with the monitoring account. See [LogGroupConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_LogGroupConfiguration.html) for details. + +### `metricConfiguration` Block + +The `metricConfiguration` configuration block supports the following arguments: + +* `filter` - Filter string that specifies which metrics are to be shared with the monitoring account. See [MetricConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_MetricConfiguration.html) for details. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/oam_sink.html.markdown b/website/docs/cdktf/typescript/d/oam_sink.html.markdown index 6b083e4e4e0..2551c341b58 100644 --- a/website/docs/cdktf/typescript/d/oam_sink.html.markdown +++ b/website/docs/cdktf/typescript/d/oam_sink.html.markdown @@ -48,8 +48,9 @@ The following arguments are required: This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the sink. +* `id` - ARN of the sink. * `name` - Name of the sink. * `sinkId` - Random ID string that AWS generated as part of the sink ARN. * `tags` - Tags assigned to the sink. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/opensearch_domain.html.markdown b/website/docs/cdktf/typescript/d/opensearch_domain.html.markdown index b5804d76f77..e947f841119 100644 --- a/website/docs/cdktf/typescript/d/opensearch_domain.html.markdown +++ b/website/docs/cdktf/typescript/d/opensearch_domain.html.markdown @@ -81,6 +81,7 @@ This data source exports the following attributes in addition to the arguments a * `identityPoolId` - Cognito Identity pool used by the domain. * `roleArn` - IAM Role with the AmazonOpenSearchServiceCognitoAccess policy attached. * `created` – Status of the creation of the domain. +* `dashboardEndpoint` - Domain-specific endpoint used to access the [Dashboard application](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). * `deleted` – Status of the deletion of the domain. * `domainId` – Unique identifier for the domain. * `ebsOptions` - EBS Options for the instances in the domain. @@ -94,7 +95,7 @@ This data source exports the following attributes in addition to the arguments a * `enabled` - Whether encryption at rest is enabled in the domain. * `kmsKeyId` - KMS key id used to encrypt data at rest. * `endpoint` – Domain-specific endpoint used to submit index, search, and data upload requests. 
-* `dashboardEndpoint` - Domain-specific endpoint used to access the [Dashboard application](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/dashboards.html). +* `ipAddressType` - Type of IP addresses supported by the endpoint for the domain. * `kibanaEndpoint` - (**Deprecated**) Domain-specific endpoint for kibana without https scheme. Use the `dashboardEndpoint` attribute instead. * `logPublishingOptions` - Domain log publishing related options. * `logType` - Type of OpenSearch log being published. @@ -120,4 +121,4 @@ This data source exports the following attributes in addition to the arguments a * `subnetIds` - Subnets used by the domain. * `vpcId` - VPC used by the domain. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/organizations_policy.html.markdown b/website/docs/cdktf/typescript/d/organizations_policy.html.markdown index 9896fa390ae..3e03e3013c1 100644 --- a/website/docs/cdktf/typescript/d/organizations_policy.html.markdown +++ b/website/docs/cdktf/typescript/d/organizations_policy.html.markdown @@ -24,29 +24,28 @@ import { Fn, Token, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { DataAwsOrganizationalPolicies } from "./.gen/providers/aws/"; import { DataAwsOrganizationsOrganization } from "./.gen/providers/aws/data-aws-organizations-organization"; import { DataAwsOrganizationsPoliciesForTarget } from "./.gen/providers/aws/data-aws-organizations-policies-for-target"; +import { DataAwsOrganizationsPolicy } from "./.gen/providers/aws/data-aws-organizations-policy"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - new DataAwsOrganizationalPolicies(this, "test", { - policy_id: Fn.lookupNested(current.policies, ["0", "id"]), - }); - const dataAwsOrganizationsOrganizationCurrent = - new DataAwsOrganizationsOrganization(this, "current", {}); + const current = new DataAwsOrganizationsOrganization(this, "current", {}); const dataAwsOrganizationsPoliciesForTargetCurrent = - new DataAwsOrganizationsPoliciesForTarget(this, "current_2", { + new DataAwsOrganizationsPoliciesForTarget(this, "current_1", { filter: "SERVICE_CONTROL_POLICY", - targetId: Token.asString( - Fn.lookupNested(dataAwsOrganizationsOrganizationCurrent.roots, [ - "0", - "id", - ]) - ), + targetId: Token.asString(Fn.lookupNested(current.roots, ["0", "id"])), }); /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ dataAwsOrganizationsPoliciesForTargetCurrent.overrideLogicalId("current"); + new DataAwsOrganizationsPolicy(this, "test", { + policyId: Token.asString( + Fn.lookupNested(dataAwsOrganizationsPoliciesForTargetCurrent.policies, [ + "0", + "id", + ]) + ), + }); } } @@ -69,4 +68,4 @@ This data source exports the following attributes in addition to the arguments a * `name` - The friendly name of the policy. 
* `type` - The type of policy values can be `SERVICE_CONTROL_POLICY | TAG_POLICY | BACKUP_POLICY | AISERVICES_OPT_OUT_POLICY` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/route53_zone.html.markdown b/website/docs/cdktf/typescript/d/route53_zone.html.markdown index 68aa27b1e41..7e494134bca 100644 --- a/website/docs/cdktf/typescript/d/route53_zone.html.markdown +++ b/website/docs/cdktf/typescript/d/route53_zone.html.markdown @@ -51,10 +51,9 @@ class MyConvertedCode extends TerraformStack { The arguments of this data source act as filters for querying the available Hosted Zone. You have to use `zoneId` or `name`, not both of them. The given filter must match exactly one -Hosted Zone. If you use `name` field for private Hosted Zone, you need to add `privateZone` field to `true` +Hosted Zone. If you use `name` field for private Hosted Zone, you need to add `privateZone` field to `true`. * `zoneId` - (Optional) Hosted Zone id of the desired Hosted Zone. - * `name` - (Optional) Hosted Zone name of the desired Hosted Zone. * `privateZone` - (Optional) Used with `name` field to get a private Hosted Zone. * `vpcId` - (Optional) Used with `name` field to get a private Hosted Zone associated with the vpc_id (in this case, private_zone is not mandatory). @@ -72,10 +71,14 @@ The following attribute is additionally exported: * `arn` - ARN of the Hosted Zone. * `callerReference` - Caller Reference of the Hosted Zone. * `comment` - Comment field of the Hosted Zone. +* `linkedServicePrincipal` - The service that created the Hosted Zone (e.g., `servicediscovery.amazonaws.com`). +* `linkedServiceDescription` - The description provided by the service that created the Hosted Zone (e.g., `arn:aws:servicediscovery:us-east-1:1234567890:namespace/ns-xxxxxxxxxxxxxxxx`). +* `name` - The Hosted Zone name. * `nameServers` - List of DNS name servers for the Hosted Zone. 
* `primaryNameServer` - The Route 53 name server that created the SOA record. +* `privateZone` - Indicates whether this is a private hosted zone. * `resourceRecordSetCount` - The number of Record Set in the Hosted Zone. -* `linkedServicePrincipal` - The service that created the Hosted Zone (e.g., `servicediscovery.amazonaws.com`). -* `linkedServiceDescription` - The description provided by the service that created the Hosted Zone (e.g., `arn:aws:servicediscovery:us-east-1:1234567890:namespace/ns-xxxxxxxxxxxxxxxx`). +* `tags` - A map of tags assigned to the Hosted Zone. +* `zoneId` - The Hosted Zone identifier. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/service_discovery_service.html.markdown b/website/docs/cdktf/typescript/d/service_discovery_service.html.markdown index ed738a144b7..94a5694bd2d 100644 --- a/website/docs/cdktf/typescript/d/service_discovery_service.html.markdown +++ b/website/docs/cdktf/typescript/d/service_discovery_service.html.markdown @@ -49,39 +49,39 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the service. * `arn` - ARN of the service. * `description` - Description of the service. -* `dnsConfig` - Complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. -* `healthCheckConfig` - Complex type that contains settings for an optional health check. Only for Public DNS namespaces. -* `healthCheckCustomConfig` - A complex type that contains settings for ECS managed health checks. +* `dnsConfig` - Complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. See [`dnsConfig` Block](#dns_config-block) for details. +* `healthCheckConfig` - Complex type that contains settings for an optional health check. Only for Public DNS namespaces. 
See [`healthCheckConfig` Block](#health_check_config-block) for details. +* `healthCheckCustomConfig` - A complex type that contains settings for ECS managed health checks. See [`healthCheckCustomConfig` Block](#health_check_custom_config-block) for details. * `tags` - Map of tags to assign to the service. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `tagsAll` - (**Deprecated**) Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -### dns_config +### `dnsConfig` Block -This argument supports the following arguments: +The `dnsConfig` configuration block supports the following arguments: * `namespaceId` - ID of the namespace to use for DNS configuration. -* `dnsRecords` - An array that contains one DnsRecord object for each resource record set. +* `dnsRecords` - An array that contains one DnsRecord object for each resource record set. See [`dnsRecords` Block](#dns_records-block) for details. * `routingPolicy` - Routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED -#### dns_records +#### `dnsRecords` Block -This argument supports the following arguments: +The `dnsRecords` configuration block supports the following arguments: * `ttl` - Amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. * `type` - Type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. 
Valid Values: A, AAAA, SRV, CNAME -### health_check_config +### `healthCheckConfig` Block -This argument supports the following arguments: +The `healthCheckConfig` configuration block supports the following arguments: * `failureThreshold` - Number of consecutive health checks. Maximum value of 10. * `resourcePath` - Path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. * `type` - The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP -### health_check_custom_config +### `healthCheckCustomConfig` Block -This argument supports the following arguments: +The `healthCheckCustomConfig` configuration block supports the following arguments: * `failureThreshold` - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/d/transfer_connector.html.markdown b/website/docs/cdktf/typescript/d/transfer_connector.html.markdown new file mode 100644 index 00000000000..7b94943c718 --- /dev/null +++ b/website/docs/cdktf/typescript/d/transfer_connector.html.markdown @@ -0,0 +1,70 @@ +--- +subcategory: "Transfer Family" +layout: "aws" +page_title: "AWS: aws_transfer_connector" +description: |- + Terraform data source for managing an AWS Transfer Family Connector. +--- + + + +# Data Source: aws_transfer_connector + +Terraform data source for managing an AWS Transfer Family Connector. + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsTransferConnector } from "./.gen/providers/aws/data-aws-transfer-connector"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new DataAwsTransferConnector(this, "test", { + id: "c-xxxxxxxxxxxxxx", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) Unique identifier for connector + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `accessRole` - ARN of the AWS Identity and Access Management role. +* `arn` - ARN of the Connector. +* `as2Config` - Structure containing the parameters for an AS2 connector object. Contains the following attributes: + * `basic_auth_secret_id` - Basic authentication for AS2 connector API. Returns a null value if not set. + * `compression` - Specifies whether AS2 file is compressed. Will be ZLIB or DISABLED + * `encryptionAlgorithm` - Algorithm used to encrypt file. Will be AES128_CBC or AES192_CBC or AES256_CBC or DES_EDE3_CBC or NONE. + * `localProfileId` - Unique identifier for AS2 local profile. + * `mdnResponse` - Used for outbound requests to tell if response is asynchronous or not. Will be either SYNC or NONE. + * `mdnSigningAlgorithm` - Signing algorithm for MDN response. Will be SHA256 or SHA384 or SHA512 or SHA1 or NONE or DEFAULT. + * `messageSubject` - Subject HTTP header attribute in outbound AS2 messages to the connector. + * `partnerProfileId` - Unique identifier used by connector for partner profile. + * `signingAlgorithm` - Algorithm used for signing AS2 messages sent with the connector. +* `loggingRole` - ARN of the IAM role that allows a connector to turn on CloudWatch logging for Amazon S3 events. +* `securityPolicyName` - Name of security policy. +* `serviceManagedEgressIpAddresses` - List of egress IP addresses.
+* `sftpConfig` - Object containing the following attributes: + * `trustedHostKeys` - List of the public portions of the host keys that are used to identify the servers the connector is connected to. + * `userSecretId` - Identifier for the secret in AWS Secrets Manager that contains the SFTP user's private key, and/or password. +* `tags` - Object containing the following attributes: + * `key` - Name of the tag. + * `value` - Values associated with the tags key. +* `url` - URL of the partner's AS2 or SFTP endpoint. + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/guides/custom-service-endpoints.html.markdown index 144ea70a63d..a3727fc1b0e 100644 --- a/website/docs/cdktf/typescript/guides/custom-service-endpoints.html.markdown +++ b/website/docs/cdktf/typescript/guides/custom-service-endpoints.html.markdown @@ -116,6 +116,7 @@ class MyConvertedCode extends TerraformStack {
  • appflow
  • appintegrations (or appintegrationsservice)
  • applicationinsights
  • +
  • applicationsignals
  • appmesh
  • apprunner
  • appstream
  • @@ -165,6 +166,7 @@ class MyConvertedCode extends TerraformStack {
  • costoptimizationhub
  • cur (or costandusagereportservice)
  • customerprofiles
  • +
  • databrew (or gluedatabrew)
  • dataexchange
  • datapipeline
  • datasync
  • @@ -258,6 +260,7 @@ class MyConvertedCode extends TerraformStack {
  • neptunegraph
  • networkfirewall
  • networkmanager
  • +
  • networkmonitor
  • oam (or cloudwatchobservabilityaccessmanager)
  • opensearch (or opensearchservice)
  • opensearchserverless
  • @@ -445,4 +448,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/index.html.markdown b/website/docs/cdktf/typescript/index.html.markdown index 52cdc6d1ca8..54ff9f1bf91 100644 --- a/website/docs/cdktf/typescript/index.html.markdown +++ b/website/docs/cdktf/typescript/index.html.markdown @@ -13,7 +13,7 @@ Use the Amazon Web Services (AWS) provider to interact with the many resources supported by AWS. You must configure the provider with the proper credentials before you can use it. -Use the navigation to the left to read about the available resources. There are currently 1373 resources and 559 data sources available in the provider. +Use the navigation to the left to read about the available resources. There are currently 1387 resources and 564 data sources available in the provider. To learn the basics of Terraform using this provider, follow the hands-on [get started tutorials](https://learn.hashicorp.com/tutorials/terraform/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). Interact with AWS services, @@ -853,4 +853,4 @@ Approaches differ per authentication providers: There used to be no better way to get account ID out of the API when using the federated account until `sts:GetCallerIdentity` was introduced. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/amplify_domain_association.html.markdown b/website/docs/cdktf/typescript/r/amplify_domain_association.html.markdown index 0358cd7d5aa..f0d94693ad0 100644 --- a/website/docs/cdktf/typescript/r/amplify_domain_association.html.markdown +++ b/website/docs/cdktf/typescript/r/amplify_domain_association.html.markdown @@ -72,11 +72,17 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: * `appId` - (Required) Unique ID for an Amplify app. +* `certificateSettings` - (Optional) The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you. * `domainName` - (Required) Domain name for the domain association. * `enableAutoSubDomain` - (Optional) Enables the automated creation of subdomains for branches. * `subDomain` - (Required) Setting for the subdomain. Documented below. * `waitForVerification` - (Optional) If enabled, the resource will wait for the domain association status to change to `PENDING_DEPLOYMENT` or `AVAILABLE`. Setting this to `false` will skip the process. Default: `true`. +The `certificateSettings` configuration block supports the following arguments: + +* `type` - (Required) The certificate type. Valid values are `AMPLIFY_MANAGED` and `CUSTOM`. +* `customCertificateArn` - (Optional) The Amazon resource name (ARN) for the custom certificate. + The `subDomain` configuration block supports the following arguments: * `branchName` - (Required) Branch name setting for the subdomain. 
@@ -126,4 +132,4 @@ Using `terraform import`, import Amplify domain association using `appId` and `d % terraform import aws_amplify_domain_association.app d2ypk4k47z8u6/example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/api_gateway_integration.html.markdown b/website/docs/cdktf/typescript/r/api_gateway_integration.html.markdown index 08ffe8b1d90..7ab6b72227c 100644 --- a/website/docs/cdktf/typescript/r/api_gateway_integration.html.markdown +++ b/website/docs/cdktf/typescript/r/api_gateway_integration.html.markdown @@ -276,7 +276,7 @@ This resource supports the following arguments: * `cacheKeyParameters` - (Optional) List of cache key parameters for the integration. * `cacheNamespace` - (Optional) Integration's cache namespace. * `contentHandling` - (Optional) How to handle request payload content type conversions. Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT`. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. -* `timeoutMilliseconds` - (Optional) Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds. +* `timeoutMilliseconds` - (Optional) Custom timeout between 50 and 300,000 milliseconds. The default value is 29,000 milliseconds. You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase time beyond 29,000 milliseconds. * `tlsConfig` - (Optional) TLS configuration. See below. 
### tls_config Configuration Block @@ -321,4 +321,4 @@ Using `terraform import`, import `aws_api_gateway_integration` using `REST-API-I % terraform import aws_api_gateway_integration.example 12345abcde/67890fghij/GET ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_app_authorization.html.markdown b/website/docs/cdktf/typescript/r/appfabric_app_authorization.html.markdown new file mode 100644 index 00000000000..32fcb3b995a --- /dev/null +++ b/website/docs/cdktf/typescript/r/appfabric_app_authorization.html.markdown @@ -0,0 +1,102 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_app_authorization" +description: |- + Terraform resource for managing an AWS AppFabric App Authorization. +--- + + + +# Resource: aws_appfabric_app_authorization + +Terraform resource for managing an AWS AppFabric App Authorization. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { AppfabricAppAuthorization } from "./.gen/providers/aws/appfabric-app-authorization"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppfabricAppAuthorization(this, "example", { + app: "TERRAFORMCLOUD", + appBundleArn: arn, + authType: "apiKey", + credential: [ + { + apiKeyCredential: [ + { + apiKey: "exampleapikeytoken", + }, + ], + }, + ], + tenant: [ + { + tenantDisplayName: "example", + tenantIdentifier: "example", + }, + ], + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `app` - (Required) The name of the application. For valid values, see https://docs.aws.amazon.com/appfabric/latest/api/API_CreateAppAuthorization.html. +* `appBundleArn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. +* `authType` - (Required) The authorization type for the app authorization. Valid values are `oauth2` and `apiKey`. +* `credential` - (Required) Contains credentials for the application, such as an API key or OAuth2 client ID and secret. +Specify credentials that match the authorization type for your request. For example, if the authorization type for your request is OAuth2 (oauth2), then you should provide only the OAuth2 credentials. +* `tenant` - (Required) Contains information about an application tenant, such as the application display name and identifier. + +Credential support the following: + +* `apiKeyCredential` - (Optional) Contains API key credential information. +* `oauth2Credential` - (Optional) Contains OAuth2 client credential information. + +API Key Credential support the following: + +* `apiKey` - (Required) Contains API key credential information. + +oauth2 Credential support the following: + +* `clientId` - (Required) The client ID of the client application. +* `clientSecret` - (Required) The client secret of the client application.
+ +Tenant support the following: + +* `tenantDisplayName` - (Required) The display name of the tenant. +* `tenantIdentifier` - (Required) The ID of the application tenant. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the App Authorization. +* `authUrl` - The application URL for the OAuth flow. +* `persona` - The user persona of the app authorization. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_app_authorization_connection.html.markdown b/website/docs/cdktf/typescript/r/appfabric_app_authorization_connection.html.markdown new file mode 100644 index 00000000000..85f6ad4f12a --- /dev/null +++ b/website/docs/cdktf/typescript/r/appfabric_app_authorization_connection.html.markdown @@ -0,0 +1,66 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_app_authorization_connection" +description: |- + Terraform resource for managing an AWS AppFabric App Authorization Connection. +--- + + + +# Resource: aws_appfabric_app_authorization_connection + +Terraform resource for managing an AWS AppFabric App Authorization Connection. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details.
+ */ +import { AppfabricAppAuthorizationConnection } from "./.gen/providers/aws/appfabric-app-authorization-connection"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppfabricAppAuthorizationConnection(this, "example", { + appAuthorizationArn: test.arn, + appBundleArn: arn, + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `appBundleArn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. +* `appAuthorizationArn` - (Required) The Amazon Resource Name (ARN) or Universal Unique Identifier (UUID) of the app authorization to use for the request. +* `authRequest` - (Optional) Contains OAuth2 authorization information. This is required if the app authorization for the request is configured with an OAuth2 (oauth2) authorization type. + +Auth Request support the following: + +* `code` - (Required) The authorization code returned by the application after permission is granted in the application OAuth page (after clicking on the AuthURL). +* `redirectUri` - (Optional) The redirect URL that is specified in the AuthURL and the application client. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `app` - The name of the application. +* `tenant` - Contains information about an application tenant, such as the application display name and identifier.
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_app_bundle.html.markdown b/website/docs/cdktf/typescript/r/appfabric_app_bundle.html.markdown new file mode 100644 index 00000000000..fd100a04494 --- /dev/null +++ b/website/docs/cdktf/typescript/r/appfabric_app_bundle.html.markdown @@ -0,0 +1,88 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_app_bundle" +description: |- + Terraform resource for managing an AWS AppFabric AppBundle. +--- + + + +# Resource: aws_appfabric_app_bundle + +Terraform resource for managing an AWS AppFabric AppBundle. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppfabricAppBundle } from "./.gen/providers/aws/appfabric-app-bundle"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppfabricAppBundle(this, "example", { + customerManagedKeyArn: Token.asString(awsKmsKeyExample.arn), + tags: { + Environment: "test", + }, + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `customerManagedKeyArn` - (Optional) The Amazon Resource Name (ARN) of the AWS Key Management Service (AWS KMS) key to use to encrypt the application data. If this is not specified, an AWS owned key is used for encryption. +* `tags` - (Optional) Map of tags to assign to the resource.
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the AppBundle. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFabric AppBundle using the `arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppfabricAppBundle } from "./.gen/providers/aws/appfabric-app-bundle"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + AppfabricAppBundle.generateConfigForImport( + this, + "example", + "arn:aws:appfabric:[region]:[account]:appbundle/ee5587b4-5765-4288-a202-xxxxxxxxxx" + ); + } +} + +``` + +Using `terraform import`, import AppFabric AppBundle using the `arn`. 
For example: + +```console +% terraform import aws_appfabric_app_bundle.example arn:aws:appfabric:[region]:[account]:appbundle/ee5587b4-5765-4288-a202-xxxxxxxxxx +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_ingestion.html.markdown b/website/docs/cdktf/typescript/r/appfabric_ingestion.html.markdown new file mode 100644 index 00000000000..10bf52726fa --- /dev/null +++ b/website/docs/cdktf/typescript/r/appfabric_ingestion.html.markdown @@ -0,0 +1,95 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_ingestion" +description: |- + Terraform resource for managing an AWS AppFabric Ingestion. +--- + + + +# Resource: aws_appfabric_ingestion + +Terraform resource for managing an AWS AppFabric Ingestion. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { AppfabricIngestion } from "./.gen/providers/aws/appfabric-ingestion"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppfabricIngestion(this, "example", { + app: "OKTA", + appBundleArn: Token.asString(awsAppfabricAppBundleExample.arn), + ingestionType: "auditLog", + tags: { + Environment: "test", + }, + tenantId: "example.okta.com", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `app` - (Required) Name of the application. +Refer to the AWS Documentation for the [list of valid values](https://docs.aws.amazon.com/appfabric/latest/api/API_CreateIngestion.html#appfabric-CreateIngestion-request-app) +* `appBundleArn` - (Required) Amazon Resource Name (ARN) of the app bundle to use for the request. 
+* `ingestionType` - (Required) Ingestion type. Valid values are `auditLog`. +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tenantId` - (Required) ID of the application tenant. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Ingestion. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFabric Ingestion using the `app_bundle_identifier` and `arn` separated by `,`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { AppfabricIngestion } from "./.gen/providers/aws/appfabric-ingestion"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + AppfabricIngestion.generateConfigForImport( + this, + "example", + "arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx,arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx/ingestion/32251416-710b-4425-96ca-xxxxxxxxxx" + ); + } +} + +``` + +Using `terraform import`, import AppFabric Ingestion using the `app_bundle_identifier` and `arn` separated by `,`. For example: + +```console +% terraform import aws_appfabric_ingestion.example arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx,arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx/ingestion/32251416-710b-4425-96ca-xxxxxxxxxx +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appfabric_ingestion_destination.html.markdown b/website/docs/cdktf/typescript/r/appfabric_ingestion_destination.html.markdown new file mode 100644 index 00000000000..bbfb403da14 --- /dev/null +++ b/website/docs/cdktf/typescript/r/appfabric_ingestion_destination.html.markdown @@ -0,0 +1,123 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_ingestion_destination" +description: |- + Terraform resource for managing an AWS AppFabric Ingestion Destination. +--- + + + +# Resource: aws_appfabric_ingestion_destination + +Terraform resource for managing an AWS AppFabric Ingestion Destination. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { AppfabricIngestionDestination } from "./.gen/providers/aws/appfabric-ingestion-destination"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new AppfabricIngestionDestination(this, "example", { + appBundleArn: Token.asString(awsAppfabricAppBundleExample.arn), + destinationConfiguration: [ + { + auditLog: [ + { + destination: [ + { + s3Bucket: [ + { + bucketName: Token.asString(awsS3BucketExample.bucket), + }, + ], + }, + ], + }, + ], + }, + ], + ingestionArn: Token.asString(awsAppfabricIngestionExample.arn), + processingConfiguration: [ + { + auditLog: [ + { + format: "json", + schema: "raw", + }, + ], + }, + ], + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `appBundleArn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. +* `ingestionArn` - (Required) The Amazon Resource Name (ARN) of the ingestion to use for the request. +* `destinationConfiguration` - (Required) Contains information about the destination of ingested data. +* `processingConfiguration` - (Required) Contains information about how ingested data is processed. +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +Destination Configuration support the following: + +* `auditLog` - (Required) Contains information about an audit log destination configuration. + +Audit Log Destination Configuration support the following: + +* `destination` - (Required) Contains information about an audit log destination. Only one destination (Firehose Stream) or (S3 Bucket) can be specified. 
+ +Destination support the following: + +* `firehoseStream` - (Optional) Contains information about an Amazon Data Firehose delivery stream. +* `s3Bucket` - (Optional) Contains information about an Amazon S3 bucket. + +Firehose Stream support the following: + +* `streamName` - (Required) The name of the Amazon Data Firehose delivery stream. + +S3 Bucket support the following: + +* `bucketName` - (Required) The name of the Amazon S3 bucket. +* `prefix` - (Optional) The object key to use. + +Processing Configuration support the following: + +* `auditLog` - (Required) Contains information about an audit log processing configuration. + +Audit Log Processing Configuration support the following: + +* `format` - (Required) The format in which the audit logs need to be formatted. Valid values: `json`, `parquet`. +* `schema` - (Required) The event schema in which the audit logs need to be formatted. Valid values: `ocsf`, `raw`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Ingestion Destination. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appstream_fleet.html.markdown b/website/docs/cdktf/typescript/r/appstream_fleet.html.markdown index 97e422479dd..6e3f37e29a0 100644 --- a/website/docs/cdktf/typescript/r/appstream_fleet.html.markdown +++ b/website/docs/cdktf/typescript/r/appstream_fleet.html.markdown @@ -68,7 +68,7 @@ The following arguments are optional: * `enableDefaultInternetAccess` - (Optional) Enables or disables default internet access for the fleet. * `fleetType` - (Optional) Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON` * `iamRoleArn` - (Optional) ARN of the IAM role to apply to the fleet. -* `idleDisconnectTimeoutInSeconds` - (Optional) Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnectTimeoutInSeconds` time interval begins. Defaults to 60 seconds. +* `idleDisconnectTimeoutInSeconds` - (Optional) Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnectTimeoutInSeconds` time interval begins. Defaults to `0`. Valid values are between `60` and `3600` seconds. * `imageName` - (Optional) Name of the image used to create the fleet. * `imageArn` - (Optional) ARN of the public, private, or shared image to use. * `streamView` - (Optional) AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays. If not specified, defaults to `APP`. @@ -138,4 +138,4 @@ Using `terraform import`, import `aws_appstream_fleet` using the id.
For example % terraform import aws_appstream_fleet.example fleetNameExample ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_datasource.html.markdown b/website/docs/cdktf/typescript/r/appsync_datasource.html.markdown index 7a9b1841338..02599b760c3 100644 --- a/website/docs/cdktf/typescript/r/appsync_datasource.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_datasource.html.markdown @@ -119,88 +119,90 @@ This resource supports the following arguments: * `name` - (Required) User-supplied name for the data source. * `type` - (Required) Type of the Data Source. Valid values: `AWS_LAMBDA`, `AMAZON_DYNAMODB`, `AMAZON_ELASTICSEARCH`, `HTTP`, `NONE`, `RELATIONAL_DATABASE`, `AMAZON_EVENTBRIDGE`, `AMAZON_OPENSEARCH_SERVICE`. * `description` - (Optional) Description of the data source. -* `dynamodbConfig` - (Optional) DynamoDB settings. See [DynamoDB Config](#dynamodb-config) -* `elasticsearchConfig` - (Optional) Amazon Elasticsearch settings. See [ElasticSearch Config](#elasticsearch-config) -* `eventBridgeConfig` - (Optional) AWS EventBridge settings. See [Event Bridge Config](#event-bridge-config) -* `httpConfig` - (Optional) HTTP settings. See [HTTP Config](#http-config) -* `lambdaConfig` - (Optional) AWS Lambda settings. See [Lambda Config](#lambda-config) -* `opensearchserviceConfig` - (Optional) Amazon OpenSearch Service settings. See [OpenSearch Service Config](#opensearch-service-config) -* `relationalDatabaseConfig` (Optional) AWS RDS settings. See [Relational Database Config](#relational-database-config) +* `dynamodbConfig` - (Optional) DynamoDB settings. See [`dynamodbConfig` Block](#dynamodb_config-block) for details. +* `elasticsearchConfig` - (Optional) Amazon Elasticsearch settings. See [`elasticsearchConfig` Block](#elasticsearch_config-block) for details. +* `eventBridgeConfig` - (Optional) AWS EventBridge settings. 
See [`eventBridgeConfig` Block](#event_bridge_config-block) for details. +* `httpConfig` - (Optional) HTTP settings. See [`httpConfig` Block](#http_config-block) for details. +* `lambdaConfig` - (Optional) AWS Lambda settings. See [`lambdaConfig` Block](#lambda_config-block) for details. +* `opensearchserviceConfig` - (Optional) Amazon OpenSearch Service settings. See [`opensearchserviceConfig` Block](#opensearchservice_config-block) for details. +* `relationalDatabaseConfig` (Optional) AWS RDS settings. See [`relationalDatabaseConfig` Block](#relational_database_config-block) for details. * `serviceRoleArn` - (Optional) IAM service role ARN for the data source. Required if `type` is specified as `AWS_LAMBDA`, `AMAZON_DYNAMODB`, `AMAZON_ELASTICSEARCH`, `AMAZON_EVENTBRIDGE`, or `AMAZON_OPENSEARCH_SERVICE`. -### DynamoDB Config +### `dynamodbConfig` Block -This argument supports the following arguments: +The `dynamodbConfig` configuration block supports the following arguments: * `tableName` - (Required) Name of the DynamoDB table. * `region` - (Optional) AWS region of the DynamoDB table. Defaults to current region. * `useCallerCredentials` - (Optional) Set to `true` to use Amazon Cognito credentials with this data source. -* `deltaSyncConfig` - (Optional) The DeltaSyncConfig for a versioned data source. See [Delta Sync Config](#delta-sync-config) +* `deltaSyncConfig` - (Optional) The DeltaSyncConfig for a versioned data source. See [`deltaSyncConfig` Block](#delta_sync_config-block) for details. * `versioned` - (Optional) Detects Conflict Detection and Resolution with this data source. -### Delta Sync Config +### `deltaSyncConfig` Block + +The `deltaSyncConfig` configuration block supports the following arguments: * `baseTableTtl` - (Optional) The number of minutes that an Item is stored in the data source. * `deltaSyncTableName` - (Required) The table name. 
* `deltaSyncTableTtl` - (Optional) The number of minutes that a Delta Sync log entry is stored in the Delta Sync table. -### ElasticSearch Config +### `elasticsearchConfig` Block -This argument supports the following arguments: +The `elasticsearchConfig` configuration block supports the following arguments: * `endpoint` - (Required) HTTP endpoint of the Elasticsearch domain. * `region` - (Optional) AWS region of Elasticsearch domain. Defaults to current region. -### Event Bridge Config +### `eventBridgeConfig` Block -This argument supports the following arguments: +The `eventBridgeConfig` configuration block supports the following arguments: * `eventBusArn` - (Required) ARN for the EventBridge bus. -### HTTP Config +### `httpConfig` Block -This argument supports the following arguments: +The `httpConfig` configuration block supports the following arguments: * `endpoint` - (Required) HTTP URL. -* `authorizationConfig` - (Optional) Authorization configuration in case the HTTP endpoint requires authorization. See [Authorization Config](#authorization-config). +* `authorizationConfig` - (Optional) Authorization configuration in case the HTTP endpoint requires authorization. See [`authorizationConfig` Block](#authorization_config-block) for details. -#### Authorization Config +### `authorizationConfig` Block -This argument supports the following arguments: +The `authorizationConfig` configuration block supports the following arguments: * `authorizationType` - (Optional) Authorization type that the HTTP endpoint requires. Default values is `AWS_IAM`. -* `awsIamConfig` - (Optional) Identity and Access Management (IAM) settings. See [AWS IAM Config](#aws-iam-config). +* `awsIamConfig` - (Optional) Identity and Access Management (IAM) settings. See [`awsIamConfig` Block](#aws_iam_config-block) for details. 
-##### AWS IAM Config +### `awsIamConfig` Block -This argument supports the following arguments: +The `awsIamConfig` configuration block supports the following arguments: * `signingRegion` - (Optional) Signing Amazon Web Services Region for IAM authorization. * `signingServiceName`- (Optional) Signing service name for IAM authorization. -### Lambda Config +### `lambdaConfig` Block -This argument supports the following arguments: +The `lambdaConfig` configuration block supports the following arguments: * `functionArn` - (Required) ARN for the Lambda function. -### OpenSearch Service Config +### `opensearchserviceConfig` Block -This argument supports the following arguments: +The `opensearchserviceConfig` configuration block supports the following arguments: * `endpoint` - (Required) HTTP endpoint of the OpenSearch domain. * `region` - (Optional) AWS region of the OpenSearch domain. Defaults to current region. -### Relational Database Config +### `relationalDatabaseConfig` Block -This argument supports the following arguments: +The `relationalDatabaseConfig` configuration block supports the following arguments: -* `httpEndpointConfig` - (Required) Amazon RDS HTTP endpoint configuration. See [HTTP Endpoint Config](#http-endpoint-config). +* `httpEndpointConfig` - (Required) Amazon RDS HTTP endpoint configuration. See [`httpEndpointConfig` Block](#http_endpoint_config-block) for details. * `sourceType` - (Optional) Source type for the relational database. Valid values: `RDS_HTTP_ENDPOINT`. -#### HTTP Endpoint Config +### `httpEndpointConfig` Block -This argument supports the following arguments: +The `httpEndpointConfig` configuration block supports the following arguments: * `dbClusterIdentifier` - (Required) Amazon RDS cluster identifier. * `awsSecretStoreArn` - (Required) AWS secret store ARN for database credentials. 
@@ -246,4 +248,4 @@ Using `terraform import`, import `aws_appsync_datasource` using the `apiId`, a h % terraform import aws_appsync_datasource.example abcdef123456-example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_function.html.markdown b/website/docs/cdktf/typescript/r/appsync_function.html.markdown index 5170d0b69ac..3ec81a9d604 100644 --- a/website/docs/cdktf/typescript/r/appsync_function.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_function.html.markdown @@ -105,28 +105,28 @@ This resource supports the following arguments: * `requestMappingTemplate` - (Optional) Function request mapping template. Functions support only the 2018-05-29 version of the request mapping template. * `responseMappingTemplate` - (Optional) Function response mapping template. * `description` - (Optional) Function description. -* `runtime` - (Optional) Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See [Runtime](#runtime). -* `syncConfig` - (Optional) Describes a Sync configuration for a resolver. See [Sync Config](#sync-config). +* `runtime` - (Optional) Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See [`runtime` Block](#runtime-block) for details. +* `syncConfig` - (Optional) Describes a Sync configuration for a resolver. See [`syncConfig` Block](#sync_config-block) for details. * `functionVersion` - (Optional) Version of the request mapping template. Currently the supported value is `2018-05-29`. Does not apply when specifying `code`. 
-### Runtime +### `runtime` Block -This argument supports the following arguments: +The `runtime` configuration block supports the following arguments: * `name` - (Optional) The name of the runtime to use. Currently, the only allowed value is `APPSYNC_JS`. * `runtimeVersion` - (Optional) The version of the runtime to use. Currently, the only allowed version is `1.0.0`. -### Sync Config +### `syncConfig` Block -This argument supports the following arguments: +The `syncConfig` configuration block supports the following arguments: * `conflictDetection` - (Optional) Conflict Detection strategy to use. Valid values are `NONE` and `VERSION`. * `conflictHandler` - (Optional) Conflict Resolution strategy to perform in the event of a conflict. Valid values are `NONE`, `OPTIMISTIC_CONCURRENCY`, `AUTOMERGE`, and `LAMBDA`. -* `lambdaConflictHandlerConfig` - (Optional) Lambda Conflict Handler Config when configuring `LAMBDA` as the Conflict Handler. See [Lambda Conflict Handler Config](#lambda-conflict-handler-config). +* `lambdaConflictHandlerConfig` - (Optional) Lambda Conflict Handler Config when configuring `LAMBDA` as the Conflict Handler. See [`lambdaConflictHandlerConfig` Block](#lambda_conflict_handler_config-block) for details. -#### Lambda Conflict Handler Config +#### `lambdaConflictHandlerConfig` Block -This argument supports the following arguments: +The `lambdaConflictHandlerConfig` configuration block supports the following arguments: * `lambdaConflictHandlerArn` - (Optional) ARN for the Lambda function to use as the Conflict Handler. 
@@ -166,4 +166,4 @@ Using `terraform import`, import `aws_appsync_function` using the AppSync API ID % terraform import aws_appsync_function.example xxxxx-yyyyy ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/appsync_graphql_api.html.markdown b/website/docs/cdktf/typescript/r/appsync_graphql_api.html.markdown index f2973264c5c..38816c23fad 100644 --- a/website/docs/cdktf/typescript/r/appsync_graphql_api.html.markdown +++ b/website/docs/cdktf/typescript/r/appsync_graphql_api.html.markdown @@ -371,13 +371,13 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: * `authenticationType` - (Required) Authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA` -* `name` - (Required) User-supplied name for the GraphqlApi. -* `logConfig` - (Optional) Nested argument containing logging configuration. Defined below. -* `openidConnectConfig` - (Optional) Nested argument containing OpenID Connect configuration. Defined below. -* `userPoolConfig` - (Optional) Amazon Cognito User Pool configuration. Defined below. -* `lambdaAuthorizerConfig` - (Optional) Nested argument containing Lambda authorizer configuration. Defined below. +* `name` - (Required) User-supplied name for the GraphQL API. +* `logConfig` - (Optional) Nested argument containing logging configuration. See [`logConfig` Block](#log_config-block) for details. +* `openidConnectConfig` - (Optional) Nested argument containing OpenID Connect configuration. See [`openidConnectConfig` Block](#openid_connect_config-block) for details. +* `userPoolConfig` - (Optional) Amazon Cognito User Pool configuration. See [`userPoolConfig` Block](#user_pool_config-block) for details. +* `lambdaAuthorizerConfig` - (Optional) Nested argument containing Lambda authorizer configuration. See [`lambdaAuthorizerConfig` Block](#lambda_authorizer_config-block) for details.
* `schema` - (Optional) Schema definition, in GraphQL schema language format. Terraform cannot perform drift detection of this configuration. -* `additionalAuthenticationProvider` - (Optional) One or more additional authentication providers for the GraphqlApi. Defined below. +* `additionalAuthenticationProvider` - (Optional) One or more additional authentication providers for the GraphQL API. See [`additionalAuthenticationProvider` Block](#additional_authentication_provider-block) for details. * `introspectionConfig` - (Optional) Sets the value of the GraphQL API to enable (`ENABLED`) or disable (`DISABLED`) introspection. If no value is provided, the introspection configuration will be set to ENABLED by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled. For more information about introspection, see [GraphQL introspection](https://graphql.org/learn/introspection/). * `queryDepthLimit` - (Optional) The maximum depth a query can have in a single request. Depth refers to the amount of nested levels allowed in the body of query. The default value is `0` (or unspecified), which indicates there's no depth limit. If you set a limit, it can be between `1` and `75` nested levels. This field will produce a limit error if the operation falls out of bounds. @@ -387,43 +387,43 @@ This resource supports the following arguments: * `xrayEnabled` - (Optional) Whether tracing with X-ray is enabled. Defaults to false. * `visibility` - (Optional) Sets the value of the GraphQL API to public (`GLOBAL`) or private (`PRIVATE`). If no value is provided, the visibility will be set to `GLOBAL` by default. This value cannot be changed once the API has been created.
-### log_config +### `logConfig` Block -This argument supports the following arguments: +The `logConfig` configuration block supports the following arguments: * `cloudwatchLogsRoleArn` - (Required) Amazon Resource Name of the service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account. * `fieldLogLevel` - (Required) Field logging level. Valid values: `ALL`, `ERROR`, `NONE`. * `excludeVerboseContent` - (Optional) Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level. Valid values: `true`, `false`. Default value: `false` -### additional_authentication_provider +### `additionalAuthenticationProvider` Block -This argument supports the following arguments: +The `additionalAuthenticationProvider` configuration block supports the following arguments: * `authenticationType` - (Required) Authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA` -* `openidConnectConfig` - (Optional) Nested argument containing OpenID Connect configuration. Defined below. -* `userPoolConfig` - (Optional) Amazon Cognito User Pool configuration. Defined below. +* `openidConnectConfig` - (Optional) Nested argument containing OpenID Connect configuration. See [`openidConnectConfig` Block](#openid_connect_config-block) for details. +* `userPoolConfig` - (Optional) Amazon Cognito User Pool configuration. See [`userPoolConfig` Block](#user_pool_config-block) for details. -### openid_connect_config +### `openidConnectConfig` Block -This argument supports the following arguments: +The `openidConnectConfig` configuration block supports the following arguments: * `issuer` - (Required) Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token. * `authTtl` - (Optional) Number of milliseconds a token is valid after being authenticated. 
* `clientId` - (Optional) Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time. * `iatTtl` - (Optional) Number of milliseconds a token is valid after being issued to a user. -### user_pool_config +### `userPoolConfig` Block -This argument supports the following arguments: +The `userPoolConfig` configuration block supports the following arguments: * `defaultAction` - (Required only if Cognito is used as the default auth provider) Action that you want your GraphQL API to take when a request that uses Amazon Cognito User Pool authentication doesn't match the Amazon Cognito User Pool configuration. Valid: `ALLOW` and `DENY` * `userPoolId` - (Required) User pool ID. * `appIdClientRegex` - (Optional) Regular expression for validating the incoming Amazon Cognito User Pool app client ID. * `awsRegion` - (Optional) AWS region in which the user pool was created. -### lambda_authorizer_config +### `lambdaAuthorizerConfig` Block -This argument supports the following arguments: +The `lambdaAuthorizerConfig` configuration block supports the following arguments: * `authorizerUri` - (Required) ARN of the Lambda function to be called for authorization. Note: This Lambda function must have a resource-based policy assigned to it, to allow `lambda:InvokeFunction` from service principal `appsync.amazonaws.com`. * `authorizerResultTtlInSeconds` - (Optional) Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a `ttlOverride` key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. @@ -466,4 +466,4 @@ Using `terraform import`, import AppSync GraphQL API using the GraphQL API ID. 
F % terraform import aws_appsync_graphql_api.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_group.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_group.html.markdown index 941e43aaf0b..8f32a4c3836 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_group.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_group.html.markdown @@ -763,6 +763,7 @@ This configuration block supports the following: * ssd - solid state drive ``` +- `maxSpotPriceAsPercentageOfOptimalOnDemandPrice` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spotMaxPricePercentageOverLowestPrice` - `memoryGibPerVcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. - `min` - (Optional) Minimum. May be a decimal number, e.g. `0.5`. - `max` - (Optional) Maximum. May be a decimal number, e.g. `0.5`. @@ -780,7 +781,7 @@ This configuration block supports the following: If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. - `requireHibernateSupport` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. -- `spotMaxPricePercentageOverLowestPrice` - (Optional) Price protection threshold for Spot Instances. 
This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. +- `spotMaxPricePercentageOverLowestPrice` - (Optional) Price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `maxSpotPriceAsPercentageOfOptimalOnDemandPrice` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. @@ -991,4 +992,4 @@ Using `terraform import`, import Auto Scaling Groups using the `name`. 
For examp % terraform import aws_autoscaling_group.web web-asg ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/autoscaling_policy.html.markdown b/website/docs/cdktf/typescript/r/autoscaling_policy.html.markdown index 1f280b29a36..61e9ad5f588 100644 --- a/website/docs/cdktf/typescript/r/autoscaling_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/autoscaling_policy.html.markdown @@ -374,14 +374,14 @@ The following fields are available in target tracking configuration: ### predefined_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefinedMetricType` - (Required) Metric type. * `resourceLabel` - (Optional) Identifies the resource associated with the metric type. ### customized_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metricDimension` - (Optional) Dimensions of the metric. * `metricName` - (Optional) Name of the metric. @@ -392,14 +392,14 @@ This argument supports the following arguments: #### metric_dimension -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. #### metrics -This argument supports the following arguments: +This configuration block supports the following arguments: * `expression` - (Optional) Math expression used on the returned metric. You must specify either `expression` or `metricStat`, but not both. * `id` - (Required) Short name for the metric used in target tracking scaling policy. 
@@ -409,7 +409,7 @@ This argument supports the following arguments: ##### metric_stat -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric` - (Required) Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. * `stat` - (Required) Statistic of the metrics to return. @@ -417,7 +417,7 @@ This argument supports the following arguments: ##### metric -This argument supports the following arguments: +This configuration block supports the following arguments: * `dimensions` - (Optional) Dimensions of the metric. * `metricName` - (Required) Name of the metric. @@ -425,14 +425,14 @@ This argument supports the following arguments: ###### dimensions -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. ### predictive_scaling_configuration -This argument supports the following arguments: +This configuration block supports the following arguments: * `maxCapacityBreachBehavior` - (Optional) Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Valid values are `HonorMaxCapacity` or `IncreaseMaxCapacity`. Default is `HonorMaxCapacity`. * `maxCapacityBuffer` - (Optional) Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. Valid range is `0` to `100`. If set to `0`, Amazon EC2 Auto Scaling may scale capacity higher than the maximum capacity to equal but not exceed forecast capacity. @@ -442,7 +442,7 @@ This argument supports the following arguments: #### metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `customizedCapacityMetricSpecification` - (Optional) Customized capacity metric specification. 
The field is only valid when you use `customizedLoadMetricSpecification` * `customizedLoadMetricSpecification` - (Optional) Customized load metric specification. @@ -453,46 +453,46 @@ This argument supports the following arguments: ##### predefined_load_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefinedMetricType` - (Required) Metric type. Valid values are `ASGTotalCPUUtilization`, `ASGTotalNetworkIn`, `ASGTotalNetworkOut`, or `ALBTargetGroupRequestCount`. * `resourceLabel` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. ##### predefined_metric_pair_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefinedMetricType` - (Required) Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. For example, if the metric type is `ASGCPUUtilization`, the Auto Scaling group's total CPU metric is used as the load metric, and the average CPU metric is used for the scaling metric. Valid values are `ASGCPUUtilization`, `ASGNetworkIn`, `ASGNetworkOut`, or `ALBRequestCount`. * `resourceLabel` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. 
You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. ##### predefined_scaling_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefinedMetricType` - (Required) Describes a scaling metric for a predictive scaling policy. Valid values are `ASGAverageCPUUtilization`, `ASGAverageNetworkIn`, `ASGAverageNetworkOut`, or `ALBRequestCountPerTarget`. * `resourceLabel` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. 
##### customized_scaling_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metricDataQueries` - (Required) List of up to 10 structures that defines custom scaling metric in predictive scaling policy ##### customized_load_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metricDataQueries` - (Required) List of up to 10 structures that defines custom load metric in predictive scaling policy ##### customized_capacity_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metricDataQueries` - (Required) List of up to 10 structures that defines custom capacity metric in predictive scaling policy ##### metric_data_queries -This argument supports the following arguments: +This configuration block supports the following arguments: * `expression` - (Optional) Math expression used on the returned metric. You must specify either `expression` or `metricStat`, but not both. * `id` - (Required) Short name for the metric used in predictive scaling policy. @@ -502,7 +502,7 @@ This argument supports the following arguments: ##### metric_stat -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric` - (Required) Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. * `stat` - (Required) Statistic of the metrics to return. @@ -510,7 +510,7 @@ This argument supports the following arguments: ##### metric -This argument supports the following arguments: +This configuration block supports the following arguments: * `dimensions` - (Optional) Dimensions of the metric. * `metricName` - (Required) Name of the metric. 
@@ -518,7 +518,7 @@ This argument supports the following arguments: ##### dimensions -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. @@ -565,4 +565,4 @@ Using `terraform import`, import AutoScaling scaling policy using the role autos % terraform import aws_autoscaling_policy.test-policy asg-name/policy-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_custom_model.html.markdown b/website/docs/cdktf/typescript/r/bedrock_custom_model.html.markdown index d75e8156087..35592de592d 100644 --- a/website/docs/cdktf/typescript/r/bedrock_custom_model.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_custom_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_model" description: |- @@ -135,4 +135,4 @@ Using `terraform import`, import Bedrock custom model using the `jobArn`. 
For ex % terraform import aws_bedrock_custom_model.example arn:aws:bedrock:us-west-2:123456789012:model-customization-job/amazon.titan-text-express-v1:0:8k/1y5n57gh5y2e ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_model_invocation_logging_configuration.html.markdown b/website/docs/cdktf/typescript/r/bedrock_model_invocation_logging_configuration.html.markdown index c1ca1887bc1..9d0c7c18c54 100644 --- a/website/docs/cdktf/typescript/r/bedrock_model_invocation_logging_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_model_invocation_logging_configuration.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_model_invocation_logging_configuration" description: |- @@ -136,4 +136,4 @@ Using `terraform import`, import Bedrock custom model using the `id` set to the % terraform import aws_bedrock_model_invocation_logging_configuration.my_config us-east-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrock_provisioned_model_throughput.html.markdown b/website/docs/cdktf/typescript/r/bedrock_provisioned_model_throughput.html.markdown index 159f23d1097..f4edb4ea680 100644 --- a/website/docs/cdktf/typescript/r/bedrock_provisioned_model_throughput.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrock_provisioned_model_throughput.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_provisioned_model_throughput" description: |- @@ -78,4 +78,4 @@ Using `terraform import`, import Provisioned Throughput using the `provisionedMo % terraform import aws_bedrock_provisioned_model_throughput.example arn:aws:bedrock:us-west-2:123456789012:provisioned-model/1y5n57gh5y2e ``` - \ No newline at end of file + \ No newline at end of file diff --git 
a/website/docs/cdktf/typescript/r/bedrockagent_agent.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent.html.markdown index 161244ef602..331058d375f 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent" description: |- @@ -214,4 +214,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent using the agent % terraform import aws_bedrockagent_agent.example GGRRAED6JP ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_agent_action_group.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent_action_group.html.markdown index 861db422f01..8f578bc8048 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent_action_group.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent_action_group.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent_action_group" description: |- @@ -136,4 +136,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Action Group th % terraform import aws_bedrockagent_agent_action_group.example MMAUDBZTH4,GGRRAED6JP,DRAFT ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_agent_alias.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent_alias.html.markdown index 569caf5efd2..49fbe2b0b3e 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent_alias.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent_alias.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: 
aws_bedrockagent_agent_alias" description: |- @@ -202,4 +202,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Alias using the % terraform import aws_bedrockagent_agent_alias.example 66IVY0GUTF,GGRRAED6JP ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_agent_knowledge_base_association.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_agent_knowledge_base_association.html.markdown index d181ccac5a8..48f15960e47 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_agent_knowledge_base_association.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_agent_knowledge_base_association.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent_knowledge_base_association" description: |- @@ -89,4 +89,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Agent Knowledge Base % terraform import aws_bedrockagent_agent_knowledge_base_association.example GGRRAED6JP,DRAFT,EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/bedrockagent_data_source.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_data_source.html.markdown index e38a531cccd..724712f7f94 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_data_source.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_data_source.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_data_source" description: |- @@ -149,4 +149,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Data Source using the % terraform import aws_bedrockagent_data_source.example GWCMFMQF6T,EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git 
a/website/docs/cdktf/typescript/r/bedrockagent_knowledge_base.html.markdown b/website/docs/cdktf/typescript/r/bedrockagent_knowledge_base.html.markdown index 9f0525449e6..6daf585de50 100644 --- a/website/docs/cdktf/typescript/r/bedrockagent_knowledge_base.html.markdown +++ b/website/docs/cdktf/typescript/r/bedrockagent_knowledge_base.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_knowledge_base" description: |- @@ -200,4 +200,4 @@ Using `terraform import`, import Agents for Amazon Bedrock Knowledge Base using % terraform import aws_bedrockagent_knowledge_base.example EMDPPAYPZI ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudformation_stack_set_instance.html.markdown b/website/docs/cdktf/typescript/r/cloudformation_stack_set_instance.html.markdown index e2cc8a8dd8b..1eec1ff6d2a 100644 --- a/website/docs/cdktf/typescript/r/cloudformation_stack_set_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudformation_stack_set_instance.html.markdown @@ -163,7 +163,7 @@ This resource supports the following arguments: * `stackSetName` - (Required) Name of the StackSet. * `accountId` - (Optional) Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. -* `deploymentTargets` - (Optional) The AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. See [deployment_targets](#deployment_targets-argument-reference) below. +* `deploymentTargets` - (Optional) AWS Organizations accounts to which StackSets deploys. 
StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. See [deployment_targets](#deployment_targets-argument-reference) below. * `parameterOverrides` - (Optional) Key-value map of input parameters to override from the StackSet for this Instance. * `region` - (Optional) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. * `retainStack` - (Optional) During Terraform resource destroy, remove Instance from StackSet while keeping the Stack and its associated resources. Must be enabled in Terraform state _before_ destroy operation to take effect. You cannot reassociate a retained Stack or add an existing, saved Stack to a new StackSet. Defaults to `false`. @@ -174,25 +174,28 @@ This resource supports the following arguments: The `deploymentTargets` configuration block supports the following arguments: -* `organizationalUnitIds` - (Optional) The organization root ID or organizational unit (OU) IDs to which StackSets deploys. +* `organizationalUnitIds` - (Optional) Organization root ID or organizational unit (OU) IDs to which StackSets deploys. +* `accountFilterType` - (Optional) Limit deployment targets to individual accounts or include additional accounts with provided OUs. Valid values: `INTERSECTION`, `DIFFERENCE`, `UNION`, `NONE`. +* `accounts` - (Optional) List of accounts to deploy stack set updates. +* `accountsUrl` - (Optional) S3 URL of the file containing the list of accounts. ### `operationPreferences` Argument Reference The `operationPreferences` configuration block supports the following arguments: -* `failureToleranceCount` - (Optional) Number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region.
-* `failureTolerancePercentage` - (Optional) The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. -* `maxConcurrentCount` - (Optional) The maximum number of accounts in which to perform this operation at one time. -* `maxConcurrentPercentage` - (Optional) The maximum percentage of accounts in which to perform this operation at one time. -* `regionConcurrencyType` - (Optional) The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are `SEQUENTIAL` and `PARALLEL`. -* `regionOrder` - (Optional) The order of the Regions in where you want to perform the stack operation. +* `failureToleranceCount` - (Optional) Number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. +* `failureTolerancePercentage` - (Optional) Percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. +* `maxConcurrentCount` - (Optional) Maximum number of accounts in which to perform this operation at one time. +* `maxConcurrentPercentage` - (Optional) Maximum percentage of accounts in which to perform this operation at one time. +* `regionConcurrencyType` - (Optional) Concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are `SEQUENTIAL` and `PARALLEL`. +* `regionOrder` - (Optional) Order of the Regions in where you want to perform the stack operation. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - Unique identifier for the resource. If `deploymentTargets` is set, this is a comma-delimited string combining stack set name, organizational unit IDs (`/`-delimited), and region (ie. `mystack,ou-123/ou-456,us-east-1`). 
Otherwise, this is a comma-delimited string combining stack set name, AWS account ID, and region (ie. `mystack,123456789012,us-east-1`). -* `organizationalUnitId` - The organization root ID or organizational unit (OU) ID in which the stack is deployed. +* `organizationalUnitId` - Organization root ID or organizational unit (OU) ID in which the stack is deployed. * `stackId` - Stack identifier. * `stackInstanceSummaries` - List of stack instances created from an organizational unit deployment target. This will only be populated when `deploymentTargets` is set. See [`stackInstanceSummaries`](#stack_instance_summaries-attribute-reference). @@ -302,4 +305,4 @@ Using `terraform import`, import CloudFormation StackSet Instances when acting a % terraform import aws_cloudformation_stack_set_instance.example example,ou-sdas-123123123/ou-sdas-789789789,us-east-1,DELEGATED_ADMIN ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudtrail_event_data_store.html.markdown b/website/docs/cdktf/typescript/r/cloudtrail_event_data_store.html.markdown index 94730f860f6..1679d9ddf3a 100644 --- a/website/docs/cdktf/typescript/r/cloudtrail_event_data_store.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudtrail_event_data_store.html.markdown @@ -105,6 +105,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: - `name` - (Required) The name of the event data store. +- `billingMode` - (Optional) The billing mode for the event data store. The valid values are `EXTENDABLE_RETENTION_PRICING` and `FIXED_RETENTION_PRICING`. Defaults to `EXTENDABLE_RETENTION_PRICING`. - `advancedEventSelector` - (Optional) The advanced event selectors to use to select the events for the data store. 
For more information about how to use advanced event selectors, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide. - `multiRegionEnabled` - (Optional) Specifies whether the event data store includes events from all regions, or only from the region in which the event data store is created. Default: `true`. - `organizationEnabled` - (Optional) Specifies whether an event data store collects events logged for an organization in AWS Organizations. Default: `false`. @@ -172,4 +173,4 @@ Using `terraform import`, import event data stores using their `arn`. For exampl % terraform import aws_cloudtrail_event_data_store.example arn:aws:cloudtrail:us-east-1:123456789123:eventdatastore/22333815-4414-412c-b155-dd254033gfhf ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_log_account_policy.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_log_account_policy.html.markdown new file mode 100644 index 00000000000..136da2038a6 --- /dev/null +++ b/website/docs/cdktf/typescript/r/cloudwatch_log_account_policy.html.markdown @@ -0,0 +1,146 @@ +--- +subcategory: "CloudWatch Logs" +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_account_policy" +description: |- + Provides a CloudWatch Log Account Policy resource. +--- + + + +# Resource: aws_cloudwatch_log_account_policy + +Provides a CloudWatch Log Account Policy resource. + +## Example Usage + +### Account Data Protection Policy + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { CloudwatchLogAccountPolicy } from "./.gen/providers/aws/cloudwatch-log-account-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new CloudwatchLogAccountPolicy(this, "data_protection", { + policyDocument: Token.asString( + Fn.jsonencode({ + Name: "DataProtection", + Statement: [ + { + DataIdentifier: [ + "arn:aws:dataprotection::aws:data-identifier/EmailAddress", + ], + Operation: { + Audit: { + FindingsDestination: {}, + }, + }, + Sid: "Audit", + }, + { + DataIdentifier: [ + "arn:aws:dataprotection::aws:data-identifier/EmailAddress", + ], + Operation: { + Deidentify: { + MaskConfig: {}, + }, + }, + Sid: "Redact", + }, + ], + Version: "2021-06-01", + }) + ), + policyName: "data-protection", + policyType: "DATA_PROTECTION_POLICY", + }); + } +} + +``` + +### Subscription Filter Policy + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CloudwatchLogAccountPolicy } from "./.gen/providers/aws/cloudwatch-log-account-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new CloudwatchLogAccountPolicy(this, "subscription_filter", { + policyDocument: Token.asString( + Fn.jsonencode({ + DestinationArn: test.arn, + FilterPattern: "test", + }) + ), + policyName: "subscription-filter", + policyType: "SUBSCRIPTION_FILTER_POLICY", + selectionCriteria: 'LogGroupName NOT IN [\\"excluded_log_group_name\\"]', + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `policyDocument` - (Required) Text of the account policy. 
Refer to the [AWS docs](https://docs.aws.amazon.com/cli/latest/reference/logs/put-account-policy.html) for more information. +* `policyType` - (Required) Type of account policy. Either `DATA_PROTECTION_POLICY` or `SUBSCRIPTION_FILTER_POLICY`. You can have one account policy per type in an account. +* `policyName` - (Required) Name of the account policy. +* `scope` - (Optional) Currently defaults to and only accepts the value: `ALL`. +* `selectionCriteria` - (Optional) Criteria for applying a subscription filter policy to a selection of log groups. The only allowable criteria selector is `LogGroupName NOT IN []`. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import this resource using the `policyName` and `policyType` fields separated by `:`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CloudwatchLogAccountPolicy } from "./.gen/providers/aws/cloudwatch-log-account-policy"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + CloudwatchLogAccountPolicy.generateConfigForImport( + this, + "example", + "my-account-policy:SUBSCRIPTION_FILTER_POLICY" + ); + } +} + +``` + +Using `terraform import`, import this resource using the `policyName` and `policyType` separated by `:`. 
For example: + +```console +% terraform import aws_cloudwatch_log_account_policy.example "my-account-policy:SUBSCRIPTION_FILTER_POLICY" +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/cloudwatch_metric_alarm.html.markdown b/website/docs/cdktf/typescript/r/cloudwatch_metric_alarm.html.markdown index 8ddab38abfe..866a400edf5 100644 --- a/website/docs/cdktf/typescript/r/cloudwatch_metric_alarm.html.markdown +++ b/website/docs/cdktf/typescript/r/cloudwatch_metric_alarm.html.markdown @@ -244,7 +244,7 @@ You must choose one or the other See [related part of AWS Docs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html) for details about valid values. -This argument supports the following arguments: +This resource supports the following arguments: * `alarmName` - (Required) The descriptive name for the alarm. This name must be unique within the user's AWS account * `comparisonOperator` - (Required) The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Either of the following is supported: `GreaterThanOrEqualToThreshold`, `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`. Additionally, the values `LessThanLowerOrGreaterThanUpperThreshold`, `LessThanLowerThreshold`, and `GreaterThanUpperThreshold` are used only for alarms based on anomaly detection models. @@ -344,4 +344,4 @@ Using `terraform import`, import CloudWatch Metric Alarm using the `alarmName`. 
% terraform import aws_cloudwatch_metric_alarm.test alarm-12345 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codebuild_project.html.markdown b/website/docs/cdktf/typescript/r/codebuild_project.html.markdown index 8a276e272e6..34b1322cac6 100644 --- a/website/docs/cdktf/typescript/r/codebuild_project.html.markdown +++ b/website/docs/cdktf/typescript/r/codebuild_project.html.markdown @@ -237,7 +237,7 @@ The following arguments are optional: * `badgeEnabled` - (Optional) Generates a publicly-accessible URL for the projects build badge. Available as `badgeUrl` attribute when enabled. * `buildBatchConfig` - (Optional) Defines the batch build options for the project. -* `buildTimeout` - (Optional) Number of minutes, from 5 to 480 (8 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes. The `buildTimeout` property is not available on the `Lambda` compute type. +* `buildTimeout` - (Optional) Number of minutes, from 5 to 2160 (36 hours), for AWS CodeBuild to wait until timing out any related build that does not get marked as completed. The default is 60 minutes. The `buildTimeout` property is not available on the `Lambda` compute type. * `cache` - (Optional) Configuration block. Detailed below. * `concurrentBuildLimit` - (Optional) Specify a maximum number of concurrent builds for the project. The value specified must be greater than 0 and less than the account concurrent running builds limit. * `description` - (Optional) Short description of the project. @@ -444,4 +444,4 @@ Using `terraform import`, import CodeBuild Project using the `name`. 
For example % terraform import aws_codebuild_project.name project-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codebuild_webhook.html.markdown b/website/docs/cdktf/typescript/r/codebuild_webhook.html.markdown index b565c931278..71548228e83 100644 --- a/website/docs/cdktf/typescript/r/codebuild_webhook.html.markdown +++ b/website/docs/cdktf/typescript/r/codebuild_webhook.html.markdown @@ -122,7 +122,7 @@ This resource supports the following arguments: `filter` supports the following: * `type` - (Required) The webhook filter group's type. Valid values for this parameter are: `EVENT`, `BASE_REF`, `HEAD_REF`, `ACTOR_ACCOUNT_ID`, `FILE_PATH`, `COMMIT_MESSAGE`, `WORKFLOW_NAME`, `TAG_NAME`, `RELEASE_NAME`. At least one filter group must specify `EVENT` as its type. -* `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED` works with GitHub & GitHub Enterprise only. For a filter that uses any of the other filter types, a regular expression. +* `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED`, `WORKFLOW_JOB_QUEUED` works with GitHub & GitHub Enterprise only. For a filter that uses any of the other filter types, a regular expression. * `excludeMatchedPattern` - (Optional) If set to `true`, the specified filter does *not* trigger a build. Defaults to `false`. 
## Attribute Reference @@ -164,4 +164,4 @@ Using `terraform import`, import CodeBuild Webhooks using the CodeBuild Project % terraform import aws_codebuild_webhook.example MyProjectName ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/codepipeline.html.markdown b/website/docs/cdktf/typescript/r/codepipeline.html.markdown index 1515146ad31..da76944380c 100644 --- a/website/docs/cdktf/typescript/r/codepipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/codepipeline.html.markdown @@ -242,7 +242,7 @@ A `trigger` block supports the following arguments: A `gitConfiguration` block supports the following arguments: -* `sourceActionName` - (Required) The name of the pipeline source action where the trigger configuration. +* `sourceActionName` - (Required) The name of the pipeline source action where the trigger configuration, such as Git tags, is specified. The trigger configuration will start the pipeline upon the specified change only. * `pullRequest` - (Optional) The field where the repository event that will start the pipeline is specified as pull requests. A `pullRequest` block is documented below. * `push` - (Optional) The field where the repository event that will start the pipeline, such as pushing Git tags, is specified with details. A `push` block is documented below. @@ -317,4 +317,4 @@ Using `terraform import`, import CodePipelines using the name. 
For example: % terraform import aws_codepipeline.foo example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/config_conformance_pack.html.markdown b/website/docs/cdktf/typescript/r/config_conformance_pack.html.markdown index 337da2b1be9..dfa6a280991 100644 --- a/website/docs/cdktf/typescript/r/config_conformance_pack.html.markdown +++ b/website/docs/cdktf/typescript/r/config_conformance_pack.html.markdown @@ -100,7 +100,7 @@ class MyConvertedCode extends TerraformStack { ~> **Note:** If both `templateBody` and `templateS3Uri` are specified, AWS Config uses the `templateS3Uri` and ignores the `templateBody`. -This argument supports the following arguments: +This resource supports the following arguments: * `name` - (Required, Forces new resource) The name of the conformance pack. Must begin with a letter and contain from 1 to 256 alphanumeric characters and hyphens. * `deliveryS3Bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates. Maximum length of 63. @@ -150,4 +150,4 @@ Using `terraform import`, import Config Conformance Packs using the `name`. 
For % terraform import aws_config_conformance_pack.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/controltower_control.html.markdown b/website/docs/cdktf/typescript/r/controltower_control.html.markdown index 39d96a18683..eccd08ce1b1 100644 --- a/website/docs/cdktf/typescript/r/controltower_control.html.markdown +++ b/website/docs/cdktf/typescript/r/controltower_control.html.markdown @@ -46,6 +46,12 @@ class MyConvertedCode extends TerraformStack { "arn:aws:controltower:${" + current.name + "}::control/AWS-GR_EC2_VOLUME_INUSE_CHECK", + parameters: [ + { + key: "AllowedRegions", + value: Token.asString(Fn.jsonencode(["us-east-1"])), + }, + ], targetIdentifier: Token.asString( Fn.lookupNested( "${[ for x in ${" + @@ -65,15 +71,25 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +This following arguments are required: * `controlIdentifier` - (Required) The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny guardrail. * `targetIdentifier` - (Required) The ARN of the organizational unit. +The following arguments are optional: + +* `parameters` - (Optional) Parameter values which are specified to configure the control when you enable it. See [Parameters](#parameters) for more details. + +### Parameters + +* `key` - (Required) The name of the parameter. +* `value` - (Required) The value of the parameter. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: +* `arn` - The ARN of the EnabledControl resource. * `id` - The ARN of the organizational unit. 
## Import @@ -108,4 +124,4 @@ Using `terraform import`, import Control Tower Controls using their `organizatio % terraform import aws_controltower_control.example arn:aws:organizations::123456789101:ou/o-qqaejywet/ou-qg5o-ufbhdtv3,arn:aws:controltower:us-east-1::control/WTDSMKDKDNLE ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_instance.html.markdown b/website/docs/cdktf/typescript/r/db_instance.html.markdown index adf37af870c..406023cc67c 100644 --- a/website/docs/cdktf/typescript/r/db_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/db_instance.html.markdown @@ -394,7 +394,7 @@ class MyConvertedCode extends TerraformStack { For more detailed documentation about each argument, refer to the [AWS official documentation](http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). -This argument supports the following arguments: +This resource supports the following arguments: * `allocatedStorage` - (Required unless a `snapshotIdentifier` or `replicateSourceDb` is provided) The allocated storage in gibibytes. If `maxAllocatedStorage` is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs. If `replicateSourceDb` is set, the value is ignored during the creation of the instance. * `allowMajorVersionUpgrade` - (Optional) Indicates that major version @@ -706,4 +706,4 @@ Using `terraform import`, import DB Instances using the `identifier`. 
For exampl % terraform import aws_db_instance.default mydb-rds-instance ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/db_proxy_default_target_group.html.markdown b/website/docs/cdktf/typescript/r/db_proxy_default_target_group.html.markdown index b723540c551..4a723d69fbe 100644 --- a/website/docs/cdktf/typescript/r/db_proxy_default_target_group.html.markdown +++ b/website/docs/cdktf/typescript/r/db_proxy_default_target_group.html.markdown @@ -85,7 +85,7 @@ This resource supports the following arguments: * `initQuery` - (Optional) One or more SQL statements for the proxy to run when opening each new database connection. Typically used with `SET` statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single `SET` statement, such as `SET x=1, y=2`. * `maxConnectionsPercent` - (Optional) The maximum size of the connection pool for each target in a target group. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. * `maxIdleConnectionsPercent` - (Optional) Controls how actively the proxy closes idle database connections in the connection pool. A high value enables the proxy to leave a high percentage of idle connections open. A low value causes the proxy to close idle client connections and return the underlying database connections to the connection pool. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. -* `sessionPinningFilters` - (Optional) Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. 
Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is `EXCLUDE_VARIABLE_SETS`. +* `sessionPinningFilters` - (Optional) Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. This setting is only supported for MySQL engine family databases. Currently, the only allowed value is `EXCLUDE_VARIABLE_SETS`. ## Attribute Reference @@ -134,4 +134,4 @@ Using `terraform import`, import DB proxy default target groups using the `dbPro % terraform import aws_db_proxy_default_target_group.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/detective_organization_configuration.html.markdown b/website/docs/cdktf/typescript/r/detective_organization_configuration.html.markdown index a5277d1bc79..b14bdfdc3d1 100644 --- a/website/docs/cdktf/typescript/r/detective_organization_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/detective_organization_configuration.html.markdown @@ -35,7 +35,7 @@ class MyConvertedCode extends TerraformStack { const awsDetectiveOrganizationConfigurationExample = new DetectiveOrganizationConfiguration(this, "example_1", { autoEnable: true, - graphArn: example.id, + graphArn: example.graphArn, }); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ awsDetectiveOrganizationConfigurationExample.overrideLogicalId("example"); @@ -59,7 +59,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_detective_organization_admin_account` using the Detective Graph ID. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_detective_organization_admin_account` using the behavior graph ARN. For example: ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -76,17 +76,17 @@ class MyConvertedCode extends TerraformStack { DetectiveOrganizationConfiguration.generateConfigForImport( this, "example", - "00b00fd5aecc0ab60a708659477e9617" + "arn:aws:detective:us-east-1:123456789012:graph:00b00fd5aecc0ab60a708659477e9617" ); } } ``` -Using `terraform import`, import `aws_detective_organization_admin_account` using the Detective Graph ID. For example: +Using `terraform import`, import `aws_detective_organization_admin_account` using the behavior graph ARN. 
For example: ```console -% terraform import aws_detective_organization_configuration.example 00b00fd5aecc0ab60a708659477e9617 +% terraform import aws_detective_organization_configuration.example arn:aws:detective:us-east-1:123456789012:graph:00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_cluster.html.markdown b/website/docs/cdktf/typescript/r/docdb_cluster.html.markdown index 89710889c77..f130c4d2b18 100644 --- a/website/docs/cdktf/typescript/r/docdb_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_cluster.html.markdown @@ -56,7 +56,7 @@ class MyConvertedCode extends TerraformStack { For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/docdb/create-db-cluster.html). -This argument supports the following arguments: +This resource supports the following arguments: * `allowMajorVersionUpgrade` - (Optional) A value that indicates whether major version upgrades are allowed. Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version. * `applyImmediately` - (Optional) Specifies whether any cluster modifications @@ -69,7 +69,7 @@ This argument supports the following arguments: * `clusterIdentifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `dbSubnetGroupName` - (Optional) A DB subnet group to associate with this DB instance. * `dbClusterParameterGroupName` - (Optional) A cluster parameter group to associate with the cluster. -* `deletionProtection` - (Optional) A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. 
+* `deletionProtection` - (Optional) A boolean value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. Defaults to `false`. * `enabledCloudwatchLogsExports` - (Optional) List of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `profiler`. * `engineVersion` - (Optional) The database engine version. Updating this argument results in an outage. @@ -86,6 +86,7 @@ This argument supports the following arguments: * `preferredBackupWindow` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter.Time in UTC Default: A 30-minute window selected at random from an 8-hour block of time per regionE.g., 04:00-09:00 * `preferredMaintenanceWindow` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g., wed:04:00-wed:04:30 +* `restoreToPointInTime` - (Optional, Forces new resource) A configuration block for restoring a DB instance to an arbitrary point in time. Requires the `identifier` argument to be set with the name of the new DB instance to be created. See [Restore To Point In Time](#restore-to-point-in-time) below for details. * `skipFinalSnapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `finalSnapshotIdentifier`. Default is `false`. * `snapshotIdentifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. You can use either the name or ARN when specifying a DB cluster snapshot, or the ARN when specifying a DB snapshot. Automated snapshots **should not** be used for this attribute, unless from a different cluster. 
Automated snapshots are deleted as part of cluster destruction when the resource is replaced. * `storageEncrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false`. @@ -94,6 +95,15 @@ Default: A 30-minute window selected at random from an 8-hour block of time per * `vpcSecurityGroupIds` - (Optional) List of VPC security groups to associate with the Cluster +### Restore To Point In Time + +The `restoreToPointInTime` block supports the following arguments: + +* `restoreToTime` - (Optional) The date and time to restore from. Value must be a time in Universal Coordinated Time (UTC) format and must be before the latest restorable time for the DB instance. Cannot be specified with `useLatestRestorableTime`. +* `restoreType` - (Optional) The type of restore to be performed. Valid values are `full-copy`, `copy-on-write`. +* `sourceClusterIdentifier` - (Required) The identifier of the source DB cluster from which to restore. Must match the identifier of an existing DB cluster. +* `useLatestRestorableTime` - (Optional) A boolean value that indicates whether the DB cluster is restored from the latest backup time. Defaults to `false`. Cannot be specified with `restoreToTime`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -148,4 +158,4 @@ Using `terraform import`, import DocumentDB Clusters using the `clusterIdentifie % terraform import aws_docdb_cluster.docdb_cluster docdb-prod-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/docdb_cluster_instance.html.markdown b/website/docs/cdktf/typescript/r/docdb_cluster_instance.html.markdown index a1968c6f7eb..f9e50d2aa2d 100644 --- a/website/docs/cdktf/typescript/r/docdb_cluster_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/docdb_cluster_instance.html.markdown @@ -61,7 +61,7 @@ class MyConvertedCode extends TerraformStack { For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/docdb/create-db-instance.html). -This argument supports the following arguments: +This resource supports the following arguments: * `applyImmediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. 
@@ -167,4 +167,4 @@ Using `terraform import`, import DocumentDB Cluster Instances using the `identif % terraform import aws_docdb_cluster_instance.prod_instance_1 aurora-cluster-instance-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/drs_replication_configuration_template.html.markdown b/website/docs/cdktf/typescript/r/drs_replication_configuration_template.html.markdown new file mode 100644 index 00000000000..0d7ec9cb1cb --- /dev/null +++ b/website/docs/cdktf/typescript/r/drs_replication_configuration_template.html.markdown @@ -0,0 +1,162 @@ +--- +subcategory: "DRS (Elastic Disaster Recovery)" +layout: "aws" +page_title: "AWS: drs_replication_configuration_template" +description: |- + Provides an Elastic Disaster Recovery replication configuration template resource. +--- + + + +# Resource: aws_drs_replication_configuration_template + +Provides an Elastic Disaster Recovery replication configuration template resource. Before using DRS, your account must be [initialized](https://docs.aws.amazon.com/drs/latest/userguide/getting-started-initializing.html). + +~> **NOTE:** Your configuration must use the PIT policy shown in the [basic configuration](#basic-configuration) due to AWS rules. The only value that you can change is the `retentionDuration` of `ruleId` 3. + +## Example Usage + +### Basic configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { DrsReplicationConfigurationTemplate } from "./.gen/providers/aws/drs-replication-configuration-template"; +interface MyConfig { + ebsEncryption: any; +} +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string, config: MyConfig) { + super(scope, name); + new DrsReplicationConfigurationTemplate(this, "example", { + associateDefaultSecurityGroup: false, + bandwidthThrottling: 12, + createPublicIp: false, + dataPlaneRouting: "PRIVATE_IP", + defaultLargeStagingDiskType: "GP2", + ebsEncryption: "DEFAULT", + ebsEncryptionKeyArn: + "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab", + pitPolicy: [ + { + enabled: true, + interval: 10, + retentionDuration: 60, + ruleId: 1, + units: "MINUTE", + }, + { + enabled: true, + interval: 1, + retentionDuration: 24, + ruleId: 2, + units: "HOUR", + }, + { + enabled: true, + interval: 1, + retentionDuration: 3, + ruleId: 3, + units: "DAY", + }, + ], + replicationServerInstanceType: "t3.small", + replicationServersSecurityGroupsIds: Token.asList( + Fn.lookupNested(awsSecurityGroupExample, ["*", "id"]) + ), + stagingAreaSubnetId: Token.asString(awsSubnetExample.id), + useDedicatedReplicationServer: false, + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `associateDefaultSecurityGroup` - (Required) Whether to associate the default Elastic Disaster Recovery Security group with the Replication Configuration Template. +* `bandwidthThrottling` - (Required) Configure bandwidth throttling for the outbound data transfer rate of the Source Server in Mbps. +* `createPublicIp` - (Required) Whether to create a Public IP for the Recovery Instance by default. +* `dataPlaneRouting` - (Required) Data plane routing mechanism that will be used for replication. Valid values are `PUBLIC_IP` and `PRIVATE_IP`. 
+* `defaultLargeStagingDiskType` - (Required) Staging Disk EBS volume type to be used during replication. Valid values are `GP2`, `GP3`, `ST1`, or `AUTO`. +* `ebsEncryption` - (Required) Type of EBS encryption to be used during replication. Valid values are `DEFAULT` and `CUSTOM`. +* `ebsEncryptionKeyArn` - (Required) ARN of the EBS encryption key to be used during replication. +* `pitPolicy` - (Required) Configuration block for Point in time (PIT) policy to manage snapshots taken during replication. [See below](#pit_policy). +* `replicationServerInstanceType` - (Required) Instance type to be used for the replication server. +* `replicationServersSecurityGroupsIds` - (Required) Security group IDs that will be used by the replication server. +* `stagingAreaSubnetId` - (Required) Subnet to be used by the replication staging area. +* `stagingAreaTags` - (Required) Set of tags to be associated with all resources created in the replication staging area: EC2 replication server, EBS volumes, EBS snapshots, etc. +* `useDedicatedReplicationServer` - (Required) Whether to use a dedicated Replication Server in the replication staging area. + +The following arguments are optional: + +* `autoReplicateNewDisks` - (Optional) Whether to allow the AWS replication agent to automatically replicate newly added disks. +* `tags` - (Optional) Set of tags to be associated with the Replication Configuration Template resource. + +### `pitPolicy` + +The PIT policies _must_ be specified as shown in the [basic configuration example](#basic-configuration) above. The only value that you can change is the `retentionDuration` of `ruleId` 3. + +* `enabled` - (Optional) Whether this rule is enabled or not. +* `interval` - (Required) How often, in the chosen units, a snapshot should be taken. +* `retentionDuration` - (Required) Duration to retain a snapshot for, in the chosen `units`. +* `ruleId` - (Optional) ID of the rule. Valid values are integers. 
+* `units` - (Required) Units used to measure the `interval` and `retentionDuration`. Valid values are `MINUTE`, `HOUR`, and `DAY`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Replication configuration template ARN. +* `id` - Replication configuration template ID. +* `tagsAll` - Map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `20m`) +- `update` - (Default `20m`) +- `delete` - (Default `20m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DRS Replication Configuration Template using the `id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { DrsReplicationConfigurationTemplate } from "./.gen/providers/aws/drs-replication-configuration-template"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + DrsReplicationConfigurationTemplate.generateConfigForImport( + this, + "example", + "templateid" + ); + } +} + +``` + +Using `terraform import`, import DRS Replication Configuration Template using the `id`. 
For example: + +```console +% terraform import aws_drs_replication_configuration_template.example templateid +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dx_gateway_association.html.markdown b/website/docs/cdktf/typescript/r/dx_gateway_association.html.markdown index 5ca36959ffc..eb34c44ca03 100644 --- a/website/docs/cdktf/typescript/r/dx_gateway_association.html.markdown +++ b/website/docs/cdktf/typescript/r/dx_gateway_association.html.markdown @@ -162,7 +162,7 @@ A full example of how to create a VPN Gateway in one AWS account, create a Direc ~> **NOTE:** If the `associatedGatewayId` is in another region, an [alias](https://developer.hashicorp.com/terraform/language/providers/configuration#alias-multiple-provider-configurations) in a new provider block for that region should be specified. -This argument supports the following arguments: +This resource supports the following arguments: * `dxGatewayId` - (Required) The ID of the Direct Connect gateway. * `associatedGatewayId` - (Optional) The ID of the VGW or transit gateway with which to associate the Direct Connect gateway. @@ -222,4 +222,4 @@ Using `terraform import`, import Direct Connect gateway associations using `dxGa % terraform import aws_dx_gateway_association.example 345508c3-7215-4aef-9832-07c125d5bd0f/vgw-98765432 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_table.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_table.html.markdown index 98f7ecdfa5f..d9b168ef863 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_table.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_table.html.markdown @@ -80,7 +80,7 @@ class MyConvertedCode extends TerraformStack { }, ttl: { attributeName: "TimeToExist", - enabled: false, + enabled: true, }, writeCapacity: 20, }); @@ -312,8 +312,10 @@ Optional arguments: ### `ttl` -* `enabled` - (Required) Whether TTL is enabled. 
-* `attributeName` - (Required) Name of the table attribute to store the TTL timestamp in. +* `attributeName` - (Optional) Name of the table attribute to store the TTL timestamp in. + Required if `enabled` is `true`, must not be set otherwise. +* `enabled` - (Optional) Whether TTL is enabled. + Default value is `false`. ## Attribute Reference @@ -370,4 +372,4 @@ Using `terraform import`, import DynamoDB tables using the `name`. For example: % terraform import aws_dynamodb_table.basic-dynamodb-table GameScores ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/dynamodb_table_item.html.markdown b/website/docs/cdktf/typescript/r/dynamodb_table_item.html.markdown index 4e2511a6210..1ae1a0d4a4a 100644 --- a/website/docs/cdktf/typescript/r/dynamodb_table_item.html.markdown +++ b/website/docs/cdktf/typescript/r/dynamodb_table_item.html.markdown @@ -62,7 +62,7 @@ class MyConvertedCode extends TerraformStack { ~> **Note:** Names included in `item` are represented internally with everything but letters removed. There is the possibility of collisions if two names, once filtered, are the same. For example, the names `your-name-here` and `yournamehere` will overlap and cause an error. -This argument supports the following arguments: +This resource supports the following arguments: * `hashKey` - (Required) Hash key to use for lookups and identification of the item * `item` - (Required) JSON representation of a map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item. @@ -77,4 +77,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import DynamoDB table items. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_capacity_block_reservation.html.markdown b/website/docs/cdktf/typescript/r/ec2_capacity_block_reservation.html.markdown new file mode 100644 index 00000000000..db14920e84b --- /dev/null +++ b/website/docs/cdktf/typescript/r/ec2_capacity_block_reservation.html.markdown @@ -0,0 +1,89 @@ +--- +subcategory: "EC2 (Elastic Compute Cloud)" +layout: "aws" +page_title: "AWS: aws_ec2_capacity_block_reservation" +description: |- + Provides an EC2 Capacity Block Reservation. This allows you to purchase capacity block for your Amazon EC2 instances in a specific Availability Zone for machine learning (ML) Workloads. +--- + + + +# Resource: aws_ec2_capacity_block_reservation + +Provides an EC2 Capacity Block Reservation. This allows you to purchase capacity block for your Amazon EC2 instances in a specific Availability Zone for machine learning (ML) Workloads. + +~> **NOTE:** Once created, a reservation is valid for the `duration` of the provided `capacityBlockOfferingId` and cannot be deleted. Performing a `destroy` will only remove the resource from state. For more information see [EC2 Capacity Block Reservation Documentation](https://aws.amazon.com/ec2/instance-types/p5/) and [PurchaseReservedDBInstancesOffering](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/capacity-blocks-pricing-billing.html). + +~> **NOTE:** Due to the expense of testing this resource, we provide it as best effort. If you find it useful, and have the ability to help test or notice issues, consider reaching out to us on [GitHub](https://github.com/hashicorp/terraform-provider-aws). + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. 
+ * See https://cdk.tf/provider-generation for more details. + */ +import { DataAwsEc2CapacityBlockOffering } from "./.gen/providers/aws/data-aws-ec2-capacity-block-offering"; +import { Ec2CapacityBlockReservation } from "./.gen/providers/aws/ec2-capacity-block-reservation"; +interface MyConfig { + capacityDurationHours: any; +} +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string, config: MyConfig) { + super(scope, name); + new Ec2CapacityBlockReservation(this, "example", { + capacityBlockOfferingId: Token.asString(test.id), + instancePlatform: "Linux/UNIX", + tags: { + Environment: "dev", + }, + }); + const dataAwsEc2CapacityBlockOfferingExample = + new DataAwsEc2CapacityBlockOffering(this, "example_1", { + capacity_duration: 24, + end_date: "2024-05-30T15:04:05Z", + instanceCount: 1, + instance_platform: "Linux/UNIX", + instanceType: "p4d.24xlarge", + start_date: "2024-04-28T15:04:05Z", + capacityDurationHours: config.capacityDurationHours, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + dataAwsEc2CapacityBlockOfferingExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `capacityBlockOfferingId` - (Required) The Capacity Block Reservation ID. +* `instancePlatform` - (Required) The type of operating system for which to reserve capacity. Valid options are `Linux/UNIX`, `Red Hat Enterprise Linux`, `SUSE Linux`, `Windows`, `Windows with SQL Server`, `Windows with SQL Server Enterprise`, `Windows with SQL Server Standard` or `Windows with SQL Server Web`. +* `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The ARN of the reservation. +* `availabilityZone` - The Availability Zone in which to create the Capacity Block Reservation. +* `createdDate` - The date and time at which the Capacity Block Reservation was created. +* `ebsOptimized` - Indicates whether the Capacity Reservation supports EBS-optimized instances. +* `endDate` - The date and time at which the Capacity Block Reservation expires. When a Capacity Block Reservation expires, the reserved capacity is released and you can no longer launch instances into it. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) +* `endDateType` - Indicates the way in which the Capacity Reservation ends. +* `id` - The ID of the Capacity Block Reservation. +* `instanceCount` - The number of instances for which to reserve capacity. +* `instanceType` - The instance type for which to reserve capacity. +* `outpostArn` - The ARN of the Outpost on which to create the Capacity Block Reservation. +* `placementGroupArn` - The ARN of the placement group in which to create the Capacity Block Reservation. +* `reservationType` - The type of Capacity Reservation. +* `startDate` - The date and time at which the Capacity Block Reservation starts. Valid values: [RFC3339 time string](https://tools.ietf.org/html/rfc3339#section-5.8) (`YYYY-MM-DDTHH:MM:SSZ`) +* `tenancy` - Indicates the tenancy of the Capacity Block Reservation. Specify either `default` or `dedicated`. 
+* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_capacity_reservation.html.markdown b/website/docs/cdktf/typescript/r/ec2_capacity_reservation.html.markdown index 11bb6f8df8b..ca2c5f7e466 100644 --- a/website/docs/cdktf/typescript/r/ec2_capacity_reservation.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_capacity_reservation.html.markdown @@ -64,6 +64,14 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - The ARN of the Capacity Reservation. * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `10m`) +- `update` - (Default `10m`) +- `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Capacity Reservations using the `id`. For example: @@ -96,4 +104,4 @@ Using `terraform import`, import Capacity Reservations using the `id`. 
For examp % terraform import aws_ec2_capacity_reservation.web cr-0123456789abcdef0 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_fleet.html.markdown b/website/docs/cdktf/typescript/r/ec2_fleet.html.markdown index d684861840f..ac97a397a8f 100644 --- a/website/docs/cdktf/typescript/r/ec2_fleet.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_fleet.html.markdown @@ -164,6 +164,7 @@ This configuration block supports the following: * `instanceGenerations` - (Optional) Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Valid values are `current` and `previous`. Default is `current` and `previous` generation instance types. * `localStorage` - (Optional) Indicate whether instance types with local storage volumes are `included`, `excluded`, or `required`. Default is `included`. * `localStorageTypes` - (Optional) List of local storage type names. Valid values are `hdd` and `ssd`. Default any storage type. +* `maxSpotPriceAsPercentageOfOptimalOnDemandPrice` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spotMaxPricePercentageOverLowestPrice` * `memoryGibPerVcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. * `min` - (Optional) The minimum amount of memory per vCPU, in GiB. To specify no minimum limit, omit this parameter. 
* `max` - (Optional) The maximum amount of memory per vCPU, in GiB. To specify no maximum limit, omit this parameter. @@ -181,7 +182,7 @@ This configuration block supports the following: If you set `targetCapacityUnitType` to `vcpu` or `memory-mib`, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. * `requireHibernateSupport` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. -* `spotMaxPricePercentageOverLowestPrice` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. +* `spotMaxPricePercentageOverLowestPrice` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `maxSpotPriceAsPercentageOfOptimalOnDemandPrice` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. 
@@ -195,8 +196,8 @@ This configuration block supports the following: ### on_demand_options * `allocationStrategy` - (Optional) The order of the launch template overrides to use in fulfilling On-Demand capacity. Valid values: `lowestPrice`, `prioritized`. Default: `lowestPrice`. -* `capacity_reservation_options` (Optional) The strategy for using unused Capacity Reservations for fulfilling On-Demand capacity. Supported only for fleets of type `instant`. - * `usage_strategy` - (Optional) Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity. Valid values: `use-capacity-reservations-first`. +* `capacityReservationOptions` (Optional) The strategy for using unused Capacity Reservations for fulfilling On-Demand capacity. Supported only for fleets of type `instant`. + * `usageStrategy` - (Optional) Indicates whether to use unused Capacity Reservations for fulfilling On-Demand capacity. Valid values: `use-capacity-reservations-first`. * `maxTotalPrice` - (Optional) The maximum amount per hour for On-Demand Instances that you're willing to pay. * `minTargetCapacity` - (Optional) The minimum target capacity for On-Demand Instances in the fleet. If the minimum target capacity is not reached, the fleet launches no instances. Supported only for fleets of type `instant`. If you specify `minTargetCapacity`, at least one of the following must be specified: `singleAvailabilityZone` or `singleInstanceType`. @@ -289,4 +290,4 @@ Using `terraform import`, import `aws_ec2_fleet` using the Fleet identifier. 
For % terraform import aws_ec2_fleet.example fleet-b9b55d27-c5fc-41ac-a6f3-48fcc91f080c ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_network_insights_path.html.markdown b/website/docs/cdktf/typescript/r/ec2_network_insights_path.html.markdown index 9b39c1511c9..f68ba42bc0b 100644 --- a/website/docs/cdktf/typescript/r/ec2_network_insights_path.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_network_insights_path.html.markdown @@ -41,7 +41,7 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: * `source` - (Required) ID or ARN of the resource which is the source of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. -* `destination` - (Required) ID or ARN of the resource which is the destination of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. +* `destination` - (Optional) ID or ARN of the resource which is the destination of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. * `protocol` - (Required) Protocol to use for analysis. Valid options are `tcp` or `udp`. The following arguments are optional: @@ -93,4 +93,4 @@ Using `terraform import`, import Network Insights Paths using the `id`. 
For exam % terraform import aws_ec2_network_insights_path.test nip-00edfba169923aefd ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment.html.markdown b/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment.html.markdown index 349c5e5e71d..b57a0b9dc76 100644 --- a/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/ec2_transit_gateway_peering_attachment.html.markdown @@ -78,9 +78,16 @@ This resource supports the following arguments: * `peerAccountId` - (Optional) Account ID of EC2 Transit Gateway to peer with. Defaults to the account ID the [AWS provider][1] is currently connected to. * `peerRegion` - (Required) Region of EC2 Transit Gateway to peer with. * `peerTransitGatewayId` - (Required) Identifier of EC2 Transit Gateway to peer with. +* `options` - (Optional) Describes whether dynamic routing is enabled or disabled for the transit gateway peering request. See [options](#options) below for more details! * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Peering Attachment. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `transitGatewayId` - (Required) Identifier of EC2 Transit Gateway. +### options + +The `options` block supports the following: + +* `dynamicRouting` - (Optional) Indicates whether dynamic routing is enabled or disabled.. Supports `enable` and `disable`. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -122,4 +129,4 @@ Using `terraform import`, import `aws_ec2_transit_gateway_peering_attachment` us [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/eks_cluster.html.markdown b/website/docs/cdktf/typescript/r/eks_cluster.html.markdown index 46c6ef5274c..8b8b5f28a12 100644 --- a/website/docs/cdktf/typescript/r/eks_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/eks_cluster.html.markdown @@ -152,7 +152,7 @@ class MyConvertedCode extends TerraformStack { ### Enabling IAM Roles for Service Accounts -Only available on Kubernetes version 1.13 and 1.14 clusters created or upgraded on or after September 3, 2019. For more information about this feature, see the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). +For more information about this feature, see the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). ```typescript // DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug @@ -353,6 +353,7 @@ The following arguments are required: The following arguments are optional: * `accessConfig` - (Optional) Configuration block for the access config associated with your cluster, see [Amazon EKS Access Entries](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html). +* `bootstrapSelfManagedAddons` - (Optional) Install default unmanaged add-ons, such as `aws-cni`, `kube-proxy`, and CoreDNS during cluster creation. If `false`, you must manually install desired add-ons. Changing this value will force a new cluster to be created. Defaults to `true`. * `enabledClusterLogTypes` - (Optional) List of the desired control plane logging to enable. 
For more information, see [Amazon EKS Control Plane Logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). * `encryptionConfig` - (Optional) Configuration block with encryption configuration for the cluster. Only available on Kubernetes 1.13 and above clusters created after March 6, 2020. Detailed below. * `kubernetesNetworkConfig` - (Optional) Configuration block with kubernetes network configuration for the cluster. Detailed below. If removed, Terraform will only perform drift detection if a configuration value is provided. @@ -365,7 +366,7 @@ The following arguments are optional: The `accessConfig` configuration block supports the following arguments: * `authenticationMode` - (Optional) The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` -* `bootstrapClusterCreatorAdminPermissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `true`. +* `bootstrapClusterCreatorAdminPermissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `false`. ### encryption_config @@ -493,4 +494,4 @@ Using `terraform import`, import EKS Clusters using the `name`. For example: % terraform import aws_eks_cluster.my_cluster my_cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elastictranscoder_pipeline.html.markdown b/website/docs/cdktf/typescript/r/elastictranscoder_pipeline.html.markdown index ceae9e3d25f..0cae8025eb5 100644 --- a/website/docs/cdktf/typescript/r/elastictranscoder_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/elastictranscoder_pipeline.html.markdown @@ -48,7 +48,7 @@ class MyConvertedCode extends TerraformStack { See ["Create Pipeline"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-pipeline.html) in the AWS docs for reference. 
-This argument supports the following arguments: +This resource supports the following arguments: * `awsKmsKeyArn` - (Optional) The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. * `contentConfig` - (Optional) The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below) @@ -146,4 +146,4 @@ Using `terraform import`, import Elastic Transcoder pipelines using the `id`. Fo % terraform import aws_elastictranscoder_pipeline.basic_pipeline 1407981661351-cttk8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/elastictranscoder_preset.html.markdown b/website/docs/cdktf/typescript/r/elastictranscoder_preset.html.markdown index fbf32e7fb57..6427f7e48a1 100644 --- a/website/docs/cdktf/typescript/r/elastictranscoder_preset.html.markdown +++ b/website/docs/cdktf/typescript/r/elastictranscoder_preset.html.markdown @@ -92,7 +92,7 @@ class MyConvertedCode extends TerraformStack { See ["Create Preset"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-preset.html) in the AWS docs for reference. -This argument supports the following arguments: +This resource supports the following arguments: * `audio` - (Optional, Forces new resource) Audio parameters object (documented below). * `audioCodecOptions` - (Optional, Forces new resource) Codec options for the audio parameters (documented below) @@ -209,4 +209,4 @@ Using `terraform import`, import Elastic Transcoder presets using the `id`. 
For % terraform import aws_elastictranscoder_preset.basic_preset 1407981661351-cttk8b ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/flow_log.html.markdown b/website/docs/cdktf/typescript/r/flow_log.html.markdown index 7158717f127..e7cef4cb429 100644 --- a/website/docs/cdktf/typescript/r/flow_log.html.markdown +++ b/website/docs/cdktf/typescript/r/flow_log.html.markdown @@ -269,7 +269,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** One of `eniId`, `subnetId`, `transitGatewayId`, `transitGatewayAttachmentId`, or `vpcId` must be specified. -This argument supports the following arguments: +This resource supports the following arguments: * `trafficType` - (Required) The type of traffic to capture. Valid values: `ACCEPT`,`REJECT`, `ALL`. * `deliverCrossAccountRole` - (Optional) ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. @@ -334,4 +334,4 @@ Using `terraform import`, import Flow Logs using the `id`. For example: % terraform import aws_flow_log.test_flow_log fl-1a2b3c4d ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fms_resource_set.html.markdown b/website/docs/cdktf/typescript/r/fms_resource_set.html.markdown new file mode 100644 index 00000000000..828dfe038b7 --- /dev/null +++ b/website/docs/cdktf/typescript/r/fms_resource_set.html.markdown @@ -0,0 +1,105 @@ +--- +subcategory: "FMS (Firewall Manager)" +layout: "aws" +page_title: "AWS: aws_fms_resource_set" +description: |- + Terraform resource for managing an AWS FMS (Firewall Manager) Resource Set. +--- + + + +# Resource: aws_fms_resource_set + +Terraform resource for managing an AWS FMS (Firewall Manager) Resource Set. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { FmsResourceSet } from "./.gen/providers/aws/fms-resource-set"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new FmsResourceSet(this, "example", { + resourceSet: [ + { + name: "testing", + resourceTypeList: ["AWS::NetworkFirewall::Firewall"], + }, + ], + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `resourceSet` - (Required) Details about the resource set to be created or updated. See [`resourceSet` Attribute Reference](#resource_set-attribute-reference) below. + +### `resourceSet` Attribute Reference + +* `name` - (Required) Descriptive name of the resource set. You can't change the name of a resource set after you create it. +* `resourceTypeList` - (Required) Determines the resources that can be associated to the resource set. Depending on your setting for max results and the number of resource sets, a single call might not return the full list. +* `description` - (Optional) Description of the resource set. +* `lastUpdateTime` - (Optional) Last time that the resource set was changed. +* `resourceSetStatus` - (Optional) Indicates whether the resource set is in or out of the admin's Region scope. Valid values are `ACTIVE` (Admin can manage and delete the resource set) or `OUT_OF_ADMIN_SCOPE` (Admin can view the resource set, but they can't edit or delete the resource set.) + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Resource Set. +* `id` - Unique identifier for the resource set. It's returned in the responses to create and list commands.
You provide it to operations like update and delete. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import FMS (Firewall Manager) Resource Set using the `id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { FmsResourceSet } from "./.gen/providers/aws/fms-resource-set"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + FmsResourceSet.generateConfigForImport( + this, + "example", + "resource_set-id-12345678" + ); + } +} + +``` + +Using `terraform import`, import FMS (Firewall Manager) Resource Set using the `id`. 
For example: + +```console +% terraform import aws_fms_resource_set.example resource_set-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/fsx_ontap_file_system.html.markdown b/website/docs/cdktf/typescript/r/fsx_ontap_file_system.html.markdown index 3fd89f4c5f3..c57a4291aa2 100644 --- a/website/docs/cdktf/typescript/r/fsx_ontap_file_system.html.markdown +++ b/website/docs/cdktf/typescript/r/fsx_ontap_file_system.html.markdown @@ -52,11 +52,62 @@ class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); new FsxOntapFileSystem(this, "testhapairs", { - deploymentType: "SINGLE_AZ_2", + deploymentType: "SINGLE_AZ_1", + haPairs: 2, preferredSubnetId: test1.id, storageCapacity: 2048, subnetIds: [test1.id], - throughputCapacityPerHaPair: 3072, + throughputCapacityPerHaPair: 128, + }); + } +} + +``` + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { FsxOntapFileSystem } from "./.gen/providers/aws/fsx-ontap-file-system"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new FsxOntapFileSystem(this, "testsingleazgen2", { + deploymentType: "SINGLE_AZ_2", + haPairs: 4, + preferredSubnetId: test1.id, + storageCapacity: 4096, + subnetIds: [test1.id], + throughputCapacityPerHaPair: 384, + }); + } +} + +``` + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { FsxOntapFileSystem } from "./.gen/providers/aws/fsx-ontap-file-system"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new FsxOntapFileSystem(this, "testmultiazgen2", { + deploymentType: "MULTI_AZ_2", + haPairs: 1, + preferredSubnetId: test1.id, + storageCapacity: 1024, + subnetIds: [test1.id, test2.id], + throughputCapacityPerHaPair: 384, }); } } @@ -67,24 +118,24 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `storageCapacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `1024` and `196608` for file systems with deployment_type `SINGLE_AZ_1` and `MULTI_AZ_1`. Valid values between `2048` (`1024` per ha pair) and `1048576` for file systems with deployment_type `SINGLE_AZ_2`. +* `storageCapacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `1024` and `196608` for file systems with deployment_type `SINGLE_AZ_1` and `MULTI_AZ_1`. Valid values are between `1024` and `524288` for `MULTI_AZ_2`. Valid values between `1024` (`1024` per ha pair) and `1048576` for file systems with deployment_type `SINGLE_AZ_2`. For `SINGLE_AZ_2`, the `1048576` (1PB) maximum is only supported when using 2 or more ha_pairs, the maximum is `524288` (512TB) when using 1 ha_pair. * `subnetIds` - (Required) A list of IDs for the subnets that the file system will be accessible from. Up to 2 subnets can be provided. * `preferredSubnetId` - (Required) The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). * `securityGroupIds` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. 
* `weeklyMaintenanceStartTime` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. -* `deploymentType` - (Optional) - The filesystem deployment type. Supports `MULTI_AZ_1`, `SINGLE_AZ_1`, and `SINGLE_AZ_2`. +* `deploymentType` - (Optional) - The filesystem deployment type. Supports `MULTI_AZ_1`, `MULTI_AZ_2`, `SINGLE_AZ_1`, and `SINGLE_AZ_2`. * `kmsKeyId` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `automaticBackupRetentionDays` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `dailyAutomaticBackupStartTime` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automaticBackupRetentionDays` to be set. * `diskIopsConfiguration` - (Optional) The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See [Disk Iops Configuration](#disk-iops-configuration) below. * `endpointIpAddressRange` - (Optional) Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range. -* `haPairs` - (Optional) - The number of ha_pairs to deploy for the file system. Valid values are 1 through 12. Value of 2 or greater required for `SINGLE_AZ_2`. Only value of 1 is supported with `SINGLE_AZ_1` or `MULTI_AZ_1` but not required. +* `haPairs` - (Optional) - The number of ha_pairs to deploy for the file system. Valid value is 1 for `SINGLE_AZ_1` or `MULTI_AZ_1` and `MULTI_AZ_2`. Valid values are 1 through 12 for `SINGLE_AZ_2`. * `storageType` - (Optional) - The filesystem storage type. defaults to `SSD`. 
* `fsxAdminPassword` - (Optional) The ONTAP administrative password for the fsxadmin user that you can use to administer your file system using the ONTAP CLI and REST API. * `routeTableIds` - (Optional) Specifies the VPC route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `throughputCapacity` - (Optional) Sets the throughput capacity (in MBps) for the file system that you're creating. Valid values are `128`, `256`, `512`, `1024`, `2048`, and `4096`. This parameter is only supported when not using the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. -* `throughputCapacityPerHaPair` - (Optional) Sets the throughput capacity (in MBps) for the file system that you're creating. Valid value when using 1 ha_pair are `128`, `256`, `512`, `1024`, `2048`, and `4096`. Valid values when using 2 or more ha_pairs are `3072`,`6144`. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. +* `throughputCapacityPerHaPair` - (Optional) Sets the per-HA-pair throughput capacity (in MBps) for the file system that you're creating, as opposed to `throughputCapacity` which specifies the total throughput capacity for the file system. Valid value for `MULTI_AZ_1` and `SINGLE_AZ_1` are `128`, `256`, `512`, `1024`, `2048`, and `4096`. 
Valid values for deployment type `MULTI_AZ_2` and `SINGLE_AZ_2` are `384`,`768`,`1536`,`3072`,`6144` where `haPairs` is `1`. Valid values for deployment type `SINGLE_AZ_2` are `1536`, `3072`, and `6144` where `haPairs` is greater than 1. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. ### Disk Iops Configuration @@ -96,7 +147,9 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name of the file system. -* `dnsName` - DNS name for the file system, e.g., `fs-12345678.fsx.us-west-2.amazonaws.com` +* `dnsName` - DNS name for the file system. + + **Note:** This attribute does not apply to FSx for ONTAP file systems and is consequently not set. You can access your FSx for ONTAP file system and volumes via a [Storage Virtual Machine (SVM)](fsx_ontap_storage_virtual_machine.html) using its DNS name or IP address. * `endpoints` - The endpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. See [Endpoints](#endpoints) below. * `id` - Identifier of the file system, e.g., `fs-12345678` * `networkInterfaceIds` - Set of Elastic Network Interface identifiers from which the file system is accessible The first network interface returned is the primary network interface. 
@@ -189,4 +242,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/globalaccelerator_cross_account_attachment.html.markdown b/website/docs/cdktf/typescript/r/globalaccelerator_cross_account_attachment.html.markdown index 2e0a5324731..ecfe4bb9507 100644 --- a/website/docs/cdktf/typescript/r/globalaccelerator_cross_account_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/globalaccelerator_cross_account_attachment.html.markdown @@ -76,6 +76,7 @@ The following arguments are optional: * `principals` - (Optional) List of AWS account IDs that are allowed to associate resources with the accelerator. * `resource` - (Optional) List of resources to be associated with the accelerator. + * `cidrBlock` - (Optional) IP address range, in CIDR format, that is specified as resource. * `endpointId` - (Optional) The endpoint ID for the endpoint that is specified as a AWS resource. * `region` - (Optional) The AWS Region where a shared endpoint resource is located. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
@@ -130,4 +131,4 @@ Using `terraform import`, import Global Accelerator Cross Account Attachment usi % terraform import aws_globalaccelerator_cross_account_attachment.example arn:aws:globalaccelerator::012345678910:attachment/01234567-abcd-8910-efgh-123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_catalog_table.html.markdown b/website/docs/cdktf/typescript/r/glue_catalog_table.html.markdown index 828c8cb0964..d1a3bb58c53 100644 --- a/website/docs/cdktf/typescript/r/glue_catalog_table.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_catalog_table.html.markdown @@ -160,6 +160,7 @@ To add an index to an existing table, see the [`glue_partition_index` resource]( ### storage_descriptor +* `additional_locations` - (Optional) List of locations that point to the path where a Delta table is located. * `bucketColumns` - (Optional) List of reducer grouping columns, clustering columns, and bucketing columns in the table. * `columns` - (Optional) Configuration block for columns in the table. See [`columns`](#columns) below. * `compressed` - (Optional) Whether the data in the table is compressed. @@ -256,4 +257,4 @@ Using `terraform import`, import Glue Tables using the catalog ID (usually AWS a % terraform import aws_glue_catalog_table.MyTable 123456789012:MyDatabase:MyTable ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_connection.html.markdown b/website/docs/cdktf/typescript/r/glue_connection.html.markdown index 002c6066df6..8a9b9da62b8 100644 --- a/website/docs/cdktf/typescript/r/glue_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_connection.html.markdown @@ -51,12 +51,12 @@ import { Token, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. 
*/ -import { DataAwsSecretmanagerSecret } from "./.gen/providers/aws/"; +import { DataAwsSecretsmanagerSecret } from "./.gen/providers/aws/data-aws-secretsmanager-secret"; import { GlueConnection } from "./.gen/providers/aws/glue-connection"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - const example = new DataAwsSecretmanagerSecret(this, "example", { + const example = new DataAwsSecretsmanagerSecret(this, "example", { name: "example-secret", }); const awsGlueConnectionExample = new GlueConnection(this, "example_1", { @@ -120,7 +120,7 @@ import { Token, TerraformStack } from "cdktf"; * Provider bindings are generated by running `cdktf get`. * See https://cdk.tf/provider-generation for more details. */ -import { DataAwsSecretmanagerSecret } from "./.gen/providers/aws/"; +import { DataAwsSecretsmanagerSecret } from "./.gen/providers/aws/data-aws-secretsmanager-secret"; import { GlueConnection } from "./.gen/providers/aws/glue-connection"; class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { @@ -137,7 +137,7 @@ class MyConvertedCode extends TerraformStack { matchCriteria: ["template-connection"], name: "example_connector", }); - const example = new DataAwsSecretmanagerSecret(this, "example", { + const example = new DataAwsSecretsmanagerSecret(this, "example", { name: "example-secret", }); new GlueConnection(this, "example_connection", { @@ -217,4 +217,4 @@ Using `terraform import`, import Glue Connections using the `CATALOG-ID` (AWS ac % terraform import aws_glue_connection.MyConnection 123456789012:MyConnection ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_crawler.html.markdown b/website/docs/cdktf/typescript/r/glue_crawler.html.markdown index fcfd5c27f6c..57898d8cf4b 100644 --- a/website/docs/cdktf/typescript/r/glue_crawler.html.markdown +++ 
b/website/docs/cdktf/typescript/r/glue_crawler.html.markdown @@ -215,7 +215,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** Must specify at least one of `dynamodbTarget`, `jdbcTarget`, `s3Target`, `mongodbTarget` or `catalogTarget`. -This argument supports the following arguments: +This resource supports the following arguments: * `databaseName` (Required) Glue database where results are written. * `name` (Required) Name of the crawler. @@ -355,4 +355,4 @@ Using `terraform import`, import Glue Crawlers using `name`. For example: % terraform import aws_glue_crawler.MyJob MyJob ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/glue_job.html.markdown b/website/docs/cdktf/typescript/r/glue_job.html.markdown index 821fdd842f9..b6258d4a1a3 100644 --- a/website/docs/cdktf/typescript/r/glue_job.html.markdown +++ b/website/docs/cdktf/typescript/r/glue_job.html.markdown @@ -185,6 +185,7 @@ This resource supports the following arguments: * `executionProperty` – (Optional) Execution property of the job. Defined below. * `glueVersion` - (Optional) The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). * `executionClass` - (Optional) Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. +* `maintenanceWindow` – (Optional) Specifies the day of the week and hour for the maintenance window for streaming jobs. * `maxCapacity` – (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. `Required` when `pythonshell` is set, accept either `0.0625` or `1.0`. 
Use `numberOfWorkers` and `workerType` arguments instead with `glueVersion` `2.0` and above. * `maxRetries` – (Optional) The maximum number of times to retry this job if it fails. * `name` – (Required) The name you assign to this job. It must be unique in your account. @@ -254,4 +255,4 @@ Using `terraform import`, import Glue Jobs using `name`. For example: % terraform import aws_glue_job.MyJob MyJob ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_workspace_service_account.html.markdown b/website/docs/cdktf/typescript/r/grafana_workspace_service_account.html.markdown new file mode 100644 index 00000000000..a97e9f08a78 --- /dev/null +++ b/website/docs/cdktf/typescript/r/grafana_workspace_service_account.html.markdown @@ -0,0 +1,90 @@ +--- +subcategory: "Managed Grafana" +layout: "aws" +page_title: "AWS: aws_grafana_workspace_service_account" +description: |- + Terraform resource for managing an Amazon Managed Grafana Workspace Service Account. +--- + + + +# Resource: aws_grafana_workspace_service_account + +-> **Note:** You cannot update a service account. If you change any attribute, Terraform +will delete the current and create a new one. + +Read about Service Accounts in the [Amazon Managed Grafana user guide](https://docs.aws.amazon.com/grafana/latest/userguide/service-accounts.html). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { GrafanaWorkspaceServiceAccount } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new GrafanaWorkspaceServiceAccount(this, "example", { + grafana_role: "ADMIN", + name: "example-admin", + workspace_id: awsGrafanaWorkspaceExample.id, + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the service account. The name must be unique within the workspace, as it determines the ID associated with the service account. +* `grafana_role` - (Required) The permission level to use for this service account. For more information about the roles and the permissions each has, see the [User roles](https://docs.aws.amazon.com/grafana/latest/userguide/Grafana-user-roles.html) documentation. +* `workspaceId` - (Required) The Grafana workspace with which the service account is associated. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `service_account_id` - Identifier of the service account in the given Grafana workspace + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Managed Grafana Workspace Service Account using the `workspaceId` and `service_account_id` separated by a comma (`,`). For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { GrafanaWorkspaceServiceAccount } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + GrafanaWorkspaceServiceAccount.generateConfigForImport( + this, + "example", + "g-abc12345,1" + ); + } +} + +``` + +Using `terraform import`, import Managed Grafana Workspace Service Account using the `workspaceId` and `service_account_id` separated by a comma (`,`). For example: + +```console +% terraform import aws_grafana_workspace_service_account.example g-abc12345,1 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/grafana_workspace_service_account_token.html.markdown b/website/docs/cdktf/typescript/r/grafana_workspace_service_account_token.html.markdown new file mode 100644 index 00000000000..0126f00b9f2 --- /dev/null +++ b/website/docs/cdktf/typescript/r/grafana_workspace_service_account_token.html.markdown @@ -0,0 +1,74 @@ +--- +subcategory: "Managed Grafana" +layout: "aws" +page_title: "AWS: aws_grafana_workspace_service_account_token" +description: |- + Terraform resource for managing an Amazon Managed Grafana Workspace Service Account Token. +--- + + + +# Resource: aws_grafana_workspace_service_account_token + +-> **Note:** You cannot update a service account token. If you change any attribute, Terraform +will delete the current and create a new one. + +Read about Service Accounts Tokens in the [Amazon Managed Grafana user guide](https://docs.aws.amazon.com/grafana/latest/userguide/service-accounts.html#service-account-tokens). + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { + GrafanaWorkspaceServiceAccount, + GrafanaWorkspaceServiceAccountToken, +} from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new GrafanaWorkspaceServiceAccount(this, "example", { + grafana_role: "ADMIN", + name: "example-admin", + workspace_id: awsGrafanaWorkspaceExample.id, + }); + const awsGrafanaWorkspaceServiceAccountTokenExample = + new GrafanaWorkspaceServiceAccountToken(this, "example_1", { + name: "example-key", + seconds_to_live: 3600, + service_account_id: example.serviceAccountId, + workspace_id: awsGrafanaWorkspaceExample.id, + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsGrafanaWorkspaceServiceAccountTokenExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the token to create. The name must be unique within the workspace. +* `secondsToLive` - (Required) Sets how long the token will be valid, in seconds. You can set the time up to 30 days in the future. +* `service_account_id` - (Required) The ID of the service account for which to create a token. +* `workspaceId` - (Required) The Grafana workspace with which the service account token is associated. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `service_account_token_id` - Identifier of the service account token in the given Grafana workspace. +* `createdAt` - Specifies when the service account token was created. +* `expiresAt` - Specifies when the service account token will expire. +* `key` - The key for the service account token. Used when making calls to the Grafana HTTP APIs to authenticate and authorize the requests. 
+ + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_detector_feature.html.markdown b/website/docs/cdktf/typescript/r/guardduty_detector_feature.html.markdown index a5ba692545e..e35bc1182cd 100644 --- a/website/docs/cdktf/typescript/r/guardduty_detector_feature.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_detector_feature.html.markdown @@ -53,19 +53,19 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: * `detectorId` - (Required) Amazon GuardDuty detector ID. -* `name` - (Required) The name of the detector feature. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. +* `name` - (Required) The name of the detector feature. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. Only one of two features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING` can be added, adding both features will cause an error. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. * `status` - (Required) The status of the detector feature. Valid values: `ENABLED`, `DISABLED`. -* `additionalConfiguration` - (Optional) Additional feature configuration block. See [below](#additional-configuration). +* `additionalConfiguration` - (Optional) Additional feature configuration block for features`EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING`. See [below](#additional-configuration). ### Additional Configuration The `additionalConfiguration` block supports the following: -* `name` - (Required) The name of the additional configuration. 
Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorAdditionalConfiguration.html) for the current list of supported values. +* `name` - (Required) The name of the additional configuration for a feature. Valid values: `EKS_ADDON_MANAGEMENT`, `ECS_FARGATE_AGENT_MANAGEMENT`, `EC2_AGENT_MANAGEMENT`. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorAdditionalConfiguration.html) for the current list of supported values. * `status` - (Required) The status of the additional configuration. Valid values: `ENABLED`, `DISABLED`. ## Attribute Reference This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_malware_protection_plan.html.markdown b/website/docs/cdktf/typescript/r/guardduty_malware_protection_plan.html.markdown new file mode 100644 index 00000000000..9ab2cf70c5e --- /dev/null +++ b/website/docs/cdktf/typescript/r/guardduty_malware_protection_plan.html.markdown @@ -0,0 +1,126 @@ +--- +subcategory: "GuardDuty" +layout: "aws" +page_title: "AWS: aws_guardduty_malware_protection_plan" +description: |- + Provides a resource to manage a GuardDuty Malware Protection Plan +--- + + + +# Resource: aws_guardduty_malware_protection_plan + +Provides a resource to manage a GuardDuty malware protection plan. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { GuarddutyMalwareProtectionPlan } from "./.gen/providers/aws/guardduty-malware-protection-plan"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new GuarddutyMalwareProtectionPlan(this, "example", { + actions: [ + { + tagging: [ + { + status: "ENABLED", + }, + ], + }, + ], + protectedResource: [ + { + s3Bucket: [ + { + bucketName: Token.asString(awsS3BucketExample.id), + objectPrefixes: ["example1", "example2"], + }, + ], + }, + ], + role: Token.asString(awsIamRoleExample.arn), + tags: { + Name: "example", + }, + }); + } +} + +``` + +## Argument Reference + +This resource supports the following arguments: + +* `actions` - (Optional) Information about whether the tags will be added to the S3 object after scanning. See [`actions`](#actions-argument-reference) below. +* `protectedResource` - (Required) Information about the protected resource that is associated with the created Malware Protection plan. Presently, S3Bucket is the only supported protected resource. See [`protectedResource`](#protected_resource-argument-reference) below. +* `role` - (Required) The IAM role that includes the permissions required to scan and add tags to the associated protected resource. +* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### `actions` argument reference + +* `tagging` - (Required) Indicates whether the scanned S3 object will have tags about the scan result. See [`tagging`](#tagging-argument-reference) below. + +#### `tagging` argument reference + +* `status` - (Required) Indicates whether or not the tags will be added. Valid values are `DISABLED` and `ENABLED`. 
Defaults to `DISABLED` + +### `protectedResource` argument reference + +* `s3Bucket` - (Required) Information about the protected S3 bucket resource. See [`s3Bucket`](#s3_bucket-argument-reference) below. + +#### `s3Bucket` argument reference + +* `bucketName` - (Required, Forces new resource) Name of the S3 bucket. +* `objectPrefixes` - (Optional) The list of object prefixes that specify the S3 objects that will be scanned. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - The ARN of the GuardDuty malware protection plan +* `createdAt` - The timestamp when the Malware Protection plan resource was created. +* `id` - The ID of the GuardDuty malware protection plan +* `status` - The GuardDuty malware protection plan status. Valid values are `ACTIVE`, `WARNING`, and `ERROR`. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import GuardDuty malware protection plans using their IDs. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { GuarddutyMalwareProtectionPlan } from "./.gen/providers/aws/guardduty-malware-protection-plan"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + GuarddutyMalwareProtectionPlan.generateConfigForImport( + this, + "example", + "1234567890abcdef0123" + ); + } +} + +``` + +Using `terraform import`, import GuardDuty malware protection plans using their IDs. 
For example: + +```console +% terraform import aws_guardduty_malware_protection_plan.example 1234567890abcdef0123 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_organization_configuration.html.markdown b/website/docs/cdktf/typescript/r/guardduty_organization_configuration.html.markdown index d92ff90c4ed..c2baab3209c 100644 --- a/website/docs/cdktf/typescript/r/guardduty_organization_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_organization_configuration.html.markdown @@ -65,7 +65,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** One of `autoEnable` or `autoEnableOrganizationMembers` must be specified. -This argument supports the following arguments: +This resource supports the following arguments: * `autoEnable` - (Optional) *Deprecated:* Use `autoEnableOrganizationMembers` instead. When this setting is enabled, all new accounts that are created in, or added to, the organization are added as a member accounts of the organization’s GuardDuty delegated administrator and GuardDuty is enabled in that AWS Region. * `autoEnableOrganizationMembers` - (Optional) Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. Valid values are `ALL`, `NEW`, `NONE`. 
@@ -157,4 +157,4 @@ Using `terraform import`, import GuardDuty Organization Configurations using the % terraform import aws_guardduty_organization_configuration.example 00b00fd5aecc0ab60a708659477e9617 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/guardduty_organization_configuration_feature.html.markdown b/website/docs/cdktf/typescript/r/guardduty_organization_configuration_feature.html.markdown index ce8b6668cf0..8e35bed5ed3 100644 --- a/website/docs/cdktf/typescript/r/guardduty_organization_configuration_feature.html.markdown +++ b/website/docs/cdktf/typescript/r/guardduty_organization_configuration_feature.html.markdown @@ -58,18 +58,18 @@ This resource supports the following arguments: * `autoEnable` - (Required) The status of the feature that is configured for the member accounts within the organization. Valid values: `NEW`, `ALL`, `NONE`. * `detectorId` - (Required) The ID of the detector that configures the delegated administrator. -* `name` - (Required) The name of the feature that will be configured for the organization. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. -* `additionalConfiguration` - (Optional) The additional information that will be configured for the organization See [below](#additional-configuration). +* `name` - (Required) The name of the feature that will be configured for the organization. Valid values: `S3_DATA_EVENTS`, `EKS_AUDIT_LOGS`, `EBS_MALWARE_PROTECTION`, `RDS_LOGIN_EVENTS`, `EKS_RUNTIME_MONITORING`, `LAMBDA_NETWORK_LOGS`, `RUNTIME_MONITORING`. Only one of the two features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING` can be added; adding both features will cause an error. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorFeatureConfiguration.html) for the current list of supported values. 
+* `additionalConfiguration` - (Optional) Additional feature configuration block for features `EKS_RUNTIME_MONITORING` or `RUNTIME_MONITORING`. See [below](#additional-configuration). ### Additional Configuration The `additionalConfiguration` block supports the following: * `autoEnable` - (Required) The status of the additional configuration that will be configured for the organization. Valid values: `NEW`, `ALL`, `NONE`. -* `name` - (Required) The name of the additional configuration that will be configured for the organization. Valid values: `EKS_ADDON_MANAGEMENT`, `ECS_FARGATE_AGENT_MANAGEMENT`, `EC2_AGENT_MANAGEMENT`. +* `name` - (Required) The name of the additional configuration for a feature that will be configured for the organization. Valid values: `EKS_ADDON_MANAGEMENT`, `ECS_FARGATE_AGENT_MANAGEMENT`, `EC2_AGENT_MANAGEMENT`. Refer to the [AWS Documentation](https://docs.aws.amazon.com/guardduty/latest/APIReference/API_DetectorAdditionalConfiguration.html) for the current list of supported values. ## Attribute Reference This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iam_server_certificate.html.markdown b/website/docs/cdktf/typescript/r/iam_server_certificate.html.markdown index ee19884521a..ab2164ab4c8 100644 --- a/website/docs/cdktf/typescript/r/iam_server_certificate.html.markdown +++ b/website/docs/cdktf/typescript/r/iam_server_certificate.html.markdown @@ -158,6 +158,12 @@ This resource exports the following attributes in addition to the arguments abov * `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `uploadDate` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) when the server certificate was uploaded. 
+## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `delete` - (Default `15m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Server Certificates using the `name`. For example: @@ -194,4 +200,4 @@ Using `terraform import`, import IAM Server Certificates using the `name`. For e [2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingServerCerts.html [lifecycle]: /docs/configuration/resources.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/imagebuilder_image_pipeline.html.markdown b/website/docs/cdktf/typescript/r/imagebuilder_image_pipeline.html.markdown index d3313b054ae..2b328d40286 100644 --- a/website/docs/cdktf/typescript/r/imagebuilder_image_pipeline.html.markdown +++ b/website/docs/cdktf/typescript/r/imagebuilder_image_pipeline.html.markdown @@ -54,11 +54,13 @@ The following arguments are optional: * `description` - (Optional) Description of the image pipeline. * `distributionConfigurationArn` - (Optional) Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. * `enhancedImageMetadataEnabled` - (Optional) Whether additional information about the image being created is collected. Defaults to `true`. +* `executionRole` - (Optional) Amazon Resource Name (ARN) of the service-linked role to be used by Image Builder to [execute workflows](https://docs.aws.amazon.com/imagebuilder/latest/userguide/manage-image-workflows.html). * `imageRecipeArn` - (Optional) Amazon Resource Name (ARN) of the image recipe. * `imageScanningConfiguration` - (Optional) Configuration block with image scanning configuration. Detailed below. * `imageTestsConfiguration` - (Optional) Configuration block with image tests configuration. Detailed below. * `schedule` - (Optional) Configuration block with schedule settings. 
Detailed below. * `status` - (Optional) Status of the image pipeline. Valid values are `DISABLED` and `ENABLED`. Defaults to `ENABLED`. +* `workflow` - (Optional) Configuration block with the workflow configuration. Detailed below. * `tags` - (Optional) Key-value map of resource tags for the image pipeline. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### image_scanning_configuration @@ -94,6 +96,25 @@ The following arguments are optional: * `timezone` - (Optional) The timezone that applies to the scheduling expression. For example, "Etc/UTC", "America/Los_Angeles" in the [IANA timezone format](https://www.joda.org/joda-time/timezones.html). If not specified this defaults to UTC. +### workflow + +The following arguments are required: + +* `workflowArn` - (Required) Amazon Resource Name (ARN) of the Image Builder Workflow. + +The following arguments are optional: + +* `onFailure` - (Optional) The action to take if the workflow fails. Must be one of `CONTINUE` or `ABORT`. +* `parallelGroup` - (Optional) The parallel group in which to run a test Workflow. +* `parameter` - (Optional) Configuration block for the workflow parameters. Detailed below. + +### parameter + +The following arguments are required: + +* `name` - (Required) The name of the Workflow parameter. +* `value` - (Required) The value of the Workflow parameter. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -138,4 +159,4 @@ Using `terraform import`, import `aws_imagebuilder_image_pipeline` resources usi % terraform import aws_imagebuilder_image_pipeline.example arn:aws:imagebuilder:us-east-1:123456789012:image-pipeline/example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_authorizer.html.markdown b/website/docs/cdktf/typescript/r/iot_authorizer.html.markdown index 50d4f7b3525..b2151b3a701 100644 --- a/website/docs/cdktf/typescript/r/iot_authorizer.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_authorizer.html.markdown @@ -31,6 +31,9 @@ class MyConvertedCode extends TerraformStack { name: "example", signingDisabled: false, status: "ACTIVE", + tags: { + Name: "example", + }, tokenKeyName: "Token-Header", tokenSigningPublicKeys: { Key1: Token.asString( @@ -50,6 +53,7 @@ class MyConvertedCode extends TerraformStack { * `name` - (Required) The name of the authorizer. * `signingDisabled` - (Optional) Specifies whether AWS IoT validates the token signature in an authorization request. Default: `false`. * `status` - (Optional) The status of Authorizer request at creation. Valid values: `ACTIVE`, `INACTIVE`. Default: `ACTIVE`. +* `tags` - (Optional) Map of tags to assign to this resource. If configured with a provider [`defaultTags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `tokenKeyName` - (Optional) The name of the token key used to extract the token from the HTTP headers. This value is required if signing is enabled in your authorizer. * `tokenSigningPublicKeys` - (Optional) The public keys used to verify the digital signature returned by your custom authentication service. This value is required if signing is enabled in your authorizer. 
@@ -58,6 +62,7 @@ class MyConvertedCode extends TerraformStack { This resource exports the following attributes in addition to the arguments above: * `arn` - The ARN of the authorizer. +* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block). ## Import @@ -87,4 +92,4 @@ Using `terraform import`, import IOT Authorizers using the name. For example: % terraform import aws_iot_authorizer.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/iot_topic_rule.html.markdown b/website/docs/cdktf/typescript/r/iot_topic_rule.html.markdown index b2ecbd66f0b..d602fadb24f 100644 --- a/website/docs/cdktf/typescript/r/iot_topic_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/iot_topic_rule.html.markdown @@ -115,6 +115,7 @@ The `cloudwatchAlarm` object takes the following arguments: The `cloudwatchLogs` object takes the following arguments: +* `batchMode` - (Optional) The payload that contains a JSON array of records will be sent to CloudWatch via a batch call. * `logGroupName` - (Required) The CloudWatch log group name. * `roleArn` - (Required) The IAM role ARN that allows access to the CloudWatch alarm. @@ -285,4 +286,4 @@ Using `terraform import`, import IoT Topic Rules using the `name`. 
For example: % terraform import aws_iot_topic_rule.rule ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application.html.markdown b/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application.html.markdown index 55b986daf77..53697a36e71 100644 --- a/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application.html.markdown +++ b/website/docs/cdktf/typescript/r/kinesisanalyticsv2_application.html.markdown @@ -301,7 +301,7 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: * `name` - (Required) The name of the application. -* `runtimeEnvironment` - (Required) The runtime environment for the application. Valid values: `SQL-1_0`, `FLINK-1_6`, `FLINK-1_8`, `FLINK-1_11`, `FLINK-1_13`, `FLINK-1_15`, `FLINK-1_18`. +* `runtimeEnvironment` - (Required) The runtime environment for the application. Valid values: `SQL-1_0`, `FLINK-1_6`, `FLINK-1_8`, `FLINK-1_11`, `FLINK-1_13`, `FLINK-1_15`, `FLINK-1_18`, `FLINK-1_19`. * `serviceExecutionRole` - (Required) The ARN of the [IAM role](/docs/providers/aws/r/iam_role.html) used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources. * `applicationConfiguration` - (Optional) The application's configuration * `applicationMode` - (Optional) The application's mode. Valid values are `STREAMING`, `INTERACTIVE`. 
@@ -569,4 +569,4 @@ Using `terraform import`, import `aws_kinesisanalyticsv2_application` using the % terraform import aws_kinesisanalyticsv2_application.example arn:aws:kinesisanalytics:us-west-2:123456789012:application/example-sql-application ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lakeformation_data_lake_settings.html.markdown b/website/docs/cdktf/typescript/r/lakeformation_data_lake_settings.html.markdown index 61a0561920f..c354ebccab6 100644 --- a/website/docs/cdktf/typescript/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/cdktf/typescript/r/lakeformation_data_lake_settings.html.markdown @@ -89,6 +89,7 @@ class MyConvertedCode extends TerraformStack { new LakeformationDataLakeSettings(this, "example", { admins: [test.arn, Token.asString(awsIamRoleTest.arn)], allowExternalDataFiltering: true, + allow_full_table_external_data_access: true, authorizedSessionTagValueList: ["Amazon EMR"], createDatabaseDefaultPermissions: [ { @@ -125,6 +126,7 @@ The following arguments are optional: * `allowExternalDataFiltering` - (Optional) Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `externalDataFilteringAllowList` - (Optional) A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `authorizedSessionTagValueList` - (Optional) Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. +* `allow_full_table_external_data_access` - (Optional) Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. ~> **NOTE:** Although optional, not including `admins`, `createDatabaseDefaultPermissions`, `createTableDefaultPermissions`, and/or `trustedResourceOwners` results in the setting being cleared. 
@@ -146,4 +148,4 @@ The following arguments are optional: This resource exports no additional attributes. - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lambda_event_source_mapping.html.markdown b/website/docs/cdktf/typescript/r/lambda_event_source_mapping.html.markdown index 9656a0e0004..65da9c34a2d 100644 --- a/website/docs/cdktf/typescript/r/lambda_event_source_mapping.html.markdown +++ b/website/docs/cdktf/typescript/r/lambda_event_source_mapping.html.markdown @@ -320,7 +320,7 @@ class MyConvertedCode extends TerraformStack { ### scaling_config Configuration Block -* `maximumConcurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between `2` and `1000`. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). +* `maximumConcurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to `2`. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase the concurrency beyond 1000. 
### self_managed_event_source Configuration Block @@ -381,4 +381,4 @@ Using `terraform import`, import Lambda event source mappings using the `UUID` ( % terraform import aws_lambda_event_source_mapping.event_source_mapping 12345kxodurf3443 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/launch_template.html.markdown b/website/docs/cdktf/typescript/r/launch_template.html.markdown index 5ff878a8102..ca46124b2c5 100644 --- a/website/docs/cdktf/typescript/r/launch_template.html.markdown +++ b/website/docs/cdktf/typescript/r/launch_template.html.markdown @@ -131,7 +131,7 @@ This resource supports the following arguments: * `hibernationOptions` - (Optional) The hibernation options for the instance. See [Hibernation Options](#hibernation-options) below for more details. * `iamInstanceProfile` - (Optional) The IAM Instance Profile to launch the instance with. See [Instance Profile](#instance-profile) below for more details. -* `imageId` - (Optional) The AMI from which to launch the instance. +* `imageId` - (Optional) The AMI from which to launch the instance or use a Systems Manager parameter convention e.g. `resolve:ssm:parameter-name`. See [docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/create-launch-template.html#use-an-ssm-parameter-instead-of-an-ami-id) for more details. * `instanceInitiatedShutdownBehavior` - (Optional) Shutdown behavior for the instance. Can be `stop` or `terminate`. (Default: `stop`). * `instanceMarketOptions` - (Optional) The market (purchasing) option for the instance. See [Market Options](#market-options) @@ -233,7 +233,7 @@ Attach an elastic GPU the instance. 
The `elasticGpuSpecifications` block supports the following: -* `type` - The [Elastic GPU Type](https://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/elastic-gpus.html#elastic-gpus-basics) +* `type` - The [Elastic GPU Type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-graphics.html#elastic-graphics-basics) ### Elastic Inference Accelerator @@ -352,6 +352,7 @@ This configuration block supports the following: * ssd - solid state drive ``` +* `maxSpotPriceAsPercentageOfOptimalOnDemandPrice` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spotMaxPricePercentageOverLowestPrice` * `memoryGibPerVcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. * `min` - (Optional) Minimum. May be a decimal number, e.g. `0.5`. * `max` - (Optional) Maximum. May be a decimal number, e.g. `0.5`. @@ -368,7 +369,7 @@ This configuration block supports the following: If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. * `requireHibernateSupport` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. -* `spotMaxPricePercentageOverLowestPrice` - (Optional) The price protection threshold for Spot Instances. 
This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. +* `spotMaxPricePercentageOverLowestPrice` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `maxSpotPriceAsPercentageOfOptimalOnDemandPrice` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. * `totalLocalStorageGb` - (Optional) Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. @@ -530,4 +531,4 @@ Using `terraform import`, import Launch Templates using the `id`. 
For example: % terraform import aws_launch_template.web lt-12345678 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lb_target_group.html.markdown b/website/docs/cdktf/typescript/r/lb_target_group.html.markdown index bc77aeba500..e0f36008a9e 100644 --- a/website/docs/cdktf/typescript/r/lb_target_group.html.markdown +++ b/website/docs/cdktf/typescript/r/lb_target_group.html.markdown @@ -154,6 +154,47 @@ class MyConvertedCode extends TerraformStack { ``` +### Target group with health requirements + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { LbTargetGroup } from "./.gen/providers/aws/lb-target-group"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new LbTargetGroup(this, "tcp-example", { + name: "tf-example-lb-nlb-tg", + port: 80, + protocol: "TCP", + target_group_health: [ + { + dns_failover: [ + { + minimum_healthy_targets_count: "1", + minimum_healthy_targets_percentage: "off", + }, + ], + unhealthy_state_routing: [ + { + minimum_healthy_targets_count: "1", + minimum_healthy_targets_percentage: "off", + }, + ], + }, + ], + vpcId: main.id, + }); + } +} + +``` + ## Argument Reference This resource supports the following arguments: @@ -180,6 +221,7 @@ This resource supports the following arguments: * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
* `targetFailover` - (Optional) Target failover block. Only applicable for Gateway Load Balancer target groups. See [target_failover](#target_failover) for more information. * `targetHealthState` - (Optional) Target health state block. Only applicable for Network Load Balancer target groups when `protocol` is `TCP` or `TLS`. See [target_health_state](#target_health_state) for more information. +* `target_group_health` - (Optional) Target health requirements block. See [target_group_health](#target_group_health) for more information. * `targetType` - (Optional, Forces new resource) Type of target that you must specify when registering targets with this target group. See [doc](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateTargetGroup.html) for supported values. The default is `instance`. @@ -244,6 +286,29 @@ This resource supports the following arguments: * `enableUnhealthyConnectionTermination` - (Optional) Indicates whether the load balancer terminates connections to unhealthy targets. Possible values are `true` or `false`. Default: `true`. +### target_group_health + +~> **NOTE:** This block is only supported by Application Load Balancers and Network Load Balancers. + +The `target_group_health` block supports the following: + +* `dns_failover` - (Optional) Block to configure DNS Failover requirements. See [DNS Failover](#dns_failover) below for details on attributes. +* `unhealthy_state_routing` - (Optional) Block to configure Unhealthy State Routing requirements. See [Unhealthy State Routing](#unhealthy_state_routing) below for details on attributes. + +### dns_failover + +The `dns_failover` block supports the following: + +* `minimum_healthy_targets_count` - (Optional) The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. 
The possible values are `off` or an integer from `1` to the maximum number of targets. The default is `off`. +* `minimum_healthy_targets_percentage` - (Optional) The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from `1` to `100`. The default is `off`. + +### unhealthy_state_routing + +The `unhealthy_state_routing` block supports the following: + +* `minimum_healthy_targets_count` - (Optional) The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `1` to the maximum number of targets. The default is `1`. +* `minimum_healthy_targets_percentage` - (Optional) The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from `1` to `100`. The default is `off`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -287,4 +352,4 @@ Using `terraform import`, import Target Groups using their ARN. 
For example: % terraform import aws_lb_target_group.app_front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:targetgroup/app-front-end/20cfe21448b66314 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/lightsail_container_service.html.markdown b/website/docs/cdktf/typescript/r/lightsail_container_service.html.markdown index c59412dd1fa..1f319e8b060 100644 --- a/website/docs/cdktf/typescript/r/lightsail_container_service.html.markdown +++ b/website/docs/cdktf/typescript/r/lightsail_container_service.html.markdown @@ -166,7 +166,7 @@ class MyConvertedCode extends TerraformStack { container service. For more information, see [Enabling and managing custom domains for your Amazon Lightsail container services](https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-container-services-certificates). -This argument supports the following arguments: +This resource supports the following arguments: * `name` - (Required) The name for the container service. Names must be of length 1 to 63, and be unique within each AWS Region in your Lightsail account. @@ -260,4 +260,4 @@ Using `terraform import`, import Lightsail Container Service using the `name`. F % terraform import aws_lightsail_container_service.my_container_service container-service-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/medialive_channel.html.markdown b/website/docs/cdktf/typescript/r/medialive_channel.html.markdown index 7a4e9403038..9b85d06f819 100644 --- a/website/docs/cdktf/typescript/r/medialive_channel.html.markdown +++ b/website/docs/cdktf/typescript/r/medialive_channel.html.markdown @@ -164,8 +164,8 @@ The following arguments are optional: ### Input Settings -* `audio_selectors` - (Optional) Used to select the audio stream to decode for inputs that have multiple. See [Audio Selectors](#audio-selectors) for more details. 
-* `caption_selectors` - (Optional) Used to select the caption input to use for inputs that have multiple available. See [Caption Selectors](#caption-selectors) for more details. +* `audioSelector` - (Optional) Used to select the audio stream to decode for inputs that have multiple. See [Audio Selectors](#audio-selectors) for more details. +* `captionSelector` - (Optional) Used to select the caption input to use for inputs that have multiple available. See [Caption Selectors](#caption-selectors) for more details. * `deblockFilter` - (Optional) Enable or disable the deblock filter when filtering. * `denoiseFilter` - (Optional) Enable or disable the denoise filter when filtering. * `filterStrength` - (Optional) Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest). @@ -826,4 +826,4 @@ Using `terraform import`, import MediaLive Channel using the `channelId`. For ex % terraform import aws_medialive_channel.example 1234567 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mskconnect_connector.html.markdown b/website/docs/cdktf/typescript/r/mskconnect_connector.html.markdown index fff0926aaf2..460e98426dd 100644 --- a/website/docs/cdktf/typescript/r/mskconnect_connector.html.markdown +++ b/website/docs/cdktf/typescript/r/mskconnect_connector.html.markdown @@ -86,105 +86,145 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `capacity` - (Required) Information about the capacity allocated to the connector. See below. +* `capacity` - (Required) Information about the capacity allocated to the connector. See [`capacity` Block](#capacity-block) for details. * `connectorConfiguration` - (Required) A map of keys to values that represent the configuration for the connector. -* `description` - (Optional) A summary description of the connector. 
-* `kafkaCluster` - (Required) Specifies which Apache Kafka cluster to connect to. See below. -* `kafkaClusterClientAuthentication` - (Required) Details of the client authentication used by the Apache Kafka cluster. See below. -* `kafkaClusterEncryptionInTransit` - (Required) Details of encryption in transit to the Apache Kafka cluster. See below. +* `kafkaCluster` - (Required) Specifies which Apache Kafka cluster to connect to. See [`kafkaCluster` Block](#kafka_cluster-block) for details. +* `kafkaClusterClientAuthentication` - (Required) Details of the client authentication used by the Apache Kafka cluster. See [`kafkaClusterClientAuthentication` Block](#kafka_cluster_client_authentication-block) for details. +* `kafkaClusterEncryptionInTransit` - (Required) Details of encryption in transit to the Apache Kafka cluster. See [`kafkaClusterEncryptionInTransit` Block](#kafka_cluster_encryption_in_transit-block) for details. * `kafkaconnectVersion` - (Required) The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins. -* `logDelivery` - (Optional) Details about log delivery. See below. * `name` - (Required) The name of the connector. -* `plugin` - (Required) Specifies which plugins to use for the connector. See below. +* `plugin` - (Required) Specifies which plugins to use for the connector. See [`plugin` Block](#plugin-block) for details. * `serviceExecutionRoleArn` - (Required) The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket. -* `workerConfiguration` - (Optional) Specifies which worker configuration to use with the connector. See below. 
-### capacity Configuration Block +The following arguments are optional: + +* `description` - (Optional) A summary description of the connector. +* `logDelivery` - (Optional) Details about log delivery. See [`logDelivery` Block](#log_delivery-block) for details. +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `workerConfiguration` - (Optional) Specifies which worker configuration to use with the connector. See [`workerConfiguration` Block](#worker_configuration-block) for details. + +### `capacity` Block + +The `capacity` configuration block supports the following arguments: + +* `autoscaling` - (Optional) Information about the auto scaling parameters for the connector. See [`autoscaling` Block](#autoscaling-block) for details. +* `provisionedCapacity` - (Optional) Details about a fixed capacity allocated to a connector. See [`provisionedCapacity` Block](#provisioned_capacity-block) for details. -* `autoscaling` - (Optional) Information about the auto scaling parameters for the connector. See below. -* `provisionedCapacity` - (Optional) Details about a fixed capacity allocated to a connector. See below. +### `autoscaling` Block -### autoscaling Configuration Block +The `autoscaling` configuration block supports the following arguments: * `maxWorkerCount` - (Required) The maximum number of workers allocated to the connector. * `mcuCount` - (Optional) The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: `1`, `2`, `4`, `8`. The default value is `1`. * `minWorkerCount` - (Required) The minimum number of workers allocated to the connector. -* `scaleInPolicy` - (Optional) The scale-in policy for the connector. See below. 
-* `scaleOutPolicy` - (Optional) The scale-out policy for the connector. See below. +* `scaleInPolicy` - (Optional) The scale-in policy for the connector. See [`scaleInPolicy` Block](#scale_in_policy-block) for details. +* `scaleOutPolicy` - (Optional) The scale-out policy for the connector. See [`scaleOutPolicy` Block](#scale_out_policy-block) for details. -### scale_in_policy Configuration Block +### `scaleInPolicy` Block + +The `scaleInPolicy` configuration block supports the following arguments: * `cpuUtilizationPercentage` - (Required) Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered. -### scale_out_policy Configuration Block +### `scaleOutPolicy` Block + +The `scaleOutPolicy` configuration block supports the following arguments: * `cpuUtilizationPercentage` - (Required) The CPU utilization percentage threshold at which you want connector scale out to be triggered. -### provisioned_capacity Configuration Block +### `provisionedCapacity` Block + +The `provisionedCapacity` configuration block supports the following arguments: * `mcuCount` - (Optional) The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: `1`, `2`, `4`, `8`. The default value is `1`. * `workerCount` - (Required) The number of workers that are allocated to the connector. -### kafka_cluster Configuration Block +### `kafkaCluster` Block + +The `kafkaCluster` configuration block supports the following arguments: + +* `apacheKafkaCluster` - (Required) The Apache Kafka cluster to which the connector is connected. See [`apacheKafkaCluster` Block](#apache_kafka_cluster-block) for details. -* `apacheKafkaCluster` - (Required) The Apache Kafka cluster to which the connector is connected. +### `apacheKafkaCluster` Block -### apache_kafka_cluster Configuration Block +The `apacheKafkaCluster` configuration block supports the following arguments: * `bootstrapServers` - (Required) The bootstrap servers of the cluster. 
-* `vpc` - (Required) Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. +* `vpc` - (Required) Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. See [`vpc` Block](#vpc-block) for details. -### vpc Configuration Block +### `vpc` Block + +The `vpc` configuration block supports the following arguments: * `securityGroups` - (Required) The security groups for the connector. * `subnets` - (Required) The subnets for the connector. -### kafka_cluster_client_authentication Configuration Block +### `kafkaClusterClientAuthentication` Block + +The `kafkaClusterClientAuthentication` configuration block supports the following arguments: * `authenticationType` - (Optional) The type of client authentication used to connect to the Apache Kafka cluster. Valid values: `IAM`, `NONE`. A value of `NONE` means that no client authentication is used. The default value is `NONE`. -### kafka_cluster_encryption_in_transit Configuration Block +### `kafkaClusterEncryptionInTransit` Block + +The `kafkaClusterEncryptionInTransit` configuration block supports the following arguments: * `encryptionType` - (Optional) The type of encryption in transit to the Apache Kafka cluster. Valid values: `PLAINTEXT`, `TLS`. The default values is `PLAINTEXT`. -### log_delivery Configuration Block +### `logDelivery` Block + +The `logDelivery` configuration block supports the following arguments: + +* `workerLogDelivery` - (Required) The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See [`workerLogDelivery` Block](#worker_log_delivery-block) for details. + +### `workerLogDelivery` Block -* `workerLogDelivery` - (Required) The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See below. 
+The `workerLogDelivery` configuration block supports the following arguments: -### worker_log_delivery Configuration Block +* `cloudwatchLogs` - (Optional) Details about delivering logs to Amazon CloudWatch Logs. See [`cloudwatchLogs` Block](#cloudwatch_logs-block) for details. +* `firehose` - (Optional) Details about delivering logs to Amazon Kinesis Data Firehose. See [`firehose` Block](#firehose-block) for details. +* `s3` - (Optional) Details about delivering logs to Amazon S3. See [`s3` Block](#s3-block) for details. -* `cloudwatchLogs` - (Optional) Details about delivering logs to Amazon CloudWatch Logs. See below. -* `firehose` - (Optional) Details about delivering logs to Amazon Kinesis Data Firehose. See below. -* `s3` - (Optional) Details about delivering logs to Amazon S3. See below. +### `cloudwatchLogs` Block -### cloudwatch_logs Configuration Block +The `cloudwatchLogs` configuration block supports the following arguments: * `enabled` - (Optional) Whether log delivery to Amazon CloudWatch Logs is enabled. * `logGroup` - (Required) The name of the CloudWatch log group that is the destination for log delivery. -### firehose Configuration Block +### `firehose` Block + +The `firehose` configuration block supports the following arguments: * `deliveryStream` - (Optional) The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery. * `enabled` - (Required) Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose. -### s3 Configuration Block +### `s3` Block + +The `s3` configuration block supports the following arguments: * `bucket` - (Optional) The name of the S3 bucket that is the destination for log delivery. * `enabled` - (Required) Specifies whether connector logs get sent to the specified Amazon S3 destination. * `prefix` - (Optional) The S3 prefix that is the destination for log delivery.
-### plugin Configuration Block +### `plugin` Block + +The `plugin` configuration block supports the following arguments: -* `customPlugin` - (Required) Details about a custom plugin. See below. +* `customPlugin` - (Required) Details about a custom plugin. See [`customPlugin` Block](#custom_plugin-block) for details. -### custom_plugin Configuration Block +### `customPlugin` Block + +The `customPlugin` configuration block supports the following arguments: * `arn` - (Required) The Amazon Resource Name (ARN) of the custom plugin. * `revision` - (Required) The revision of the custom plugin. -### worker_configuration Configuration Block +### `workerConfiguration` Block + +The `workerConfiguration` configuration block supports the following arguments: * `arn` - (Required) The Amazon Resource Name (ARN) of the worker configuration. * `revision` - (Required) The revision of the worker configuration. @@ -194,6 +234,7 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - The Amazon Resource Name (ARN) of the connector. +* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `version` - The current version of the connector.
## Timeouts @@ -236,4 +277,4 @@ Using `terraform import`, import MSK Connect Connector using the connector's `ar % terraform import aws_mskconnect_connector.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:connector/example/264edee4-17a3-412e-bd76-6681cfc93805-3' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mskconnect_custom_plugin.html.markdown b/website/docs/cdktf/typescript/r/mskconnect_custom_plugin.html.markdown index 6af1a05c761..e3224848a45 100644 --- a/website/docs/cdktf/typescript/r/mskconnect_custom_plugin.html.markdown +++ b/website/docs/cdktf/typescript/r/mskconnect_custom_plugin.html.markdown @@ -65,23 +65,28 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `name` - (Required) The name of the custom plugin.. -* `contentType` - (Required) The type of the plugin file. Allowed values are `ZIP` and `JAR`. -* `location` - (Required) Information about the location of a custom plugin. See below. +* `name` - (Required, Forces new resource) The name of the custom plugin.. +* `contentType` - (Required, Forces new resource) The type of the plugin file. Allowed values are `ZIP` and `JAR`. +* `location` - (Required, Forces new resource) Information about the location of a custom plugin. See [`location` Block](#location-block) for details. +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. The following arguments are optional: -* `description` - (Optional) A summary description of the custom plugin. +* `description` - (Optional, Forces new resource) A summary description of the custom plugin. 
-### location Argument Reference +### `location` Block -* `s3` - (Required) Information of the plugin file stored in Amazon S3. See below. +The `location` configuration block supports the following arguments: -#### location s3 Argument Reference +* `s3` - (Required, Forces new resource) Information of the plugin file stored in Amazon S3. See [`s3` Block](#s3-block) for details. -* `bucketArn` - (Required) The Amazon Resource Name (ARN) of an S3 bucket. -* `fileKey` - (Required) The file key for an object in an S3 bucket. -* `objectVersion` - (Optional) The version of an object in an S3 bucket. +### `s3` Block + +The `s3` configuration block supports the following arguments: + +* `bucketArn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of an S3 bucket. +* `fileKey` - (Required, Forces new resource) The file key for an object in an S3 bucket. +* `objectVersion` - (Optional, Forces new resource) The version of an object in an S3 bucket. ## Attribute Reference @@ -90,6 +95,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - the Amazon Resource Name (ARN) of the custom plugin. * `latestRevision` - an ID of the latest successfully created revision of the custom plugin. * `state` - the state of the custom plugin. +* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block).
## Timeouts @@ -130,4 +136,4 @@ Using `terraform import`, import MSK Connect Custom Plugin using the plugin's `a % terraform import aws_mskconnect_custom_plugin.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:custom-plugin/debezium-example/abcdefgh-1234-5678-9abc-defghijklmno-4' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mskconnect_worker_configuration.html.markdown b/website/docs/cdktf/typescript/r/mskconnect_worker_configuration.html.markdown index 0223161ff45..5d318283188 100644 --- a/website/docs/cdktf/typescript/r/mskconnect_worker_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/mskconnect_worker_configuration.html.markdown @@ -42,12 +42,13 @@ class MyConvertedCode extends TerraformStack { The following arguments are required: -* `name` - (Required) The name of the worker configuration. -* `propertiesFileContent` - (Required) Contents of connect-distributed.properties file. The value can be either base64 encoded or in raw format. +* `name` - (Required, Forces new resource) The name of the worker configuration. +* `propertiesFileContent` - (Required, Forces new resource) Contents of connect-distributed.properties file. The value can be either base64 encoded or in raw format. The following arguments are optional: -* `description` - (Optional) A summary description of the worker configuration. +* `description` - (Optional, Forces new resource) A summary description of the worker configuration. +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -55,6 +56,13 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - the Amazon Resource Name (ARN) of the worker configuration. * `latestRevision` - an ID of the latest successfully created revision of the worker configuration. +* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `delete` - (Default `10m`) ## Import @@ -88,4 +96,4 @@ Using `terraform import`, import MSK Connect Worker Configuration using the plug % terraform import aws_mskconnect_worker_configuration.example 'arn:aws:kafkaconnect:eu-central-1:123456789012:worker-configuration/example/8848493b-7fcc-478c-a646-4a52634e3378-4' ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/mwaa_environment.html.markdown b/website/docs/cdktf/typescript/r/mwaa_environment.html.markdown index fae3c144c29..498d09188fa 100644 --- a/website/docs/cdktf/typescript/r/mwaa_environment.html.markdown +++ b/website/docs/cdktf/typescript/r/mwaa_environment.html.markdown @@ -171,6 +171,7 @@ This resource supports the following arguments: * `airflowConfigurationOptions` - (Optional) The `airflowConfigurationOptions` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options. * `airflowVersion` - (Optional) Airflow version of your environment, will be set by default to the latest version that MWAA supports. * `dagS3Path` - (Required) The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. 
For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). +* `endpointManagement` - (Optional) Defines whether the VPC endpoints configured for the environment are created and managed by the customer or by AWS. If set to `SERVICE`, Amazon MWAA will create and manage the required VPC endpoints in your VPC. If set to `CUSTOMER`, you must create, and manage, the VPC endpoints for your VPC. Defaults to `SERVICE` if not set. * `environmentClass` - (Optional) Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes. * `executionRoleArn` - (Required) The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification. * `kmsKey` - (Optional) The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information. @@ -269,4 +270,4 @@ Using `terraform import`, import MWAA Environment using `Name`. 
For example: % terraform import aws_mwaa_environment.example MyAirflowEnvironment ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkfirewall_tls_inspection_configuration.html.markdown b/website/docs/cdktf/typescript/r/networkfirewall_tls_inspection_configuration.html.markdown new file mode 100644 index 00000000000..47dfe63c955 --- /dev/null +++ b/website/docs/cdktf/typescript/r/networkfirewall_tls_inspection_configuration.html.markdown @@ -0,0 +1,545 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_tls_inspection_configuration" +description: |- + Terraform resource for managing an AWS Network Firewall TLS Inspection Configuration. +--- + + + +# Resource: aws_networkfirewall_tls_inspection_configuration + +Terraform resource for managing an AWS Network Firewall TLS Inspection Configuration. + +## Example Usage + +~> **NOTE:** You must configure either inbound inspection, outbound inspection, or both. + +### Basic inbound/ingress inspection + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { NetworkfirewallTlsInspectionConfiguration } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkfirewallTlsInspectionConfiguration(this, "example", { + description: "example", + encryption_configuration: [ + { + key_id: "AWS_OWNED_KMS_KEY", + type: "AWS_OWNED_KMS_KEY", + }, + ], + name: "example", + tls_inspection_configuration: [ + { + server_certificate_configuration: [ + { + scope: [ + { + destination: [ + { + address_definition: "0.0.0.0/0", + }, + ], + destination_ports: [ + { + from_port: 443, + to_port: 443, + }, + ], + protocols: [6], + source: [ + { + address_definition: "0.0.0.0/0", + }, + ], + source_ports: [ + { + from_port: 0, + to_port: 65535, + }, + ], + }, + ], + server_certificate: [ + { + resource_arn: example1.arn, + }, + ], + }, + ], + }, + ], + }); + } +} + +``` + +### Basic outbound/engress inspection + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { NetworkfirewallTlsInspectionConfiguration } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkfirewallTlsInspectionConfiguration(this, "example", { + description: "example", + encryption_configuration: [ + { + key_id: "AWS_OWNED_KMS_KEY", + type: "AWS_OWNED_KMS_KEY", + }, + ], + name: "example", + tls_inspection_configuration: [ + { + server_certificate_configuration: [ + { + certificate_authority_arn: example1.arn, + check_certificate_revocation_status: [ + { + revoked_status_action: "REJECT", + unknown_status_action: "PASS", + }, + ], + scope: [ + { + destination: [ + { + address_definition: "0.0.0.0/0", + }, + ], + destination_ports: [ + { + from_port: 443, + to_port: 443, + }, + ], + protocols: [6], + source: [ + { + address_definition: "0.0.0.0/0", + }, + ], + source_ports: [ + { + from_port: 0, + to_port: 65535, + }, + ], + }, + ], + }, + ], + }, + ], + }); + } +} + +``` + +### Inbound with encryption configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { NetworkfirewallTlsInspectionConfiguration } from "./.gen/providers/aws/"; +import { KmsKey } from "./.gen/providers/aws/kms-key"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new KmsKey(this, "example", { + deletionWindowInDays: 7, + description: "example", + }); + const awsNetworkfirewallTlsInspectionConfigurationExample = + new NetworkfirewallTlsInspectionConfiguration(this, "example_1", { + description: "example", + encryption_configuration: [ + { + key_id: example.arn, + type: "CUSTOMER_KMS", + }, + ], + name: "example", + tls_inspection_configuration: [ + { + server_certificate_configuration: [ + { + scopes: [ + { + destination_ports: [ + { + from_port: 443, + to_port: 443, + }, + ], + destinations: [ + { + address_definition: "0.0.0.0/0", + }, + ], + protocols: [6], + source_ports: [ + { + from_port: 0, + to_port: 65535, + }, + ], + sources: [ + { + address_definition: "0.0.0.0/0", + }, + ], + }, + ], + server_certificate: [ + { + resource_arn: example1.arn, + }, + ], + }, + ], + }, + ], + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsNetworkfirewallTlsInspectionConfigurationExample.overrideLogicalId( + "example" + ); + } +} + +``` + +### Outbound with encryption configuration + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { NetworkfirewallTlsInspectionConfiguration } from "./.gen/providers/aws/"; +import { KmsKey } from "./.gen/providers/aws/kms-key"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new KmsKey(this, "example", { + deletionWindowInDays: 7, + description: "example", + }); + const awsNetworkfirewallTlsInspectionConfigurationExample = + new NetworkfirewallTlsInspectionConfiguration(this, "example_1", { + description: "example", + encryption_configuration: [ + { + key_id: example.arn, + type: "CUSTOMER_KMS", + }, + ], + name: "example", + tls_inspection_configuration: [ + { + server_certificate_configurations: [ + { + certificate_authority_arn: example1.arn, + check_certificate_revocation_status: [ + { + revoked_status_action: "REJECT", + unknown_status_action: "PASS", + }, + ], + scope: [ + { + destination: [ + { + address_definition: "0.0.0.0/0", + }, + ], + destination_ports: [ + { + from_port: 443, + to_port: 443, + }, + ], + protocols: [6], + source: [ + { + address_definition: "0.0.0.0/0", + }, + ], + source_ports: [ + { + from_port: 0, + to_port: 65535, + }, + ], + }, + ], + }, + ], + }, + ], + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsNetworkfirewallTlsInspectionConfigurationExample.overrideLogicalId( + "example" + ); + } +} + +``` + +### Combined inbound and outbound + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { NetworkfirewallTlsInspectionConfiguration } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkfirewallTlsInspectionConfiguration(this, "example", { + description: "example", + encryption_configuration: [ + { + key_id: "AWS_OWNED_KMS_KEY", + type: "AWS_OWNED_KMS_KEY", + }, + ], + name: "example", + tls_inspection_configuration: [ + { + server_certificate_configuration: [ + { + certificate_authority_arn: example1.arn, + check_certificate_revocation_status: [ + { + revoked_status_action: "REJECT", + unknown_status_action: "PASS", + }, + ], + scope: [ + { + destination: [ + { + address_definition: "0.0.0.0/0", + }, + ], + destination_ports: [ + { + from_port: 443, + to_port: 443, + }, + ], + protocols: [6], + source: [ + { + address_definition: "0.0.0.0/0", + }, + ], + source_ports: [ + { + from_port: 0, + to_port: 65535, + }, + ], + }, + ], + server_certificate: [ + { + resource_arn: example2.arn, + }, + ], + }, + ], + }, + ], + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required, Forces new resource) Descriptive name of the TLS inspection configuration. +* `tls_inspection_configuration` - (Required) TLS inspection configuration block. Detailed below. + +The following arguments are optional: + +* `description` - (Optional) Description of the TLS inspection configuration. +* `encryptionConfiguration` - (Optional) Encryption configuration block. Detailed below. + +### Encryption Configuration + +* `keyId` - (Optional) ARN of the Amazon Web Services Key Management Service (KMS) customer managed key. +* `type` - (Optional) Type of KMS key to use for encryption of your Network Firewall resources. Valid values: `AWS_OWNED_KMS_KEY`, `CUSTOMER_KMS`. 
+ +### TLS Inspection Configuration + +* `server_certificate_configuration` - (Required) Server certificate configurations that are associated with the TLS configuration. Detailed below. + +### Server Certificate Configuration + +The `server_certificate_configuration` block supports the following arguments: + +* `certificateAuthorityArn` - (Optional) ARN of the imported certificate authority (CA) certificate within Certificate Manager (ACM) to use for outbound SSL/TLS inspection. See [Using SSL/TLS certificates with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html) for limitations on CA certificates. +* `check_certificate_revocation_status` - (Optional) Check Certificate Revocation Status block. Detailed below. +* `scope` - (Required) Scope block. Detailed below. +* `serverCertificate` - (Optional) Server certificates to use for inbound SSL/TLS inspection. See [Using SSL/TLS certificates with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html). + +### Check Certificate Revocation Status + +The `check_certificate_revocation_status` block supports the following arguments: + +~> **NOTE:** To check the certificate revocation status, you must also specify a `certificateAuthorityArn` in `server_certificate_configuration`. + +* `revoked_status_action` - (Optional) How Network Firewall processes traffic when it determines that the certificate presented by the server in the SSL/TLS connection has a revoked status. See [Checking certificate revocation status](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html#tls-inspection-check-certificate-revocation-status) for details. Valid values: `PASS`, `DROP`, `REJECT`.
+* `unknown_status_action` - (Optional) How Network Firewall processes traffic when it determines that the certificate presented by the server in the SSL/TLS connection has an unknown status, or a status that cannot be determined for any other reason, including when the service is unable to connect to the OCSP and CRL endpoints for the certificate. See [Checking certificate revocation status](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html#tls-inspection-check-certificate-revocation-status) for details. Valid values: `PASS`, `DROP`, `REJECT`. + +### Scopes + +The `scope` block supports the following arguments: + +* `destination` - (Required) Set of configuration blocks describing the destination IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address. See [Destination](#destination) below for details. +* `destination_ports` - (Optional) Set of configuration blocks describing the destination ports to inspect for. If not specified, this matches with any destination port. See [Destination Ports](#destination-ports) below for details. +* `protocols` - (Optional) Set of protocols to inspect for, specified using the protocol's assigned internet protocol number (IANA). Network Firewall currently supports TCP only. Valid values: `6` +* `source` - (Optional) Set of configuration blocks describing the source IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address. See [Source](#source) below for details. +* `source_ports` - (Optional) Set of configuration blocks describing the source ports to inspect for. If not specified, this matches with any source port. See [Source Ports](#source-ports) below for details. + +### Destination + +The `destination` block supports the following argument: + +* `addressDefinition` - (Required) An IP address or a block of IP addresses in CIDR notation. 
AWS Network Firewall supports all address ranges for IPv4. + +### Destination Ports + +The `destination_ports` block supports the following arguments: + +* `fromPort` - (Required) The lower limit of the port range. This must be less than or equal to the `toPort`. +* `toPort` - (Optional) The upper limit of the port range. This must be greater than or equal to the `fromPort`. + +### Source + +The `source` block supports the following argument: + +* `addressDefinition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. + +### Source Ports + +The `source_ports` block supports the following arguments: + +* `fromPort` - (Required) The lower limit of the port range. This must be less than or equal to the `toPort`. +* `toPort` - (Optional) The upper limit of the port range. This must be greater than or equal to the `fromPort`. + +### Server Certificates + +The `serverCertificate` block supports the following arguments: + +* `resourceArn` - (Optional) ARN of the Certificate Manager SSL/TLS server certificate that's used for inbound SSL/TLS inspection. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the TLS Inspection Configuration. +* `certificateAuthority` - Certificate Manager certificate block. See [Certificate Authority](#certificate-authority) below for details. +* `certificates` - List of certificate blocks describing certificates associated with the TLS inspection configuration. See [Certificates](#certificates) below for details. +* `number_of_associations` - Number of firewall policies that use this TLS inspection configuration. +* `tls_inspection_configuration_id` - A unique identifier for the TLS inspection configuration. +* `updateToken` - String token used when updating the rule group. 
+ +### Certificate Authority + +The `certificateAuthority` block exports the following attributes: + +* `certificateArn` - ARN of the certificate. +* `certificate_serial` - Serial number of the certificate. +* `status` - Status of the certificate. +* `statusMessage` - Details about the certificate status, including information about certificate errors. + +### Certificates + +The `certificates` block exports the following attributes: + +* `certificateArn` - ARN of the certificate. +* `certificate_serial` - Serial number of the certificate. +* `status` - Status of the certificate. +* `statusMessage` - Details about the certificate status, including information about certificate errors. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `30m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall TLS Inspection Configuration using the `arn`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkfirewallTlsInspectionConfiguration } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + NetworkfirewallTlsInspectionConfiguration.generateConfigForImport( + this, + "example", + "arn:aws:network-firewall::::tls-configuration/example" + ); + } +} + +``` + +Using `terraform import`, import Network Firewall TLS Inspection Configuration using the `arn`. 
For example: + +```console +% terraform import aws_networkfirewall_tls_inspection_configuration.example arn:aws:network-firewall::::tls-configuration/example +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmonitor_monitor.html.markdown b/website/docs/cdktf/typescript/r/networkmonitor_monitor.html.markdown new file mode 100644 index 00000000000..94fe6bfeeee --- /dev/null +++ b/website/docs/cdktf/typescript/r/networkmonitor_monitor.html.markdown @@ -0,0 +1,90 @@ +--- +subcategory: "CloudWatch Network Monitor" +layout: "aws" +page_title: "AWS: aws_networkmonitor_monitor" +description: |- + Terraform resource for managing an Amazon Network Monitor Monitor. +--- + + + +# Resource: aws_networkmonitor_monitor + +Terraform resource for managing an AWS Network Monitor Monitor. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkmonitorMonitor } from "./.gen/providers/aws/networkmonitor-monitor"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new NetworkmonitorMonitor(this, "example", { + aggregationPeriod: 30, + monitorName: "example", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +- `monitorName` - (Required) The name of the monitor. + +The following arguments are optional: + +- `aggregationPeriod` - (Optional) The time, in seconds, that metrics are aggregated and sent to Amazon CloudWatch. Valid values are either 30 or 60. +- `tags` - (Optional) Key-value tags for the monitor. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +- `arn` - The ARN of the monitor. +- `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmonitor_monitor` using the monitor name. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkmonitorMonitor } from "./.gen/providers/aws/networkmonitor-monitor"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + NetworkmonitorMonitor.generateConfigForImport( + this, + "example", + "monitor-7786087912324693644" + ); + } +} + +``` + +Using `terraform import`, import `aws_networkmonitor_monitor` using the monitor name. 
For example: + +```console +% terraform import aws_networkmonitor_monitor.example monitor-7786087912324693644 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/networkmonitor_probe.html.markdown b/website/docs/cdktf/typescript/r/networkmonitor_probe.html.markdown new file mode 100644 index 00000000000..e3b2f4209f8 --- /dev/null +++ b/website/docs/cdktf/typescript/r/networkmonitor_probe.html.markdown @@ -0,0 +1,110 @@ +--- +subcategory: "CloudWatch Network Monitor" +layout: "aws" +page_title: "AWS: aws_networkmonitor_probe" +description: |- + Terraform resource for managing an Amazon Network Monitor Probe. +--- + + + +# Resource: aws_networkmonitor_probe + +Terraform resource for managing an AWS Network Monitor Probe. + +## Example Usage + +### Basic Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkmonitorMonitor } from "./.gen/providers/aws/networkmonitor-monitor"; +import { NetworkmonitorProbe } from "./.gen/providers/aws/networkmonitor-probe"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new NetworkmonitorMonitor(this, "example", { + aggregationPeriod: 30, + monitorName: "example", + }); + const awsNetworkmonitorProbeExample = new NetworkmonitorProbe( + this, + "example_1", + { + destination: "127.0.0.1", + destinationPort: 80, + monitorName: example.monitorName, + packetSize: 200, + protocol: "TCP", + sourceArn: Token.asString(awsSubnetExample.arn), + } + ); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsNetworkmonitorProbeExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +- `destination` - (Required) The destination IP address. This must be either IPV4 or IPV6. +- `destinationPort` - (Optional) The port associated with the destination. This is required only if the protocol is TCP and must be a number between 1 and 65536. +- `monitorName` - (Required) The name of the monitor. +- `protocol` - (Required) The protocol used for the network traffic between the source and destination. This must be either TCP or ICMP. +- `sourceArn` - (Required) The ARN of the subnet. +- `packetSize` - (Optional) The size of the packets sent between the source and destination. This must be a number between 56 and 8500. + +The following arguments are optional: + +- `tags` - (Optional) Key-value tags for the monitor. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +- `arn` - The ARN of the attachment. +- `sourceArn` - The ARN of the subnet. +- `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmonitor_probe` using the monitor name and probe id. For example: + +```typescript +// DO NOT EDIT. 
Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { NetworkmonitorProbe } from "./.gen/providers/aws/networkmonitor-probe"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + NetworkmonitorProbe.generateConfigForImport( + this, + "example", + "monitor-7786087912324693644,probe-3qm8p693i4fi1h8lqylzkbp42e" + ); + } +} + +``` + +Using `terraform import`, import `aws_networkmonitor_probe` using the monitor name and probe id. For example: + +```console +% terraform import aws_networkmonitor_probe.example monitor-7786087912324693644,probe-3qm8p693i4fi1h8lqylzkbp42e +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/oam_link.html.markdown b/website/docs/cdktf/typescript/r/oam_link.html.markdown index 8463066b743..9d493781d25 100644 --- a/website/docs/cdktf/typescript/r/oam_link.html.markdown +++ b/website/docs/cdktf/typescript/r/oam_link.html.markdown @@ -41,6 +41,65 @@ class MyConvertedCode extends TerraformStack { ``` +### Log Group Filtering + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { OamLink } from "./.gen/providers/aws/oam-link"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new OamLink(this, "example", { + labelTemplate: "$AccountName", + linkConfiguration: { + logGroupConfiguration: { + filter: + "LogGroupName LIKE 'aws/lambda/%' OR LogGroupName LIKE 'AWSLogs%'", + }, + }, + resourceTypes: ["AWS::Logs::LogGroup"], + sinkIdentifier: test.id, + }); + } +} + +``` + +### Metric Filtering + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { OamLink } from "./.gen/providers/aws/oam-link"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new OamLink(this, "example", { + labelTemplate: "$AccountName", + linkConfiguration: { + metricConfiguration: { + filter: "Namespace IN ('AWS/EC2', 'AWS/ELB', 'AWS/S3')", + }, + }, + resourceTypes: ["AWS::CloudWatch::Metric"], + sinkIdentifier: test.id, + }); + } +} + +``` + ## Argument Reference The following arguments are required: @@ -51,13 +110,34 @@ The following arguments are required: The following arguments are optional: +* `linkConfiguration` - (Optional) Configuration for creating filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account. See [`linkConfiguration` Block](#link_configuration-block) for details. * `tags` - (Optional) A map of tags to assign to the resource. 
If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +### `linkConfiguration` Block + +The `linkConfiguration` configuration block supports the following arguments: + +* `logGroupConfiguration` - (Optional) Configuration for filtering which log groups are to send log events from the source account to the monitoring account. See [`logGroupConfiguration` Block](#log_group_configuration-block) for details. +* `metricConfiguration` - (Optional) Configuration for filtering which metric namespaces are to be shared from the source account to the monitoring account. See [`metricConfiguration` Block](#metric_configuration-block) for details. + +### `logGroupConfiguration` Block + +The `logGroupConfiguration` configuration block supports the following arguments: + +* `filter` - (Required) Filter string that specifies which log groups are to share their log events with the monitoring account. See [LogGroupConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_LogGroupConfiguration.html) for details. + +### `metricConfiguration` Block + +The `metricConfiguration` configuration block supports the following arguments: + +* `filter` - (Required) Filter string that specifies which metrics are to be shared with the monitoring account. See [MetricConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_MetricConfiguration.html) for details. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the link. +* `id` - ARN of the link. * `label` - Label that is assigned to this link. * `linkId` - ID string that AWS generated as part of the link ARN. * `sinkArn` - ARN of the sink that is used for this link. 
@@ -102,4 +182,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Link us % terraform import aws_oam_link.example arn:aws:oam:us-west-2:123456789012:link/link-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/oam_sink.html.markdown b/website/docs/cdktf/typescript/r/oam_sink.html.markdown index d9c6ab0752f..83e197721ac 100644 --- a/website/docs/cdktf/typescript/r/oam_sink.html.markdown +++ b/website/docs/cdktf/typescript/r/oam_sink.html.markdown @@ -54,6 +54,7 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Sink. +* `id` - ARN of the Sink. * `sinkId` - ID string that AWS generated as part of the sink ARN. ## Timeouts @@ -96,4 +97,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Sink us % terraform import aws_oam_sink.example arn:aws:oam:us-west-2:123456789012:sink/sink-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/oam_sink_policy.html.markdown b/website/docs/cdktf/typescript/r/oam_sink_policy.html.markdown index 0a156f024b1..62a3f6d2427 100644 --- a/website/docs/cdktf/typescript/r/oam_sink_policy.html.markdown +++ b/website/docs/cdktf/typescript/r/oam_sink_policy.html.markdown @@ -77,6 +77,7 @@ The following arguments are required: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Sink. +* `id` - ARN of the sink to attach this policy to. * `sinkId` - ID string that AWS generated as part of the sink ARN. 
## Timeouts @@ -118,4 +119,4 @@ Using `terraform import`, import CloudWatch Observability Access Manager Sink Po % terraform import aws_oam_sink_policy.example arn:aws:oam:us-west-2:123456789012:sink/sink-id ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/opensearch_domain.html.markdown b/website/docs/cdktf/typescript/r/opensearch_domain.html.markdown index f8d6a002407..d695a2e4128 100644 --- a/website/docs/cdktf/typescript/r/opensearch_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/opensearch_domain.html.markdown @@ -451,6 +451,7 @@ The following arguments are optional: * `engineVersion` - (Optional) Either `Elasticsearch_X.Y` or `OpenSearch_X.Y` to specify the engine version for the Amazon OpenSearch Service domain. For example, `OpenSearch_1.0` or `Elasticsearch_7.9`. See [Creating and managing Amazon OpenSearch Service domains](http://docs.aws.amazon.com/opensearch-service/latest/developerguide/createupdatedomains.html#createdomains). Defaults to the lastest version of OpenSearch. +* `ipAddressType` - (Optional) The IP address type for the endpoint. Valid values are `ipv4` and `dualstack`. * `encryptAtRest` - (Optional) Configuration block for encrypt at rest options. Only available for [certain instance types](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/encryption-at-rest.html). Detailed below. * `logPublishingOptions` - (Optional) Configuration block for publishing slow and application logs to CloudWatch Logs. This block can be declared multiple times, for each log_type, within the same resource. Detailed below. * `nodeToNodeEncryption` - (Optional) Configuration block for node-to-node encryption options. Detailed below. @@ -639,4 +640,4 @@ Using `terraform import`, import OpenSearch domains using the `domainName`. 
For % terraform import aws_opensearch_domain.example domain_name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/quicksight_account_subscription.html.markdown b/website/docs/cdktf/typescript/r/quicksight_account_subscription.html.markdown index a52d923a688..73ae23f9a38 100644 --- a/website/docs/cdktf/typescript/r/quicksight_account_subscription.html.markdown +++ b/website/docs/cdktf/typescript/r/quicksight_account_subscription.html.markdown @@ -56,6 +56,7 @@ The following arguments are optional: * `directoryId` - (Optional) Active Directory ID that is associated with your Amazon QuickSight account. * `emailAddress` - (Optional) Email address of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `firstName` - (Optional) First name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. +* `iamIdentityCenterInstanceArn` - (Optional) The Amazon Resource Name (ARN) for the IAM Identity Center instance. * `lastName` - (Optional) Last name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `readerGroup` - (Optional) Reader group associated with your Active Direcrtory. * `realm` - (Optional) Realm of the Active Directory that is associated with your Amazon QuickSight account. @@ -77,4 +78,4 @@ This resource exports the following attributes in addition to the arguments abov You cannot import this resource. 
- \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_certificate.html.markdown b/website/docs/cdktf/typescript/r/rds_certificate.html.markdown new file mode 100644 index 00000000000..3860899b1d4 --- /dev/null +++ b/website/docs/cdktf/typescript/r/rds_certificate.html.markdown @@ -0,0 +1,77 @@ +--- +subcategory: "RDS (Relational Database)" +layout: "aws" +page_title: "AWS: aws_rds_certificate" +description: |- + Terraform resource for managing an AWS RDS (Relational Database) Certificate. +--- + + + +# Resource: aws_rds_certificate + +Provides a resource to override the system-default Secure Sockets Layer/Transport Layer Security (SSL/TLS) certificate for Amazon RDS for new DB instances in the current AWS region. + +~> **NOTE:** Removing this Terraform resource removes the override. New DB instances will use the system-default certificate for the current AWS region. + +## Example Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { RdsCertificate } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new RdsCertificate(this, "example", { + certificate_identifier: "rds-ca-rsa4096-g1", + }); + } +} + +``` + +## Argument Reference + +The following arguments are required: + +* `certificate_identifier` - (Required) Certificate identifier. For example, `rds-ca-rsa4096-g1`. Refer to [AWS RDS (Relational Database) Certificate Identifier](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.CertificateIdentifier) for more information. 
+ +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the RDS certificate override. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { RdsCertificate } from "./.gen/providers/aws/"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + RdsCertificate.generateConfigForImport(this, "example", "${default}"); + } +} + +``` + +Using `terraform import`, import the RDS certificate override. For example: + +```console +% terraform import aws_rds_certificate.example default +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster.html.markdown index 691a1347693..2578d941fe1 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster.html.markdown @@ -18,6 +18,8 @@ Changes to an RDS Cluster can occur when you manually change a parameter, such a ~> **Note:** Multi-AZ DB clusters are supported only for the MySQL and PostgreSQL DB engines. + +~> **Note:** `caCertificateIdentifier` is only supported for Multi-AZ DB clusters. + ~> **Note:** using `applyImmediately` can result in a brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][4] for more information. + +~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text. 
@@ -327,7 +329,7 @@ the AWS official documentation : * [create-db-cluster](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-cluster.html) * [modify-db-cluster](https://docs.aws.amazon.com/cli/latest/reference/rds/modify-db-cluster.html) -This argument supports the following arguments: +This resource supports the following arguments: * `allocatedStorage` - (Optional, Required for Multi-AZ DB cluster) The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster. * `allowMajorVersionUpgrade` - (Optional) Enable to allow major engine version upgrades when changing engine versions. Defaults to `false`. @@ -338,6 +340,7 @@ This argument supports the following arguments: A maximum of 3 AZs can be configured. * `backtrackWindow` - (Optional) Target backtrack window, in seconds. Only available for `aurora` and `aurora-mysql` engines currently. To disable backtracking, set this value to `0`. Defaults to `0`. Must be between `0` and `259200` (72 hours) * `backupRetentionPeriod` - (Optional) Days to retain backups for. Default `1` +* `caCertificateIdentifier` - (Optional) The CA certificate identifier to use for the DB cluster's server certificate. * `clusterIdentifierPrefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `clusterIdentifier`. * `clusterIdentifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `copyTagsToSnapshot` – (Optional, boolean) Copy all Cluster `tags` to snapshots. Default is `false`. 
@@ -559,6 +562,8 @@ This resource exports the following attributes in addition to the arguments abov * `clusterMembers` – List of RDS Instances that are a part of this cluster * `availabilityZones` - Availability zone of the instance * `backupRetentionPeriod` - Backup retention period +* `caCertificateIdentifier` - CA identifier of the CA certificate used for the DB instance's server certificate +* `caCertificateValidTill` - Expiration date of the DB instance’s server certificate * `preferredBackupWindow` - Daily time range during which the backups happen * `preferredMaintenanceWindow` - Maintenance window * `endpoint` - DNS address of the RDS instance @@ -630,4 +635,4 @@ Using `terraform import`, import RDS Clusters using the `clusterIdentifier`. For % terraform import aws_rds_cluster.aurora_cluster aurora-prod-cluster ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_activity_stream.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_activity_stream.html.markdown index 43044ab78d5..9d6c9b055a9 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_activity_stream.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_activity_stream.html.markdown @@ -87,7 +87,7 @@ class MyConvertedCode extends TerraformStack { For more detailed documentation about each argument, refer to the [AWS official documentation][3]. -This argument supports the following arguments: +This resource supports the following arguments: * `resourceArn` - (Required, Forces new resources) The Amazon Resource Name (ARN) of the DB cluster. * `mode` - (Required, Forces new resources) Specifies the mode of the database activity stream. Database events such as a change or access generate an activity stream event. The database session can handle these events either synchronously or asynchronously. One of: `sync`, `async`. 
@@ -137,4 +137,4 @@ Using `terraform import`, import RDS Aurora Cluster Database Activity Streams us [2]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_StartActivityStream.html [3]: https://docs.aws.amazon.com/cli/latest/reference/rds/start-activity-stream.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_endpoint.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_endpoint.html.markdown index 28a99e622e5..20e3d8b4fe0 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_endpoint.html.markdown @@ -10,7 +10,7 @@ description: |- # Resource: aws_rds_cluster_endpoint -Manages an RDS Aurora Cluster Endpoint. +Manages an RDS Aurora Cluster Custom Endpoint. You can refer to the [User Guide][1]. ## Example Usage @@ -88,7 +88,7 @@ class MyConvertedCode extends TerraformStack { For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-cluster-endpoint.html). -This argument supports the following arguments: +This resource supports the following arguments: * `clusterIdentifier` - (Required, Forces new resources) The cluster identifier. * `clusterEndpointIdentifier` - (Required, Forces new resources) The identifier to use for the new endpoint. This parameter is stored as a lowercase string. 
@@ -140,4 +140,4 @@ Using `terraform import`, import RDS Clusters Endpoint using the `clusterEndpoin [1]: https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/Aurora.Overview.Endpoints.html#Aurora.Endpoints.Cluster - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/rds_cluster_instance.html.markdown b/website/docs/cdktf/typescript/r/rds_cluster_instance.html.markdown index f32fbff6b96..c498c4bead6 100644 --- a/website/docs/cdktf/typescript/r/rds_cluster_instance.html.markdown +++ b/website/docs/cdktf/typescript/r/rds_cluster_instance.html.markdown @@ -76,7 +76,7 @@ class MyConvertedCode extends TerraformStack { For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html). -This argument supports the following arguments: +This resource supports the following arguments: * `applyImmediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. * `autoMinorVersionUpgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Default `true`. @@ -173,4 +173,4 @@ Using `terraform import`, import RDS Cluster Instances using the `identifier`. 
F % terraform import aws_rds_cluster_instance.prod_instance_1 aurora-cluster-instance-1 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule.html.markdown b/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule.html.markdown index 1e7518e6ed1..f484aa80674 100644 --- a/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/route53_resolver_firewall_rule.html.markdown @@ -72,8 +72,10 @@ This resource supports the following arguments: * `blockOverrideTtl` - (Required if `blockResponse` is `OVERRIDE`) The recommended amount of time, in seconds, for the DNS resolver or web browser to cache the provided override record. Minimum value of 0. Maximum value of 604800. * `blockResponse` - (Required if `action` is `BLOCK`) The way that you want DNS Firewall to block the request. Valid values: `NODATA`, `NXDOMAIN`, `OVERRIDE`. * `firewallDomainListId` - (Required) The ID of the domain list that you want to use in the rule. +* `firewallDomainRedirectionAction` - (Optional) Evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, or ALIAS. Valid values are `INSPECT_REDIRECTION_DOMAIN` and `TRUST_REDIRECTION_DOMAIN`. Default value is `INSPECT_REDIRECTION_DOMAIN`. * `firewallRuleGroupId` - (Required) The unique identifier of the firewall rule group where you want to create the rule. * `priority` - (Required) The setting that determines the processing order of the rule in the rule group. DNS Firewall processes the rules in a rule group by order of priority, starting from the lowest setting. +* `qType` - (Optional) The query type you want the rule to evaluate. 
Additional details can be found [here](https://en.wikipedia.org/wiki/List_of_DNS_record_types) ## Attribute Reference @@ -113,4 +115,4 @@ Using `terraform import`, import Route 53 Resolver DNS Firewall rules using the % terraform import aws_route53_resolver_firewall_rule.example rslvr-frg-0123456789abcdef:rslvr-fdl-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53domains_delegation_signer_record.html.markdown b/website/docs/cdktf/typescript/r/route53domains_delegation_signer_record.html.markdown index b4b675d2c52..979a47467ef 100644 --- a/website/docs/cdktf/typescript/r/route53domains_delegation_signer_record.html.markdown +++ b/website/docs/cdktf/typescript/r/route53domains_delegation_signer_record.html.markdown @@ -132,7 +132,7 @@ class MyConvertedCode extends TerraformStack { ## Argument Reference -This argument supports the following arguments: +This resource supports the following arguments: * `domainName` - (Required) The name of the domain that will have its parent DNS zone updated with the Delegation Signer record. * `signingAttributes` - (Required) The information about a key, including the algorithm, public key-value, and flags. 
@@ -185,4 +185,4 @@ Using `terraform import`, import delegation signer records using the domain name % terraform import aws_route53domains_delegation_signer_record.example example.com,40DE3534F5324DBDAC598ACEDB5B1E26A5368732D9C791D1347E4FBDDF6FC343 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route53domains_registered_domain.html.markdown b/website/docs/cdktf/typescript/r/route53domains_registered_domain.html.markdown index 586bc31cde4..84153dc2f03 100644 --- a/website/docs/cdktf/typescript/r/route53domains_registered_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/route53domains_registered_domain.html.markdown @@ -53,7 +53,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** You must specify the same privacy setting for `adminPrivacy`, `registrantPrivacy` and `techPrivacy`. -This argument supports the following arguments: +This resource supports the following arguments: * `adminContact` - (Optional) Details about the domain administrative contact. See [Contact Blocks](#contact-blocks) for more details. * `adminPrivacy` - (Optional) Whether domain administrative contact information is concealed from WHOIS queries. Default: `true`. @@ -151,4 +151,4 @@ Using `terraform import`, import domains using the domain name. For example: % terraform import aws_route53domains_registered_domain.example example.com ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/route_table_association.html.markdown b/website/docs/cdktf/typescript/r/route_table_association.html.markdown index 21486015d1e..bb846b8237a 100644 --- a/website/docs/cdktf/typescript/r/route_table_association.html.markdown +++ b/website/docs/cdktf/typescript/r/route_table_association.html.markdown @@ -61,7 +61,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** Please note that one of either `subnetId` or `gatewayId` is required. 
-This argument supports the following arguments: +This resource supports the following arguments: * `subnetId` - (Optional) The subnet ID to create an association. Conflicts with `gatewayId`. * `gatewayId` - (Optional) The gateway ID to create an association. Conflicts with `subnetId`. @@ -149,4 +149,4 @@ With EC2 Internet Gateways: % terraform import aws_route_table_association.assoc igw-01b3a60780f8d034a/rtb-656c65616e6f72 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/s3_bucket_object_lock_configuration.html.markdown b/website/docs/cdktf/typescript/r/s3_bucket_object_lock_configuration.html.markdown index affe0634d83..7f246854992 100644 --- a/website/docs/cdktf/typescript/r/s3_bucket_object_lock_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/s3_bucket_object_lock_configuration.html.markdown @@ -74,8 +74,8 @@ This resource supports the following arguments: * `expectedBucketOwner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `objectLockEnabled` - (Optional, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. Defaults to `Enabled`. Valid values: `Enabled`. * `rule` - (Optional) Configuration block for specifying the Object Lock rule for the specified object. [See below](#rule). -* `token` - (Optional) Token to allow Object Lock to be enabled for an existing bucket. You must contact AWS support for the bucket's "Object Lock token". -The token is generated in the back-end when [versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/manage-versioning-examples.html) is enabled on a bucket. For more details on versioning, see the [`aws_s3_bucket_versioning` resource](s3_bucket_versioning.html.markdown). +* `token` - (Optional, Deprecated) This argument is deprecated and no longer needed to enable Object Lock. 
+To enable Object Lock for an existing bucket, you must first enable versioning on the bucket and then enable Object Lock. For more details on versioning, see the [`aws_s3_bucket_versioning` resource](s3_bucket_versioning.html.markdown). ### rule @@ -146,4 +146,4 @@ If the owner (account ID) of the source bucket differs from the account used to % terraform import aws_s3_bucket_object_lock_configuration.example bucket-name,123456789012 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_domain.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_domain.html.markdown index 66e8ca0bb13..10d79ebb07f 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_domain.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_domain.html.markdown @@ -86,9 +86,11 @@ class MyConvertedCode extends TerraformStack { const example = new SagemakerAppImageConfig(this, "example", { appImageConfigName: "example", kernelGatewayImageConfig: { - kernelSpec: { - name: "example", - }, + kernelSpec: [ + { + name: "example", + }, + ], }, }); const awsSagemakerImageExample = new SagemakerImage(this, "example_1", { @@ -268,6 +270,7 @@ The following arguments are optional: * `defaultResourceSpec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [`defaultResourceSpec` Block](#default_resource_spec-block) below. * `lifecycleConfigArns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. +* `customImage` - (Optional) A list of custom SageMaker images that are configured to run as a CodeEditor app. see [`customImage` Block](#custom_image-block) below. ##### `codeRepository` Block @@ -364,4 +367,4 @@ Using `terraform import`, import SageMaker Domains using the `id`. 
For example: % terraform import aws_sagemaker_domain.test_domain d-8jgsjtilstu8 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_endpoint_configuration.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_endpoint_configuration.html.markdown index 2ef1d499adc..8933e13fd68 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_endpoint_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_endpoint_configuration.html.markdown @@ -66,6 +66,7 @@ This resource supports the following arguments: * `containerStartupHealthCheckTimeoutInSeconds` - (Optional) The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. For more information about health check, see [How Your Container Should Respond to Health Check (Ping) Requests](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html#your-algorithms-inference-algo-ping-requests). Valid values between `60` and `3600`. * `coreDumpConfig` - (Optional) Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. * `enableSsmAccess` - (Optional) You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoints. +* `inferenceAmiVersion` - (Optional) Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. * `initialInstanceCount` - (Optional) Initial number of instances used for auto-scaling. * `instanceType` - (Optional) The type of instance to start. 
* `initialVariantWeight` - (Optional) Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to `1.0`. @@ -102,7 +103,7 @@ This resource supports the following arguments: #### capture_options -* `captureMode` - (Required) Specifies the data to be captured. Should be one of `Input` or `Output`. +* `captureMode` - (Required) Specifies the data to be captured. Should be one of `Input`, `Output` or `InputAndOutput`. #### capture_content_type_header @@ -171,4 +172,4 @@ Using `terraform import`, import endpoint configurations using the `name`. For e % terraform import aws_sagemaker_endpoint_configuration.test_endpoint_config endpoint-config-foo ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_user_profile.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_user_profile.html.markdown index 44c9c3d87bc..a411429c9f3 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_user_profile.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_user_profile.html.markdown @@ -108,6 +108,7 @@ This resource supports the following arguments: * `defaultResourceSpec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default_resource_spec) below. * `lifecycleConfigArns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. +* `customImage` - (Optional) A list of custom SageMaker images that are configured to run as a CodeEditor app. see [Custom Image](#custom_image) below. #### r_session_app_settings @@ -242,4 +243,4 @@ Using `terraform import`, import SageMaker User Profiles using the `arn`. 
For ex % terraform import aws_sagemaker_user_profile.test_user_profile arn:aws:sagemaker:us-west-2:123456789012:user-profile/domain-id/profile-name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_workforce.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_workforce.html.markdown index 9406cc3ddaf..d4918864788 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_workforce.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_workforce.html.markdown @@ -122,12 +122,14 @@ This resource supports the following arguments: ### Oidc Config +* `authenticationRequestExtraParams` - (Optional) A string to string map of identifiers specific to the custom identity provider (IdP) being used. * `authorizationEndpoint` - (Required) The OIDC IdP authorization endpoint used to configure your private workforce. * `clientId` - (Required) The OIDC IdP client ID used to configure your private workforce. * `clientSecret` - (Required) The OIDC IdP client secret used to configure your private workforce. * `issuer` - (Required) The OIDC IdP issuer used to configure your private workforce. * `jwksUri` - (Required) The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. * `logoutEndpoint` - (Required) The OIDC IdP logout endpoint used to configure your private workforce. +* `scope` - (Optional) An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access. * `tokenEndpoint` - (Required) The OIDC IdP token endpoint used to configure your private workforce. * `userInfoEndpoint` - (Required) The OIDC IdP user information endpoint used to configure your private workforce. @@ -178,4 +180,4 @@ Using `terraform import`, import SageMaker Workforces using the `workforceName`. 
% terraform import aws_sagemaker_workforce.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sagemaker_workteam.html.markdown b/website/docs/cdktf/typescript/r/sagemaker_workteam.html.markdown index 23a266510b6..d1aee2214bb 100644 --- a/website/docs/cdktf/typescript/r/sagemaker_workteam.html.markdown +++ b/website/docs/cdktf/typescript/r/sagemaker_workteam.html.markdown @@ -89,6 +89,7 @@ This resource supports the following arguments: * `workteamName` - (Required) The name of the workforce. * `memberDefinition` - (Required) A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognitoMemberDefinition`. For workforces created using your own OIDC identity provider (IdP) use `oidcMemberDefinition`. Do not provide input for both of these parameters in a single request. see [Member Definition](#member-definition) details below. * `notificationConfiguration` - (Optional) Configures notification of workers regarding available or expiring work items. see [Notification Configuration](#notification-configuration) details below. +* `workerAccessConfiguration` - (Optional) Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL. see [Worker Access Configuration](#worker-access-configuration) details below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### Member Definition @@ -110,6 +111,19 @@ This resource supports the following arguments: * `notificationTopicArn` - (Required) The ARN for the SNS topic to which notifications should be published. +### Worker Access Configuration + +* `s3Presign` - (Required) Defines any Amazon S3 resource constraints. see [S3 Presign](#s3-presign) details below. + +#### S3 Presign + +* `iamPolicyConstraints` - (Required) Use this parameter to specify the allowed request source. Possible sources are either SourceIp or VpcSourceIp. see [IAM Policy Constraints](#iam-policy-constraints) details below. + +##### IAM Policy Constraints + +* `sourceIp` - (Optional) When SourceIp is Enabled the worker's IP address when a task is rendered in the worker portal is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. This IP address is checked by Amazon S3 and must match in order for the Amazon S3 resource to be rendered in the worker portal. Valid values are `Enabled` or `Disabled` +* `vpcSourceIp` - (Optional) When VpcSourceIp is Enabled the worker's IP address when a task is rendered in private worker portal inside the VPC is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. To render the task successfully Amazon S3 checks that the presigned URL is being accessed over an Amazon S3 VPC Endpoint, and that the worker's IP address matches the IP address in the IAM policy. To learn more about configuring private worker portal, see [Use Amazon VPC mode from a private worker portal](https://docs.aws.amazon.com/sagemaker/latest/dg/samurai-vpc-worker-portal.html). Valid values are `Enabled` or `Disabled` + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: @@ -147,4 +161,4 @@ Using `terraform import`, import SageMaker Workteams using the `workteamName`. 
F % terraform import aws_sagemaker_workteam.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/service_discovery_service.html.markdown b/website/docs/cdktf/typescript/r/service_discovery_service.html.markdown index 6f214e16413..ff7f22df1e9 100644 --- a/website/docs/cdktf/typescript/r/service_discovery_service.html.markdown +++ b/website/docs/cdktf/typescript/r/service_discovery_service.html.markdown @@ -119,44 +119,44 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: -* `name` - (Required, ForceNew) The name of the service. +* `name` - (Required, Forces new resource) The name of the service. * `description` - (Optional) The description of the service. -* `dnsConfig` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. -* `healthCheckConfig` - (Optional) A complex type that contains settings for an optional health check. Only for Public DNS namespaces. -* `forceDestroy` - (Optional, Default:false ) A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. -* `healthCheckCustomConfig` - (Optional, ForceNew) A complex type that contains settings for ECS managed health checks. +* `dnsConfig` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. See [`dnsConfig` Block](#dns_config-block) for details. +* `healthCheckConfig` - (Optional) A complex type that contains settings for an optional health check. Only for Public DNS namespaces. See [`healthCheckConfig` Block](#health_check_config-block) for details. 
+* `forceDestroy` - (Optional) A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. Defaults to `false`. +* `healthCheckCustomConfig` - (Optional, Forces new resource) A complex type that contains settings for ECS managed health checks. See [`healthCheckCustomConfig` Block](#health_check_custom_config-block) for details. * `namespaceId` - (Optional) The ID of the namespace that you want to use to create the service. * `type` - (Optional) If present, specifies that the service instances are only discoverable using the `DiscoverInstances` API operation. No DNS records is registered for the service instances. The only valid value is `HTTP`. * `tags` - (Optional) A map of tags to assign to the service. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### dns_config +### `dnsConfig` Block -This argument supports the following arguments: +The `dnsConfig` configuration block supports the following arguments: -* `namespaceId` - (Required, ForceNew) The ID of the namespace to use for DNS configuration. -* `dnsRecords` - (Required) An array that contains one DnsRecord object for each resource record set. +* `namespaceId` - (Required, Forces new resource) The ID of the namespace to use for DNS configuration. +* `dnsRecords` - (Required) An array that contains one DnsRecord object for each resource record set. See [`dnsRecords` Block](#dns_records-block) for details. * `routingPolicy` - (Optional) The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. 
Valid Values: MULTIVALUE, WEIGHTED -#### dns_records +#### `dnsRecords` Block -This argument supports the following arguments: +The `dnsRecords` configuration block supports the following arguments: * `ttl` - (Required) The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. -* `type` - (Required, ForceNew) The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME +* `type` - (Required, Forces new resource) The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME -### health_check_config +### `healthCheckConfig` Block -This argument supports the following arguments: +The `healthCheckConfig` configuration block supports the following arguments: * `failureThreshold` - (Optional) The number of consecutive health checks. Maximum value of 10. * `resourcePath` - (Optional) The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. -* `type` - (Optional, ForceNew) The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP +* `type` - (Optional, Forces new resource) The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP -### health_check_custom_config +### `healthCheckCustomConfig` Block -This argument supports the following arguments: +The `healthCheckCustomConfig` configuration block supports the following arguments: -* `failureThreshold` - (Optional, ForceNew) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. 
+* `failureThreshold` - (Optional, Forces new resource) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. ## Attribute Reference @@ -198,4 +198,4 @@ Using `terraform import`, import Service Discovery Service using the service ID. % terraform import aws_service_discovery_service.example 0123456789 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_product.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_product.html.markdown index 14337a7e033..837a74e0fba 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_product.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_product.html.markdown @@ -55,7 +55,7 @@ The following arguments are required: * `name` - (Required) Name of the product. * `owner` - (Required) Owner of the product. -* `provisioningArtifactParameters` - (Required) Configuration block for provisioning artifact (i.e., version) parameters. Detailed below. +* `provisioningArtifactParameters` - (Required) Configuration block for provisioning artifact (i.e., version) parameters. See [`provisioningArtifactParameters` Block](#provisioning_artifact_parameters-block) for details. * `type` - (Required) Type of product. See [AWS Docs](https://docs.aws.amazon.com/servicecatalog/latest/dg/API_CreateProduct.html#API_CreateProduct_RequestSyntax) for valid list of values. The following arguments are optional: @@ -68,9 +68,9 @@ The following arguments are optional: * `supportUrl` - (Optional) Contact URL for product support. * `tags` - (Optional) Tags to apply to the product. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
-### provisioning_artifact_parameters +### `provisioningArtifactParameters` Block -This argument supports the following arguments: +The `provisioningArtifactParameters` configuration block supports the following arguments: * `description` - (Optional) Description of the provisioning artifact (i.e., version), including how it differs from the previous provisioning artifact. * `disableTemplateValidation` - (Optional) Whether AWS Service Catalog stops validating the specified provisioning artifact template even if it is invalid. @@ -131,4 +131,4 @@ Using `terraform import`, import `aws_servicecatalog_product` using the product % terraform import aws_servicecatalog_product.example prod-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/servicecatalog_provisioned_product.html.markdown b/website/docs/cdktf/typescript/r/servicecatalog_provisioned_product.html.markdown index d92070b7029..53eaa3a0942 100644 --- a/website/docs/cdktf/typescript/r/servicecatalog_provisioned_product.html.markdown +++ b/website/docs/cdktf/typescript/r/servicecatalog_provisioned_product.html.markdown @@ -72,24 +72,24 @@ The following arguments are optional: * `productName` - (Optional) Name of the product. You must provide `productId` or `productName`, but not both. * `provisioningArtifactId` - (Optional) Identifier of the provisioning artifact. For example, `pa-4abcdjnxjj6ne`. You must provide the `provisioningArtifactId` or `provisioningArtifactName`, but not both. * `provisioningArtifactName` - (Optional) Name of the provisioning artifact. You must provide the `provisioningArtifactId` or `provisioningArtifactName`, but not both. -* `provisioningParameters` - (Optional) Configuration block with parameters specified by the administrator that are required for provisioning the product. See details below. 
+* `provisioningParameters` - (Optional) Configuration block with parameters specified by the administrator that are required for provisioning the product. See [`provisioningParameters` Block](#provisioning_parameters-block) for details. * `retainPhysicalResources` - (Optional) _Only applies to deleting._ Whether to delete the Service Catalog provisioned product but leave the CloudFormation stack, stack set, or the underlying resources of the deleted provisioned product. The default value is `false`. -* `stackSetProvisioningPreferences` - (Optional) Configuration block with information about the provisioning preferences for a stack set. See details below. +* `stackSetProvisioningPreferences` - (Optional) Configuration block with information about the provisioning preferences for a stack set. See [`stackSetProvisioningPreferences` Block](#stack_set_provisioning_preferences-block) for details. * `tags` - (Optional) Tags to apply to the provisioned product. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### provisioning_parameters +### `provisioningParameters` Block -This argument supports the following arguments: +The `provisioningParameters` configuration block supports the following arguments: * `key` - (Required) Parameter key. * `usePreviousValue` - (Optional) Whether to ignore `value` and keep the previous parameter value. Ignored when initially provisioning a product. * `value` - (Optional) Parameter value. -### stack_set_provisioning_preferences +### `stackSetProvisioningPreferences` Block All of the `stackSetProvisioningPreferences` are only applicable to a `CFN_STACKSET` provisioned product type. 
-This argument supports the following arguments: +The `stackSetProvisioningPreferences` configuration block supports the following arguments: * `accounts` - (Optional) One or more AWS accounts that will have access to the provisioned product. The AWS accounts specified should be within the list of accounts in the STACKSET constraint. To get the list of accounts in the STACKSET constraint, use the `aws_servicecatalog_provisioning_parameters` data source. If no values are specified, the default value is all accounts from the STACKSET constraint. * `failureToleranceCount` - (Optional) Number of accounts, per region, for which this operation can fail before AWS Service Catalog stops the operation in that region. If the operation is stopped in a region, AWS Service Catalog doesn't attempt the operation in any subsequent regions. You must specify either `failureToleranceCount` or `failureTolerancePercentage`, but not both. The default value is 0 if no value is specified. @@ -168,4 +168,4 @@ Using `terraform import`, import `aws_servicecatalog_provisioned_product` using % terraform import aws_servicecatalog_provisioned_product.example pp-dnigbtea24ste ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/sesv2_configuration_set.html.markdown b/website/docs/cdktf/typescript/r/sesv2_configuration_set.html.markdown index 40e7f0e2dae..18ae995c0a4 100644 --- a/website/docs/cdktf/typescript/r/sesv2_configuration_set.html.markdown +++ b/website/docs/cdktf/typescript/r/sesv2_configuration_set.html.markdown @@ -56,51 +56,61 @@ class MyConvertedCode extends TerraformStack { This resource supports the following arguments: * `configurationSetName` - (Required) The name of the configuration set. -* `deliveryOptions` - (Optional) An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. 
-* `reputationOptions` - (Optional) An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. -* `sendingOptions` - (Optional) An object that defines whether or not Amazon SES can send email that you send using the configuration set. -* `suppressionOptions` - (Optional) An object that contains information about the suppression list preferences for your account. +* `deliveryOptions` - (Optional) An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. See [`deliveryOptions` Block](#delivery_options-block) for details. +* `reputationOptions` - (Optional) An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. See [`reputationOptions` Block](#reputation_options-block) for details. +* `sendingOptions` - (Optional) An object that defines whether or not Amazon SES can send email that you send using the configuration set. See [`sendingOptions` Block](#sending_options-block) for details. +* `suppressionOptions` - (Optional) An object that contains information about the suppression list preferences for your account. See [`suppressionOptions` Block](#suppression_options-block) for details. * `tags` - (Optional) A map of tags to assign to the service. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `trackingOptions` - (Optional) An object that defines the open and click tracking options for emails that you send using the configuration set. -* `vdmOptions` - (Optional) An object that defines the VDM settings that apply to emails that you send using the configuration set. 
+* `trackingOptions` - (Optional) An object that defines the open and click tracking options for emails that you send using the configuration set. See [`trackingOptions` Block](#tracking_options-block) for details. +* `vdmOptions` - (Optional) An object that defines the VDM settings that apply to emails that you send using the configuration set. See [`vdmOptions` Block](#vdm_options-block) for details. -### delivery_options +### `deliveryOptions` Block -This argument supports the following arguments: +The `deliveryOptions` configuration block supports the following arguments: * `sendingPoolName` - (Optional) The name of the dedicated IP pool to associate with the configuration set. * `tlsPolicy` - (Optional) Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Valid values: `REQUIRE`, `OPTIONAL`. -### reputation_options +### `reputationOptions` Block -This argument supports the following arguments: +The `reputationOptions` configuration block supports the following arguments: * `reputationMetricsEnabled` - (Optional) If `true`, tracking of reputation metrics is enabled for the configuration set. If `false`, tracking of reputation metrics is disabled for the configuration set. -### sending_options +### `sendingOptions` Block -This argument supports the following arguments: +The `sendingOptions` configuration block supports the following arguments: * `sendingEnabled` - (Optional) If `true`, email sending is enabled for the configuration set. If `false`, email sending is disabled for the configuration set. -### suppression_options +### `suppressionOptions` Block + +The `suppressionOptions` configuration block supports the following arguments: * `suppressedReasons` - (Optional) A list that contains the reasons that email addresses are automatically added to the suppression list for your account. Valid values: `BOUNCE`, `COMPLAINT`. 
-### tracking_options +### `trackingOptions` Block + +The `trackingOptions` configuration block supports the following arguments: * `customRedirectDomain` - (Required) The domain to use for tracking open and click events. -### vdm_options +### `vdmOptions` Block + +The `vdmOptions` configuration block supports the following arguments: -* `dashboardOptions` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Dashboard. -* `guardianOptions` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Guardian. +* `dashboardOptions` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Dashboard. See [`dashboardOptions` Block](#dashboard_options-block) for details. +* `guardianOptions` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Guardian. See [`guardianOptions` Block](#guardian_options-block) for details. -### dashboard_options +### `dashboardOptions` Block + +The `dashboardOptions` configuration block supports the following arguments: * `engagementMetrics` - (Optional) Specifies the status of your VDM engagement metrics collection. Valid values: `ENABLED`, `DISABLED`. -### guardian_options +### `guardianOptions` Block + +The `guardianOptions` configuration block supports the following arguments: * `optimizedSharedDelivery` - (Optional) Specifies the status of your VDM optimized shared delivery. Valid values: `ENABLED`, `DISABLED`. 
@@ -140,4 +150,4 @@ Using `terraform import`, import SESv2 (Simple Email V2) Configuration Set using % terraform import aws_sesv2_configuration_set.example example ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssm_association.html.markdown b/website/docs/cdktf/typescript/r/ssm_association.html.markdown index eb022ed851a..8406ad7b5f0 100644 --- a/website/docs/cdktf/typescript/r/ssm_association.html.markdown +++ b/website/docs/cdktf/typescript/r/ssm_association.html.markdown @@ -150,6 +150,7 @@ This resource supports the following arguments: * `parameters` - (Optional) A block of arbitrary string parameters to pass to the SSM document. * `scheduleExpression` - (Optional) A [cron or rate expression](https://docs.aws.amazon.com/systems-manager/latest/userguide/reference-cron-and-rate-expressions.html) that specifies when the association runs. * `syncCompliance` - (Optional) The mode for generating association compliance. You can specify `AUTO` or `MANUAL`. +* `tags` - (Optional) A map of tags to assign to the object. If configured with a provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `targets` - (Optional) A block containing the targets of the SSM association. Targets are documented below. AWS currently supports a maximum of 5 targets. * `waitForSuccessTimeoutSeconds` - (Optional) The number of seconds to wait for the association status to be `Success`. If `Success` status is not reached within the given time, create opration will fail. @@ -173,6 +174,7 @@ This resource exports the following attributes in addition to the arguments abov * `instanceId` - The instance id that the SSM document was applied to. * `name` - The name of the SSM document to apply. * `parameters` - Additional parameters passed to the SSM document. 
+* `tagsAll` - A map of tags assigned to the resource, including those inherited from the provider [`defaultTags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import @@ -206,4 +208,4 @@ Using `terraform import`, import SSM associations using the `associationId`. For % terraform import aws_ssm_association.test-association 10abcdef-0abc-1234-5678-90abcdef123456 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/ssoadmin_account_assignment.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_account_assignment.html.markdown index af59681bbc6..76f478bd008 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_account_assignment.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_account_assignment.html.markdown @@ -101,28 +101,26 @@ import { SsoadminPermissionSet } from "./.gen/providers/aws/ssoadmin-permission- class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - const example = new IdentitystoreGroup(this, "example", { - description: "Admin Group", - displayName: "Admin", - identityStoreId: Token.asString( - Fn.lookupNested(Fn.tolist(ssoInstance.identityStoreIds), ["0"]) - ), - }); - const dataAwsSsoadminInstancesExample = new DataAwsSsoadminInstances( + const example = new DataAwsSsoadminInstances(this, "example", {}); + const awsIdentitystoreGroupExample = new IdentitystoreGroup( this, "example_1", - {} + { + description: "Admin Group", + displayName: "Admin", + identityStoreId: Token.asString( + Fn.lookupNested(Fn.tolist(example.identityStoreIds), ["0"]) + ), + } ); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ - dataAwsSsoadminInstancesExample.overrideLogicalId("example"); + awsIdentitystoreGroupExample.overrideLogicalId("example"); const awsSsoadminPermissionSetExample = new SsoadminPermissionSet( this, "example_2", { instanceArn: Token.asString( - Fn.lookupNested(Fn.tolist(dataAwsSsoadminInstancesExample.arns), [ - "0", - ]) + Fn.lookupNested(Fn.tolist(example.arns), ["0"]) ), name: "Example", } @@ -131,10 +129,10 @@ class MyConvertedCode extends TerraformStack { awsSsoadminPermissionSetExample.overrideLogicalId("example"); new SsoadminAccountAssignment(this, "account_assignment", { instanceArn: Token.asString( - Fn.lookupNested(Fn.tolist(dataAwsSsoadminInstancesExample.arns), ["0"]) + Fn.lookupNested(Fn.tolist(example.arns), ["0"]) ), permissionSetArn: Token.asString(awsSsoadminPermissionSetExample.arn), - principalId: example.groupId, + principalId: Token.asString(awsIdentitystoreGroupExample.groupId), principalType: "GROUP", targetId: "123456789012", targetType: "AWS_ACCOUNT", @@ -143,9 +141,7 @@ class MyConvertedCode extends TerraformStack { new SsoadminManagedPolicyAttachment(this, "example_4", { dependsOn: [awsSsoadminAccountAssignmentExample], instanceArn: Token.asString( - Fn.lookupNested(Fn.tolist(dataAwsSsoadminInstancesExample.arns), [ - "0", - ]) + Fn.lookupNested(Fn.tolist(example.arns), ["0"]) ), managedPolicyArn: "arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup", permissionSetArn: Token.asString(awsSsoadminPermissionSetExample.arn), @@ -213,4 +209,4 @@ Using `terraform import`, import SSO Account Assignments using the `principalId` % terraform import aws_ssoadmin_account_assignment.example f81d4fae-7dec-11d0-a765-00a0c91e6bf6,GROUP,1234567890,AWS_ACCOUNT,arn:aws:sso:::permissionSet/ssoins-0123456789abcdef/ps-0123456789abcdef,arn:aws:sso:::instance/ssoins-0123456789abcdef ``` - \ No newline at end of file + \ No newline at end of file diff --git 
a/website/docs/cdktf/typescript/r/ssoadmin_managed_policy_attachment.html.markdown b/website/docs/cdktf/typescript/r/ssoadmin_managed_policy_attachment.html.markdown index 99cb334a686..a282d7900a8 100644 --- a/website/docs/cdktf/typescript/r/ssoadmin_managed_policy_attachment.html.markdown +++ b/website/docs/cdktf/typescript/r/ssoadmin_managed_policy_attachment.html.markdown @@ -80,28 +80,26 @@ import { SsoadminPermissionSet } from "./.gen/providers/aws/ssoadmin-permission- class MyConvertedCode extends TerraformStack { constructor(scope: Construct, name: string) { super(scope, name); - const example = new IdentitystoreGroup(this, "example", { - description: "Admin Group", - displayName: "Admin", - identityStoreId: Token.asString( - Fn.lookupNested(Fn.tolist(ssoInstance.identityStoreIds), ["0"]) - ), - }); - const dataAwsSsoadminInstancesExample = new DataAwsSsoadminInstances( + const example = new DataAwsSsoadminInstances(this, "example", {}); + const awsIdentitystoreGroupExample = new IdentitystoreGroup( this, "example_1", - {} + { + description: "Admin Group", + displayName: "Admin", + identityStoreId: Token.asString( + Fn.lookupNested(Fn.tolist(example.identityStoreIds), ["0"]) + ), + } ); /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ - dataAwsSsoadminInstancesExample.overrideLogicalId("example"); + awsIdentitystoreGroupExample.overrideLogicalId("example"); const awsSsoadminPermissionSetExample = new SsoadminPermissionSet( this, "example_2", { instanceArn: Token.asString( - Fn.lookupNested(Fn.tolist(dataAwsSsoadminInstancesExample.arns), [ - "0", - ]) + Fn.lookupNested(Fn.tolist(example.arns), ["0"]) ), name: "Example", } @@ -110,10 +108,10 @@ class MyConvertedCode extends TerraformStack { awsSsoadminPermissionSetExample.overrideLogicalId("example"); new SsoadminAccountAssignment(this, "account_assignment", { instanceArn: Token.asString( - Fn.lookupNested(Fn.tolist(dataAwsSsoadminInstancesExample.arns), ["0"]) + Fn.lookupNested(Fn.tolist(example.arns), ["0"]) ), permissionSetArn: Token.asString(awsSsoadminPermissionSetExample.arn), - principalId: example.groupId, + principalId: Token.asString(awsIdentitystoreGroupExample.groupId), principalType: "GROUP", targetId: "123456789012", targetType: "AWS_ACCOUNT", @@ -122,9 +120,7 @@ class MyConvertedCode extends TerraformStack { new SsoadminManagedPolicyAttachment(this, "example_4", { dependsOn: [awsSsoadminAccountAssignmentExample], instanceArn: Token.asString( - Fn.lookupNested(Fn.tolist(dataAwsSsoadminInstancesExample.arns), [ - "0", - ]) + Fn.lookupNested(Fn.tolist(example.arns), ["0"]) ), managedPolicyArn: "arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup", permissionSetArn: Token.asString(awsSsoadminPermissionSetExample.arn), @@ -190,4 +186,4 @@ Using `terraform import`, import SSO Managed Policy Attachments using the `manag % terraform import aws_ssoadmin_managed_policy_attachment.example arn:aws:iam::aws:policy/AlexaForBusinessDeviceSetup,arn:aws:sso:::permissionSet/ssoins-2938j0x8920sbj72/ps-80383020jr9302rk,arn:aws:sso:::instance/ssoins-2938j0x8920sbj72 ``` - \ No newline at end of file + \ No newline at end of file diff --git 
a/website/docs/cdktf/typescript/r/storagegateway_gateway.html.markdown b/website/docs/cdktf/typescript/r/storagegateway_gateway.html.markdown index 77c992ddecd..4b01000bbde 100644 --- a/website/docs/cdktf/typescript/r/storagegateway_gateway.html.markdown +++ b/website/docs/cdktf/typescript/r/storagegateway_gateway.html.markdown @@ -191,7 +191,7 @@ class MyConvertedCode extends TerraformStack { ~> **NOTE:** One of `activationKey` or `gatewayIpAddress` must be provided for resource creation (gateway activation). Neither is required for resource import. If using `gatewayIpAddress`, Terraform must be able to make an HTTP (port 80) GET request to the specified IP address from where it is running. -This argument supports the following arguments: +This resource supports the following arguments: * `gatewayName` - (Required) Name of the gateway. * `gatewayTimezone` - (Required) Time zone for the gateway. The time zone is of the format "GMT", "GMT-hr:mm", or "GMT+hr:mm". For example, `GMT-4:00` indicates the time is 4 hours behind GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule. @@ -321,4 +321,4 @@ class MyConvertedCode extends TerraformStack { ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/verifiedpermissions_identity_source.html.markdown b/website/docs/cdktf/typescript/r/verifiedpermissions_identity_source.html.markdown new file mode 100644 index 00000000000..bc241c17fbb --- /dev/null +++ b/website/docs/cdktf/typescript/r/verifiedpermissions_identity_source.html.markdown @@ -0,0 +1,227 @@ +--- +subcategory: "Verified Permissions" +layout: "aws" +page_title: "AWS: aws_verifiedpermissions_identity_source" +description: |- + Terraform resource for managing an AWS Verified Permissions Identity Source. +--- + + + +# Resource: aws_verifiedpermissions_identity_source + +Terraform resource for managing an AWS Verified Permissions Identity Source. 
+ +## Example Usage + +### Cognito User Pool Configuration Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. + */ +import { CognitoUserPool } from "./.gen/providers/aws/cognito-user-pool"; +import { CognitoUserPoolClient } from "./.gen/providers/aws/cognito-user-pool-client"; +import { VerifiedpermissionsIdentitySource } from "./.gen/providers/aws/verifiedpermissions-identity-source"; +import { VerifiedpermissionsPolicyStore } from "./.gen/providers/aws/verifiedpermissions-policy-store"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new CognitoUserPool(this, "example", { + name: "example", + }); + const awsCognitoUserPoolClientExample = new CognitoUserPoolClient( + this, + "example_1", + { + explicitAuthFlows: ["ADMIN_NO_SRP_AUTH"], + name: "example", + userPoolId: example.id, + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsCognitoUserPoolClientExample.overrideLogicalId("example"); + const awsVerifiedpermissionsPolicyStoreExample = + new VerifiedpermissionsPolicyStore(this, "example_2", { + validationSettings: [ + { + mode: "STRICT", + }, + ], + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsVerifiedpermissionsPolicyStoreExample.overrideLogicalId("example"); + const awsVerifiedpermissionsIdentitySourceExample = + new VerifiedpermissionsIdentitySource(this, "example_3", { + configuration: [ + { + cognitoUserPoolConfiguration: [ + { + clientIds: [Token.asString(awsCognitoUserPoolClientExample.id)], + userPoolArn: example.arn, + }, + ], + }, + ], + policyStoreId: Token.asString( + awsVerifiedpermissionsPolicyStoreExample.id + ), + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsVerifiedpermissionsIdentitySourceExample.overrideLogicalId("example"); + } +} + +``` + +### OpenID Connect Configuration Usage + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { VerifiedpermissionsIdentitySource } from "./.gen/providers/aws/verifiedpermissions-identity-source"; +import { VerifiedpermissionsPolicyStore } from "./.gen/providers/aws/verifiedpermissions-policy-store"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new VerifiedpermissionsPolicyStore(this, "example", { + validationSettings: [ + { + mode: "STRICT", + }, + ], + }); + const awsVerifiedpermissionsIdentitySourceExample = + new VerifiedpermissionsIdentitySource(this, "example_1", { + configuration: [ + { + openIdConnectConfiguration: [ + { + entityIdPrefix: "MyOIDCProvider", + groupConfiguration: [ + { + groupClaim: "groups", + groupEntityType: "MyCorp::UserGroup", + }, + ], + issuer: "https://auth.example.com", + tokenSelection: [ + { + accessTokenOnly: [ + { + audiences: ["https://myapp.example.com"], + principalIdClaim: "sub", + }, + ], + }, + ], + }, + ], + }, + ], + policyStoreId: example.id, + principalEntityType: "MyCorp::User", + }); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsVerifiedpermissionsIdentitySourceExample.overrideLogicalId("example"); + } +} + +``` + +## Argument Reference + +* `policyStoreId` - (Required) Specifies the ID of the policy store in which you want to store this identity source. +* `configuration`- (Required) Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. See [Configuration](#configuration) below. +* `principalEntityType`- (Optional) Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source. + +### Configuration + +* `cognitoUserPoolConfiguration` - (Required) Specifies the configuration details of an Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. 
See [Cognito User Pool Configuration](#cognito-user-pool-configuration) below. +* `openIdConnectConfiguration` - (Required) Specifies the configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. See [Open ID Connect Configuration](#open-id-connect-configuration) below. + +#### Cognito User Pool Configuration + +* `userPoolArn` - (Required) The Amazon Resource Name (ARN) of the Amazon Cognito user pool that contains the identities to be authorized. +* `clientIds` - (Optional) The unique application client IDs that are associated with the specified Amazon Cognito user pool. +* `groupConfiguration` - (Optional) The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source. See [Group Configuration](#group-configuration) below. + +#### Group Configuration + +* `groupEntityType` - (Required) The name of the schema entity type that's mapped to the user pool group. Defaults to `AWS::CognitoGroup`. + +#### Open ID Connect Configuration + +* `issuer` - (Required) The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path `.well-known/openid-configuration`. +* `tokenSelection` - (Required) The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source. See [Token Selection](#token-selection) below. +* `entityIdPrefix` - (Optional) A descriptive string that you want to prefix to user entities from your OIDC identity provider. +* `groupConfiguration` - (Optional) The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source. See [Group Configuration](#open-id-group-configuration) below. + +#### Token Selection + +* `accessTokenOnly` - (Optional) The OIDC configuration for processing access tokens. 
+See [Access Token Only](#access-token-only) below. +* `identityTokenOnly` - (Optional) The OIDC configuration for processing identity (ID) tokens. See [Identity Token Only](#identity-token-only) below. + +#### Access Token Only + +* `audiences` - (Optional) The access token aud claim values that you want to accept in your policy store. +* `principalIdClaim` - (Optional) The claim that determines the principal in OIDC access tokens. + +#### Identity Token Only + +* `clientIds` - (Optional) The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. +* `principalIdClaim` - (Optional) The claim that determines the principal in OIDC identity (ID) tokens. + +#### Open ID Group Configuration + +* `groupClaim` - (Required) The token claim that you want Verified Permissions to interpret as group membership. For example, `groups`. +* `groupEntityType` - (Required) The policy store entity type that you want to map your users' group claim to. For example, `MyCorp::UserGroup`. A group entity type is an entity that can have a user entity type as a member. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `policyId` - The Policy ID of the policy. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Verified Permissions Identity Source using the `policy_store_id:identity_source_id`. For example: + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details.
+ */ +import { VerifiedpermissionsIdentitySource } from "./.gen/providers/aws/verifiedpermissions-identity-source"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + VerifiedpermissionsIdentitySource.generateConfigForImport( + this, + "example", + "policy-store-id-12345678:identity-source-id-12345678" + ); + } +} + +``` + +Using `terraform import`, import Verified Permissions Identity Source using the `policy_store_id:identity_source_id`. For example: + +```console +% terraform import aws_verifiedpermissions_identity_source.example policy-store-id-12345678:identity-source-id-12345678 +``` + + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_endpoint.html.markdown b/website/docs/cdktf/typescript/r/vpc_endpoint.html.markdown index 6de5a830f0e..21034d39398 100644 --- a/website/docs/cdktf/typescript/r/vpc_endpoint.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_endpoint.html.markdown @@ -97,6 +97,41 @@ class MyConvertedCode extends TerraformStack { ``` +### Interface Endpoint Type with User-Defined IP Address + +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { VpcEndpoint } from "./.gen/providers/aws/vpc-endpoint"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + new VpcEndpoint(this, "ec2", { + serviceName: "com.amazonaws.us-west-2.ec2", + subnetConfiguration: [ + { + ipv4: "10.0.1.10", + subnetId: example1.id, + }, + { + ipv4: "10.0.2.10", + subnetId: example2.id, + }, + ], + subnetIds: [example1.id, example2.id], + vpcEndpointType: "Interface", + vpcId: example.id, + }); + } +} + +``` + ### Gateway Load Balancer Endpoint Type ```typescript @@ -198,6 +233,7 @@ Defaults to `false`. * `dnsOptions` - (Optional) The DNS options for the endpoint. See dns_options below. * `ipAddressType` - (Optional) The IP address type for the endpoint. Valid values are `ipv4`, `dualstack`, and `ipv6`. * `routeTableIds` - (Optional) One or more route table IDs. Applicable for endpoints of type `Gateway`. +* `subnetConfiguration` - (Optional) Subnet configuration for the endpoint, used to select specific IPv4 and/or IPv6 addresses to the endpoint. See subnet_configuration below. * `subnetIds` - (Optional) The ID of one or more subnets in which to create a network interface for the endpoint. Applicable for endpoints of type `GatewayLoadBalancer` and `Interface`. Interface type endpoints cannot function without being assigned to a subnet. * `securityGroupIds` - (Optional) The ID of one or more security groups to associate with the network interface. Applicable for endpoints of type `Interface`. If no security groups are specified, the VPC's [default security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html#DefaultSecurityGroup) is associated with the endpoint. @@ -209,6 +245,12 @@ If no security groups are specified, the VPC's [default security group](https:// * `dnsRecordIpType` - (Optional) The DNS records created for the endpoint. Valid values are `ipv4`, `dualstack`, `service-defined`, and `ipv6`. 
* `privateDnsOnlyForInboundResolverEndpoint` - (Optional) Indicates whether to enable private DNS only for inbound endpoints. This option is available only for services that support both gateway and interface endpoints. It routes traffic that originates from the VPC to the gateway endpoint and traffic that originates from on-premises to the interface endpoint. Default is `false`. Can only be specified if private_dns_enabled is `true`. +### subnet_configuration + +* `ipv4` - (Optional) The IPv4 address to assign to the endpoint network interface in the subnet. You must provide an IPv4 address if the VPC endpoint supports IPv4. +* `ipv6` - (Optional) The IPv6 address to assign to the endpoint network interface in the subnet. You must provide an IPv6 address if the VPC endpoint supports IPv6. +* `subnetId` - (Optional) The ID of the subnet. Must have a corresponding subnet in the `subnetIds` argument. + ## Timeouts [Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): @@ -265,4 +307,4 @@ Using `terraform import`, import VPC Endpoints using the VPC endpoint `id`. For % terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_peering_connection.html.markdown b/website/docs/cdktf/typescript/r/vpc_peering_connection.html.markdown index 9836c5e336b..5e6d1e85e79 100644 --- a/website/docs/cdktf/typescript/r/vpc_peering_connection.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_peering_connection.html.markdown @@ -160,7 +160,7 @@ can be done using the [`autoAccept`](vpc_peering_connection.html#auto_accept) at Connection has to be made active manually using other means. See [notes](vpc_peering_connection.html#notes) below for more information.
-This argument supports the following arguments: +This resource supports the following arguments: * `peerOwnerId` - (Optional) The AWS account ID of the target peer VPC. Defaults to the account ID the [AWS provider][1] is currently connected to, so must be managed if connecting cross-account. @@ -239,4 +239,4 @@ Using `terraform import`, import VPC Peering resources using the VPC peering `id [1]: /docs/providers/aws/index.html - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/vpc_security_group_egress_rule.html.markdown b/website/docs/cdktf/typescript/r/vpc_security_group_egress_rule.html.markdown index 1a0f97a95aa..d89dbbfe417 100644 --- a/website/docs/cdktf/typescript/r/vpc_security_group_egress_rule.html.markdown +++ b/website/docs/cdktf/typescript/r/vpc_security_group_egress_rule.html.markdown @@ -49,7 +49,7 @@ class MyConvertedCode extends TerraformStack { ~> **Note** Although `cidrIpv4`, `cidrIpv6`, `prefixListId`, and `referencedSecurityGroupId` are all marked as optional, you *must* provide one of them in order to configure the destination of the traffic. The `fromPort` and `toPort` arguments are required unless `ipProtocol` is set to `-1` or `icmpv6`. -This argument supports the following arguments: +This resource supports the following arguments: * `cidrIpv4` - (Optional) The destination IPv4 CIDR range. * `cidrIpv6` - (Optional) The destination IPv6 CIDR range. 
@@ -102,4 +102,4 @@ Using `terraform import`, import security group egress rules using the `security % terraform import aws_vpc_security_group_egress_rule.example sgr-02108b27edd666983 ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_web_acl.html.markdown b/website/docs/cdktf/typescript/r/wafv2_web_acl.html.markdown index 8078acb1802..ac0ccd0c599 100644 --- a/website/docs/cdktf/typescript/r/wafv2_web_acl.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_web_acl.html.markdown @@ -991,7 +991,7 @@ Inspect a single header. Provide the name of the header to inspect, for example, The `singleHeader` block supports the following arguments: -* `name` - (Optional) Name of the query header to inspect. This setting must be provided as lower case characters. +* `name` - (Required) Name of the query header to inspect. This setting must be provided as lower case characters. ### `singleQueryArgument` Block @@ -999,7 +999,7 @@ Inspect a single query argument. Provide the name of the query argument to inspe The `singleQueryArgument` block supports the following arguments: -* `name` - (Optional) Name of the query header to inspect. This setting must be provided as lower case characters. +* `name` - (Required) Name of the query header to inspect. This setting must be provided as lower case characters. ### `body` Block @@ -1217,4 +1217,4 @@ Using `terraform import`, import WAFv2 Web ACLs using `ID/Name/Scope`. 
For examp % terraform import aws_wafv2_web_acl.example a1b2c3d4-d5f6-7777-8888-9999aaaabbbbcccc/example/REGIONAL ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_web_acl_association.html.markdown b/website/docs/cdktf/typescript/r/wafv2_web_acl_association.html.markdown index c265f7f1b13..305468e2955 100644 --- a/website/docs/cdktf/typescript/r/wafv2_web_acl_association.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_web_acl_association.html.markdown @@ -19,68 +19,97 @@ Creates a WAFv2 Web ACL Association. ## Example Usage -```terraform -resource "aws_api_gateway_rest_api" "example" { - body = jsonencode({ - openapi = "3.0.1" - info = { - title = "example" - version = "1.0" - } - paths = { - "/path1" = { - get = { - x-amazon-apigateway-integration = { - httpMethod = "GET" - payloadFormatVersion = "1.0" - type = "HTTP_PROXY" - uri = "https://ip-ranges.amazonaws.com/ip-ranges.json" - } - } +```typescript +// DO NOT EDIT. Code generated by 'cdktf convert' - Please report bugs at https://cdk.tf/bug +import { Construct } from "constructs"; +import { Fn, Token, TerraformStack } from "cdktf"; +/* + * Provider bindings are generated by running `cdktf get`. + * See https://cdk.tf/provider-generation for more details. 
+ */ +import { ApiGatewayDeployment } from "./.gen/providers/aws/api-gateway-deployment"; +import { ApiGatewayRestApi } from "./.gen/providers/aws/api-gateway-rest-api"; +import { ApiGatewayStage } from "./.gen/providers/aws/api-gateway-stage"; +import { Wafv2WebAcl } from "./.gen/providers/aws/wafv2-web-acl"; +import { Wafv2WebAclAssociation } from "./.gen/providers/aws/wafv2-web-acl-association"; +class MyConvertedCode extends TerraformStack { + constructor(scope: Construct, name: string) { + super(scope, name); + const example = new ApiGatewayRestApi(this, "example", { + body: Token.asString( + Fn.jsonencode({ + info: { + title: "example", + version: "1.0", + }, + openapi: "3.0.1", + paths: { + "/path1": { + get: { + "x-amazon-apigateway-integration": { + httpMethod: "GET", + payloadFormatVersion: "1.0", + type: "HTTP_PROXY", + uri: "https://ip-ranges.amazonaws.com/ip-ranges.json", + }, + }, + }, + }, + }) + ), + name: "example", + }); + const awsWafv2WebAclExample = new Wafv2WebAcl(this, "example_1", { + defaultAction: { + allow: {}, + }, + name: "web-acl-association-example", + scope: "REGIONAL", + visibilityConfig: { + cloudwatchMetricsEnabled: false, + metricName: "friendly-metric-name", + sampledRequestsEnabled: false, + }, + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsWafv2WebAclExample.overrideLogicalId("example"); + const awsApiGatewayDeploymentExample = new ApiGatewayDeployment( + this, + "example_2", + { + lifecycle: { + createBeforeDestroy: true, + }, + restApiId: example.id, + triggers: { + redeployment: Token.asString( + Fn.sha1(Token.asString(Fn.jsonencode(example.body))) + ), + }, } - } - }) - - name = "example" -} - -resource "aws_api_gateway_deployment" "example" { - rest_api_id = aws_api_gateway_rest_api.example.id - - triggers = { - redeployment = sha1(jsonencode(aws_api_gateway_rest_api.example.body)) - } - - lifecycle { - create_before_destroy = true - } -} - -resource "aws_api_gateway_stage" "example" { - deployment_id = aws_api_gateway_deployment.example.id - rest_api_id = aws_api_gateway_rest_api.example.id - stage_name = "example" -} - -resource "aws_wafv2_web_acl" "example" { - name = "web-acl-association-example" - scope = "REGIONAL" - - default_action { - allow {} - } - - visibility_config { - cloudwatch_metrics_enabled = false - metric_name = "friendly-metric-name" - sampled_requests_enabled = false + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsApiGatewayDeploymentExample.overrideLogicalId("example"); + const awsApiGatewayStageExample = new ApiGatewayStage(this, "example_3", { + deploymentId: Token.asString(awsApiGatewayDeploymentExample.id), + restApiId: example.id, + stageName: "example", + }); + /*This allows the Terraform resource name to match the original name. 
You can remove the call if you don't need them to match.*/ + awsApiGatewayStageExample.overrideLogicalId("example"); + const awsWafv2WebAclAssociationExample = new Wafv2WebAclAssociation( + this, + "example_4", + { + resourceArn: Token.asString(awsApiGatewayStageExample.arn), + webAclArn: Token.asString(awsWafv2WebAclExample.arn), + } + ); + /*This allows the Terraform resource name to match the original name. You can remove the call if you don't need them to match.*/ + awsWafv2WebAclAssociationExample.overrideLogicalId("example"); } } -resource "aws_wafv2_web_acl_association" "example" { - resource_arn = aws_api_gateway_stage.example.arn - web_acl_arn = aws_wafv2_web_acl.example.arn -} ``` ## Argument Reference @@ -132,4 +161,4 @@ Using `terraform import`, import WAFv2 Web ACL Association using `WEB_ACL_ARN,RE % terraform import aws_wafv2_web_acl_association.example arn:aws:wafv2:...7ce849ea,arn:aws:apigateway:...ages/name ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/cdktf/typescript/r/wafv2_web_acl_logging_configuration.html.markdown b/website/docs/cdktf/typescript/r/wafv2_web_acl_logging_configuration.html.markdown index d963cfb8397..cf5ec396d6c 100644 --- a/website/docs/cdktf/typescript/r/wafv2_web_acl_logging_configuration.html.markdown +++ b/website/docs/cdktf/typescript/r/wafv2_web_acl_logging_configuration.html.markdown @@ -253,7 +253,7 @@ To redact a single header, provide the name of the header to be redacted. For ex The `singleHeader` block supports the following arguments: -* `name` - (Optional) Name of the query header to redact. This setting must be provided in lowercase characters. +* `name` - (Required) Name of the query header to redact. This setting must be provided in lowercase characters. 
## Attribute Reference @@ -293,4 +293,4 @@ Using `terraform import`, import WAFv2 Web ACL Logging Configurations using the % terraform import aws_wafv2_web_acl_logging_configuration.example arn:aws:wafv2:us-west-2:123456789012:regional/webacl/test-logs/a1b2c3d4-5678-90ab-cdef ``` - \ No newline at end of file + \ No newline at end of file diff --git a/website/docs/d/apigatewayv2_api.html.markdown b/website/docs/d/apigatewayv2_api.html.markdown index 7629375d4e1..07420ec5bc8 100644 --- a/website/docs/d/apigatewayv2_api.html.markdown +++ b/website/docs/d/apigatewayv2_api.html.markdown @@ -23,7 +23,7 @@ data "aws_apigatewayv2_api" "example" { The arguments of this data source act as filters for querying the available APIs in the current region. The given filters must match exactly one API whose data will be exported as attributes. -This argument supports the following arguments: +This data source supports the following arguments: * `api_id` - (Required) API identifier. diff --git a/website/docs/d/appstream_image.html.markdown b/website/docs/d/appstream_image.html.markdown new file mode 100644 index 00000000000..39049797f03 --- /dev/null +++ b/website/docs/d/appstream_image.html.markdown @@ -0,0 +1,74 @@ +--- +subcategory: "AppStream 2.0" +layout: "aws" +page_title: "AWS: aws_appstream_image" +description: |- + Terraform data source for describing an AWS AppStream 2.0 Appstream Image. +--- + +# Data Source: aws_appstream_image + +Terraform data source for managing an AWS AppStream 2.0 Image. + +### Basic Usage + +```terraform +data "aws_appstream_image" "test" { + name = "AppStream-WinServer2019-06-17-2024" + type = "PUBLIC" + most_recent = true +} + +``` + +## Argument Reference + +The following arguments are optional: + +* `name` - Name of the image being searched for. Cannot be used with name_regex or arn. +* `name_regex` - Regular expression name of the image being searched for. Cannot be used with arn or name. +* `arn` - Arn of the image being searched for. 
Cannot be used with name_regex or name. +* `type` - The type of image which must be (PUBLIC, PRIVATE, or SHARED). +* `most_recent` - Boolean that if it is set to true and there are multiple images returned the most recent will be returned. If it is set to false and there are multiple images returned the data source will error. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `application` - An application object that contains the following: + * `app_block_arn` - The app block ARN of the application. + * `created_time` - The time at which the application was created within the app block. + * `description` - The description of the application. + * `display_name` - The application name to display. + * `enabled` - Bool based on if the application is enabled. + * `icon_s3_location` - A list named icon_s3_location that contains the following: + * `s3_bucket` - S3 bucket of the S3 object. + * `s3_key` - S3 key of the S3 object. + * `icon_url` - URL of the application icon. This URL may be time-limited. + * `instance_families` - List of the instance families of the application. + * `launch_parameters` - Arguments that are passed to the application at its launch. + * `launch_path` - Path to the application's executable in the instance. + * `metadata` - String to string map that contains additional attributes used to describe the application. + * `Name` - Name of the application. + * `platforms` - Array of strings describing the platforms on which the application can run. + Values will be from: WINDOWS | WINDOWS_SERVER_2016 | WINDOWS_SERVER_2019 | WINDOWS_SERVER_2022 | AMAZON_LINUX2 + * `working_directory` - Working directory for the application. +* `appstream_agent_version` - Version of the AppStream 2.0 agent to use for instances that are launched from this image. Has a maximum length of 100 characters. +* `arn` - ARN of the image. +* `base_image_arn` - ARN of the image from which the image was created.
+* `created_time` - Time at which this image was created. +* `description` - Description of image. +* `display_name` - Image name to display. +* `image_builder_name` - The name of the image builder that was used to create the private image. If the image is shared, then the value is null. +* `image_builder_supported` - Boolean to indicate whether an image builder can be launched from this image. +* `image error` - Resource error object that describes the error containing the following: + * `error_code` - Error code of the image. Values will be from: IAM_SERVICE_ROLE_MISSING_ENI_DESCRIBE_ACTION | IAM_SERVICE_ROLE_MISSING_ENI_CREATE_ACTION | IAM_SERVICE_ROLE_MISSING_ENI_DELETE_ACTION | NETWORK_INTERFACE_LIMIT_EXCEEDED | INTERNAL_SERVICE_ERROR | IAM_SERVICE_ROLE_IS_MISSING | MACHINE_ROLE_IS_MISSING | STS_DISABLED_IN_REGION | SUBNET_HAS_INSUFFICIENT_IP_ADDRESSES | IAM_SERVICE_ROLE_MISSING_DESCRIBE_SUBNET_ACTION | SUBNET_NOT_FOUND | IMAGE_NOT_FOUND | INVALID_SUBNET_CONFIGURATION | SECURITY_GROUPS_NOT_FOUND | IGW_NOT_ATTACHED | IAM_SERVICE_ROLE_MISSING_DESCRIBE_SECURITY_GROUPS_ACTION | FLEET_STOPPED | FLEET_INSTANCE_PROVISIONING_FAILURE | DOMAIN_JOIN_ERROR_FILE_NOT_FOUND | DOMAIN_JOIN_ERROR_ACCESS_DENIED | DOMAIN_JOIN_ERROR_LOGON_FAILURE | DOMAIN_JOIN_ERROR_INVALID_PARAMETER | DOMAIN_JOIN_ERROR_MORE_DATA | DOMAIN_JOIN_ERROR_NO_SUCH_DOMAIN | DOMAIN_JOIN_ERROR_NOT_SUPPORTED | DOMAIN_JOIN_NERR_INVALID_WORKGROUP_NAME | DOMAIN_JOIN_NERR_WORKSTATION_NOT_STARTED | DOMAIN_JOIN_ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED | DOMAIN_JOIN_NERR_PASSWORD_EXPIRED | DOMAIN_JOIN_INTERNAL_SERVICE_ERROR as the values. + * `error_message` - Error message of the image. + * `error_timestamp` - Time when the error occurred. +* `image_permissions` - List of strings describing the image permissions containing the following: + * `allow_fleet` - Boolean indicating if the image can be used for a fleet. + * `allow_image_builder` - Indicates whether the image can be used for an image builder.
+* `platform` - Operating system platform of the image. Values will be from: WINDOWS | WINDOWS_SERVER_2016 | WINDOWS_SERVER_2019 | WINDOWS_SERVER_2022 | AMAZON_LINUX2 +* `public_image_released_date` - Release date of base image if public. For private images, it is the release date of the base image that it was created from. +* `state` - Current state of image. Image starts in PENDING state which changes to AVAILABLE if creation passes and FAILED if it fails. Values will be from: PENDING | AVAILABLE | FAILED | COPYING | DELETING | CREATING | IMPORTING. +* `visibility` - Visibility type enum indicating whether the image is PUBLIC, PRIVATE, or SHARED. Valid values include: PUBLIC | PRIVATE | SHARED. diff --git a/website/docs/d/autoscaling_group.html.markdown b/website/docs/d/autoscaling_group.html.markdown index 0f504cd1cd9..a3eeb424916 100644 --- a/website/docs/d/autoscaling_group.html.markdown +++ b/website/docs/d/autoscaling_group.html.markdown @@ -80,6 +80,7 @@ interpolation. * `instance_generations` - List of instance generation names. * `local_storage` - Indicates whether instance types with instance store volumes are included, excluded, or required. * `local_storage_types` - List of local storage type names. + * `max_spot_price_as_percentage_of_optimal_on_demand_price` - Price protection threshold for Spot Instances. * `memory_gib_per_vcpu` - List of objects describing the minimum and maximum amount of memory (GiB) per vCPU. * `min` - Minimum. * `max` - Maximum. diff --git a/website/docs/d/backup_plan.html.markdown b/website/docs/d/backup_plan.html.markdown index a1b18fce908..511b76d30d7 100644 --- a/website/docs/d/backup_plan.html.markdown +++ b/website/docs/d/backup_plan.html.markdown @@ -30,5 +30,6 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the backup plan. * `name` - Display name of a backup plan. +* `rule` - Rules of a backup plan. 
* `tags` - Metadata that you can assign to help organize the plans you create. * `version` - Unique, randomly generated, Unicode, UTF-8 encoded string that serves as the version ID of the backup plan. diff --git a/website/docs/d/bedrock_custom_model.html.markdown b/website/docs/d/bedrock_custom_model.html.markdown index fb90ff3a7dc..6896f912a48 100644 --- a/website/docs/d/bedrock_custom_model.html.markdown +++ b/website/docs/d/bedrock_custom_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_model" description: |- diff --git a/website/docs/d/bedrock_custom_models.html.markdown b/website/docs/d/bedrock_custom_models.html.markdown index eb55165956d..1c3619b7ca6 100644 --- a/website/docs/d/bedrock_custom_models.html.markdown +++ b/website/docs/d/bedrock_custom_models.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_models" description: |- diff --git a/website/docs/d/bedrock_foundation_model.html.markdown b/website/docs/d/bedrock_foundation_model.html.markdown index 9d830af415a..385e389cee3 100644 --- a/website/docs/d/bedrock_foundation_model.html.markdown +++ b/website/docs/d/bedrock_foundation_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_foundation_model" description: |- diff --git a/website/docs/d/bedrock_foundation_models.html.markdown b/website/docs/d/bedrock_foundation_models.html.markdown index 3a7c27daa41..ff436f1b633 100644 --- a/website/docs/d/bedrock_foundation_models.html.markdown +++ b/website/docs/d/bedrock_foundation_models.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_foundation_models" description: |- diff --git a/website/docs/d/cloudfront_origin_access_control.html.markdown 
b/website/docs/d/cloudfront_origin_access_control.html.markdown new file mode 100644 index 00000000000..c9b58cb8d83 --- /dev/null +++ b/website/docs/d/cloudfront_origin_access_control.html.markdown @@ -0,0 +1,36 @@ +--- +subcategory: "CloudFront" +layout: "aws" +page_title: "AWS: aws_cloudfront_origin_access_control" +description: |- + Use this data source to retrieve information for an Amazon CloudFront origin access control config. +--- + +# Data Source: aws_cloudfront_origin_access_control + +Use this data source to retrieve information for an Amazon CloudFront origin access control config. + +## Example Usage + +The below example retrieves a CloudFront origin access control config. + +```terraform +data "aws_cloudfront_origin_access_control" "example" { + id = "E2T5VTFBZJ3BJB" +} +``` + +## Argument Reference + +* `id` (Required) - The identifier for the origin access control settings. For example: `E2T5VTFBZJ3BJB`. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `description` - A description of the origin access control. +* `etag` - Current version of the origin access control's information. For example: `E2QWRUHAPOMQZL`. +* `name` - A name to identify the origin access control. +* `origin_access_control_origin_type` - The type of origin that this origin access control is for. +* `signing_behavior` - Specifies which requests CloudFront signs. +* `signing_protocol` - The signing protocol of the origin access control, which determines how CloudFront signs (authenticates) requests.
diff --git a/website/docs/d/cognito_user_pool.html.markdown b/website/docs/d/cognito_user_pool.html.markdown new file mode 100644 index 00000000000..6f2c959d0f2 --- /dev/null +++ b/website/docs/d/cognito_user_pool.html.markdown @@ -0,0 +1,118 @@ +--- +subcategory: "Cognito IDP (Identity Provider)" +layout: "aws" +page_title: "AWS: aws_cognito_user_pool" +description: |- + Terraform data source for managing an AWS Cognito User Pool. +--- + +# Data Source: aws_cognito_user_pool + +Terraform data source for managing an AWS Cognito User Pool. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_cognito_user_pool" "example" { + user_pool_id = "us-west-2_aaaaaaaaa" +} +``` + +## Argument Reference + +The following arguments are required: + +* `user_pool_id` - (Required) The cognito pool ID + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the User Pool. +* [account_recovery_setting](#account-recover-setting) - The available verified method a user can use to recover their password when they call ForgotPassword. You can use this setting to define a preferred method when a user has more than one method available. With this setting, SMS doesn't qualify for a valid password recovery mechanism if the user also has SMS multi-factor authentication (MFA) activated. In the absence of this setting, Amazon Cognito uses the legacy behavior to determine the recovery method where SMS is preferred through email. +* [admin_create_user_config](#admin-create-user-config) - The configuration for AdminCreateUser requests. +* `auto_verified_attributes` - The attributes that are auto-verified in a user pool. +* `creation_date` - The date and time, in ISO 8601 format, when the item was created. +* `custom_domain` - A custom domain name that you provide to Amazon Cognito. This parameter applies only if you use a custom domain to host the sign-up and sign-in pages for your application. 
An example of a custom domain name might be auth.example.com. +* `deletion_protection` - When active, DeletionProtection prevents accidental deletion of your user pool. Before you can delete a user pool that you have protected against deletion, you must deactivate this feature. +* [device_configuration](#device-configuration) - The device-remembering configuration for a user pool. A null value indicates that you have deactivated device remembering in your user pool. +* `domain` - The domain prefix, if the user pool has a domain associated with it. +* [email_configuration](#email-configuration) - The email configuration of your user pool. The email configuration type sets your preferred sending method, AWS Region, and sender for messages from your user pool. +* `estimated_number_of_users` - A number estimating the size of the user pool. +* [lambda_config](#lambda-config) - The AWS Lambda triggers associated with the user pool. +* `last_modified_date` - The date and time, in ISO 8601 format, when the item was modified. +* `mfa_configuration` - Can be one of the following values: `OFF` | `ON` | `OPTIONAL` +* `name` - The name of the user pool. +* [schema_attributes](#schema-attributes) - A list of the user attributes and their properties in your user pool. The attribute schema contains standard attributes, custom attributes with a custom: prefix, and developer attributes with a dev: prefix. For more information, see User pool attributes. +* `sms_authentication_message` - The contents of the SMS authentication message. +* `sms_configuration_failure` - The reason why the SMS configuration can't send the messages to your users. +* `sms_verification_message` - The contents of the SMS authentication message. +* `user_pool_tags` - The tags that are assigned to the user pool. A tag is a label that you can apply to user pools to categorize and manage them in different ways, such as by purpose, owner, environment, or other criteria. 
+* `username_attributes` - Specifies whether a user can use an email address or phone number as a username when they sign up. + +### account recover setting + +* [recovery_mechanism](#recovery-mechanism) - Details about an individual recovery mechanism. + +### recovery mechanism + +* `name` - Name of the recovery mechanism (e.g., email, phone number). +* `priority` - Priority of this mechanism in the recovery process (lower numbers are higher priority). + +### admin create user config + +* `allow_admin_create_user_only` - Whether only admins can create users. +* `unused_account_validity_days` - Number of days an unconfirmed user account remains valid. +* [invite_message_template](#invite-message-template) - Templates for invitation messages. + +### invite message template + +* `email_message` - Email message content. +* `email_subject` - Email message subject. +* `sms_message` - SMS message content. + +### device configuration + +* `challenge_required_on_new_device` - Whether a challenge is required on new devices. +* `device_only_remembered_on_user_prompt` - Whether devices are only remembered if the user prompts it. + +### email configuration + +* `configuration_set` - Configuration set used for sending emails. +* `email_sending_account` - Email sending account. +* `from` - Email sender address. +* `reply_to_email_address` - Reply-to email address. +* `source_arn` - Source Amazon Resource Name (ARN) for emails. + +### lambda config + +* [custom_email_sender](#lambda-function) - Configuration for a custom email sender Lambda function. +* [custom_sms_sender](#lambda-function) - Configuration for a custom SMS sender Lambda function +* [pre_token_generation_config](#lambda-function) - Configuration for a Lambda function that executes before token generation. + +### lambda function + +* `lambda_arn` - ARN of the Lambda function. +* `lambda_version` - Version of the Lambda function. 
+ +### schema attributes + +* `attribute_data_type` - Data type of the attribute (e.g., string, number). +* `developer_only_attribute` - Whether the attribute is for developer use only. +* `mutable` - Whether the attribute can be changed after user creation. +* `name` - Name of the attribute. +* `required` - Whether the attribute is required during user registration. +* [number_attribute_constraints](#number-attribute-constraints) - Constraints for numeric attributes. +* [string_attribute_constraints](#string-attribute-constraints) - Constraints for string attributes. + +### number attribute constraints + +* `max_value` - Maximum allowed value. +* `min_value` - Minimum allowed value. + +### string attribute constraints + +* `max_length` - Maximum allowed length. +* `min_length` - Minimum allowed length. diff --git a/website/docs/d/connect_contact_flow.html.markdown b/website/docs/d/connect_contact_flow.html.markdown index 699c5780a78..8c5d6f976eb 100644 --- a/website/docs/d/connect_contact_flow.html.markdown +++ b/website/docs/d/connect_contact_flow.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_contact_flow" "test" { ~> **NOTE:** `instance_id` and one of either `name` or `contact_flow_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `contact_flow_id` - (Optional) Returns information on a specific Contact Flow by contact flow id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance diff --git a/website/docs/d/connect_contact_flow_module.html.markdown b/website/docs/d/connect_contact_flow_module.html.markdown index 9b984df18d5..cd6a0bd277c 100644 --- a/website/docs/d/connect_contact_flow_module.html.markdown +++ b/website/docs/d/connect_contact_flow_module.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_contact_flow_module" "example" { ~> **NOTE:** `instance_id` and one of either `name` or `contact_flow_module_id` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `contact_flow_module_id` - (Optional) Returns information on a specific Contact Flow Module by contact flow module id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance diff --git a/website/docs/d/connect_hours_of_operation.html.markdown b/website/docs/d/connect_hours_of_operation.html.markdown index 0d80251e9b1..cbb0fc50d64 100644 --- a/website/docs/d/connect_hours_of_operation.html.markdown +++ b/website/docs/d/connect_hours_of_operation.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_hours_of_operation" "test" { ~> **NOTE:** `instance_id` and one of either `name` or `hours_of_operation_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `hours_of_operation_id` - (Optional) Returns information on a specific Hours of Operation by hours of operation id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance diff --git a/website/docs/d/connect_instance.html.markdown b/website/docs/d/connect_instance.html.markdown index 56530d9fb29..21de387283f 100644 --- a/website/docs/d/connect_instance.html.markdown +++ b/website/docs/d/connect_instance.html.markdown @@ -32,7 +32,7 @@ data "aws_connect_instance" "foo" { ~> **NOTE:** One of either `instance_id` or `instance_alias` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `instance_id` - (Optional) Returns information on a specific connect instance by id diff --git a/website/docs/d/connect_queue.html.markdown b/website/docs/d/connect_queue.html.markdown index 252d869ec7b..f890a6d4ac5 100644 --- a/website/docs/d/connect_queue.html.markdown +++ b/website/docs/d/connect_queue.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_queue" "example" { ~> **NOTE:** `instance_id` and one of either `name` or `queue_id` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `queue_id` - (Optional) Returns information on a specific Queue by Queue id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance diff --git a/website/docs/d/connect_quick_connect.html.markdown b/website/docs/d/connect_quick_connect.html.markdown index 549b5f45bc9..d8f5a1e5aa9 100644 --- a/website/docs/d/connect_quick_connect.html.markdown +++ b/website/docs/d/connect_quick_connect.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_quick_connect" "example" { ~> **NOTE:** `instance_id` and one of either `name` or `quick_connect_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `quick_connect_id` - (Optional) Returns information on a specific Quick Connect by Quick Connect id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance diff --git a/website/docs/d/connect_routing_profile.html.markdown b/website/docs/d/connect_routing_profile.html.markdown index befe6c3ce31..8b038549c62 100644 --- a/website/docs/d/connect_routing_profile.html.markdown +++ b/website/docs/d/connect_routing_profile.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_routing_profile" "example" { ~> **NOTE:** `instance_id` and one of either `name` or `routing_profile_id` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `instance_id` - Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Routing Profile by name diff --git a/website/docs/d/connect_security_profile.html.markdown b/website/docs/d/connect_security_profile.html.markdown index ace74b991ae..b5f174a2616 100644 --- a/website/docs/d/connect_security_profile.html.markdown +++ b/website/docs/d/connect_security_profile.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_security_profile" "example" { ~> **NOTE:** `instance_id` and one of either `name` or `security_profile_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `security_profile_id` - (Optional) Returns information on a specific Security Profile by Security Profile id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance diff --git a/website/docs/d/connect_user.html.markdown b/website/docs/d/connect_user.html.markdown index 0a786bff723..b07fb0a0f20 100644 --- a/website/docs/d/connect_user.html.markdown +++ b/website/docs/d/connect_user.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_user" "example" { ~> **NOTE:** `instance_id` and one of either `name` or `user_id` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific User by name diff --git a/website/docs/d/connect_user_hierarchy_group.html.markdown b/website/docs/d/connect_user_hierarchy_group.html.markdown index 2e245151214..c2f126ef5f4 100644 --- a/website/docs/d/connect_user_hierarchy_group.html.markdown +++ b/website/docs/d/connect_user_hierarchy_group.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_user_hierarchy_group" "example" { ~> **NOTE:** `instance_id` and one of either `name` or `hierarchy_group_id` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `hierarchy_group_id` - (Optional) Returns information on a specific hierarchy group by hierarchy group id * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance diff --git a/website/docs/d/connect_vocabulary.html.markdown b/website/docs/d/connect_vocabulary.html.markdown index a6018d72d7e..3dcb8042592 100644 --- a/website/docs/d/connect_vocabulary.html.markdown +++ b/website/docs/d/connect_vocabulary.html.markdown @@ -34,7 +34,7 @@ data "aws_connect_vocabulary" "example" { ~> **NOTE:** `instance_id` and one of either `name` or `vocabulary_id` is required. 
-This argument supports the following arguments: +This data source supports the following arguments: * `instance_id` - (Required) Reference to the hosting Amazon Connect Instance * `name` - (Optional) Returns information on a specific Vocabulary by name diff --git a/website/docs/d/db_snapshot.html.markdown b/website/docs/d/db_snapshot.html.markdown index 181cc3d8eb5..c2b0922fdd2 100644 --- a/website/docs/d/db_snapshot.html.markdown +++ b/website/docs/d/db_snapshot.html.markdown @@ -49,7 +49,7 @@ resource "aws_db_instance" "dev" { ~> **NOTE:** One of either `db_instance_identifier` or `db_snapshot_identifier` is required. -This argument supports the following arguments: +This data source supports the following arguments: * `most_recent` - (Optional) If more than one result is returned, use the most recent Snapshot. diff --git a/website/docs/d/ec2_transit_gateway_peering_attachments.html.markdown b/website/docs/d/ec2_transit_gateway_peering_attachments.html.markdown new file mode 100644 index 00000000000..65a32adeb34 --- /dev/null +++ b/website/docs/d/ec2_transit_gateway_peering_attachments.html.markdown @@ -0,0 +1,61 @@ +--- +subcategory: "Transit Gateway" +layout: "aws" +page_title: "AWS: aws_ec2_transit_gateway_peering_attachments" +description: |- + Get information on EC2 Transit Gateway Peering Attachments +--- + +# Data Source: aws_ec2_transit_gateway_peering_attachments + +Get information on EC2 Transit Gateway Peering Attachments. 
+ +## Example Usage + +### All Resources + +```hcl +data "aws_ec2_transit_gateway_peering_attachments" "test" {} +``` + +### By Filter + +```hcl +data "aws_ec2_transit_gateway_peering_attachments" "filtered" { + filter { + name = "state" + values = ["pendingAcceptance"] + } +} + +data "aws_ec2_transit_gateway_peering_attachment" "unit" { + count = length(data.aws_ec2_transit_gateway_peering_attachments.filtered.ids) + id = data.aws_ec2_transit_gateway_peering_attachments.filtered.ids[count.index] +} +``` + +## Argument Reference + +This data source supports the following arguments: + +* `filter` - (Optional) One or more configuration blocks containing name-values filters. Detailed below. + +### filter Argument Reference + +* `name` - (Required) Name of the field to filter by, as defined by [the underlying AWS API][1] +* `values` - (Required) List of one or more values for the filter. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `ids` - A list of all attachment IDs matching the filter. You can retrieve more information about the attachment using the [aws_ec2_transit_gateway_peering_attachment][2] data source, searching by identifier.
+ +[1]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeTransitGatewayPeeringAttachments.html +[2]: https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ec2_transit_gateway_peering_attachment + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `read` - (Default `20m`) diff --git a/website/docs/d/ecr_lifecycle_policy_document.html.markdown b/website/docs/d/ecr_lifecycle_policy_document.html.markdown index 398d9001e97..8e1a57df3c0 100644 --- a/website/docs/d/ecr_lifecycle_policy_document.html.markdown +++ b/website/docs/d/ecr_lifecycle_policy_document.html.markdown @@ -45,14 +45,14 @@ Each document configuration may have one or more `rule` blocks, which each accep * `action` (Optional) - Specifies the action type. * `type` (Required) - The supported value is `expire`. * `description` (Optional) - Describes the purpose of a rule within a lifecycle policy. -* `priority` (Required) - Sets the order in which rules are evaluated, lowest to highest. When you add rules to a lifecycle policy, you must give them each a unique value for `priority`. Values do not need to be sequential across rules in a policy. A rule with a `tag_status` value of any must have the highest value for `priority` and be evaluated last. +* `priority` (Required) - Sets the order in which rules are evaluated, lowest to highest. When you add rules to a lifecycle policy, you must give them each a unique value for `priority`. Values do not need to be sequential across rules in a policy. A rule with a `tag_status` value of "any" must have the highest value for `priority` and be evaluated last. * `selection` (Required) - Collects parameters describing the selection criteria for the ECR lifecycle policy: - * `tag_status` (Required) - Determines whether the lifecycle policy rule that you are adding specifies a tag for an image. Acceptable options are tagged, untagged, or any. 
If you specify any, then all images have the rule applied to them. If you specify tagged, then you must also specify a `tag_prefix_list` value. If you specify untagged, then you must omit `tag_prefix_list`. - * `tag_pattern_list` (Required if `tag_status` is set to tagged and `tag_prefix_list` isn't specified) - You must specify a comma-separated list of image tag patterns that may contain wildcards (*) on which to take action with your lifecycle policy. For example, if your images are tagged as prod, prod1, prod2, and so on, you would use the tag pattern list prod* to specify all of them. If you specify multiple tags, only the images with all specified tags are selected. There is a maximum limit of four wildcards (*) per string. For example, ["*test*1*2*3", "test*1*2*3*"] is valid but ["test*1*2*3*4*5*6"] is invalid. - * `tag_prefix_list` (Required if `tag_status` is set to tagged and `tag_pattern_list` isn't specified) - You must specify a comma-separated list of image tag prefixes on which to take action with your lifecycle policy. For example, if your images are tagged as prod, prod1, prod2, and so on, you would use the tag prefix prod to specify all of them. If you specify multiple tags, only images with all specified tags are selected. - * `count_type` (Required) - Specify a count type to apply to the images. If `count_type` is set to imageCountMoreThan, you also specify `count_number` to create a rule that sets a limit on the number of images that exist in your repository. If `count_type` is set to sinceImagePushed, you also specify `count_unit` and `count_number` to specify a time limit on the images that exist in your repository. - * `count_unit` (Required if `count_type` is set to sinceImagePushed) - Specify a count unit of days to indicate that as the unit of time, in addition to `count_number`, which is the number of days. - * `count_number` (Required) - Specify a count number. 
If the `count_type` used is imageCountMoreThan, then the value is the maximum number of images that you want to retain in your repository. If the `count_type` used is sinceImagePushed, then the value is the maximum age limit for your images. + * `tag_status` (Required) - Determines whether the lifecycle policy rule that you are adding specifies a tag for an image. Acceptable options are "tagged", "untagged", or "any". If you specify "any", then all images have the rule applied to them. If you specify "tagged", then you must also specify a `tag_prefix_list` value. If you specify "untagged", then you must omit `tag_prefix_list`. + * `tag_pattern_list` (Required if `tag_status` is set to "tagged" and `tag_prefix_list` isn't specified) - You must specify a comma-separated list of image tag patterns that may contain wildcards (\*) on which to take action with your lifecycle policy. For example, if your images are tagged as `prod`, `prod1`, `prod2`, and so on, you would use the tag pattern list `["prod*"]` to specify all of them. If you specify multiple tags, only the images with all specified tags are selected. There is a maximum limit of four wildcards (\*) per string. For example, `["*test*1*2*3", "test*1*2*3*"]` is valid but `["test*1*2*3*4*5*6"]` is invalid. + * `tag_prefix_list` (Required if `tag_status` is set to "tagged" and `tag_pattern_list` isn't specified) - You must specify a comma-separated list of image tag prefixes on which to take action with your lifecycle policy. For example, if your images are tagged as `prod`, `prod1`, `prod2`, and so on, you would use the tag prefix "prod" to specify all of them. If you specify multiple tags, only images with all specified tags are selected. + * `count_type` (Required) - Specify a count type to apply to the images. If `count_type` is set to "imageCountMoreThan", you also specify `count_number` to create a rule that sets a limit on the number of images that exist in your repository.
If `count_type` is set to "sinceImagePushed", you also specify `count_unit` and `count_number` to specify a time limit on the images that exist in your repository. + * `count_unit` (Required if `count_type` is set to "sinceImagePushed") - Specify a count unit of days to indicate that as the unit of time, in addition to `count_number`, which is the number of days. + * `count_number` (Required) - Specify a count number. If the `count_type` used is "imageCountMoreThan", then the value is the maximum number of images that you want to retain in your repository. If the `count_type` used is "sinceImagePushed", then the value is the maximum age limit for your images. ## Attribute Reference diff --git a/website/docs/d/fsx_ontap_file_system.html.markdown b/website/docs/d/fsx_ontap_file_system.html.markdown index e75fbd3ef00..a36ddb08ab8 100644 --- a/website/docs/d/fsx_ontap_file_system.html.markdown +++ b/website/docs/d/fsx_ontap_file_system.html.markdown @@ -35,7 +35,9 @@ In addition to all arguments above, the following attributes are exported: * `daily_automatic_backup_start_time` - The preferred time (in `HH:MM` format) to take daily automatic backups, in the UTC time zone. * `deployment_type` - The file system deployment type. * `disk_iops_configuration` - The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system, specifying the number of provisioned IOPS and the provision mode. See [Disk IOPS](#disk-iops) Below. -* `dns_name` - DNS name for the file system (e.g. `fs-12345678.corp.example.com`). +* `dns_name` - DNS name for the file system. + + **Note:** This attribute does not apply to FSx for ONTAP file systems and is consequently not set. You can access your FSx for ONTAP file system and volumes via a [Storage Virtual Machine (SVM)](fsx_ontap_storage_virtual_machine.html) using its DNS name or IP address. * `endpoint_ip_address_range` - (Multi-AZ only) Specifies the IP address range in which the endpoints to access your file system exist. 
* `endpoints` - The Management and Intercluster FileSystemEndpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. See [FileSystemEndpoints](#file-system-endpoints) below. * `ha_pairs` - The number of HA pairs for the file system. diff --git a/website/docs/d/lakeformation_data_lake_settings.html.markdown b/website/docs/d/lakeformation_data_lake_settings.html.markdown index 1a1cdaef72a..9a90d7e4938 100644 --- a/website/docs/d/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/d/lakeformation_data_lake_settings.html.markdown @@ -36,6 +36,7 @@ This data source exports the following attributes in addition to the arguments a * `allow_external_data_filtering` - Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `external_data_filtering_allow_list` - A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `authorized_session_tag_value_list` - Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. +* `allow_full_table_external_data_access` - Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. ### create_database_default_permissions diff --git a/website/docs/d/launch_configuration.html.markdown b/website/docs/d/launch_configuration.html.markdown index b88554f70cc..bedeea69b00 100644 --- a/website/docs/d/launch_configuration.html.markdown +++ b/website/docs/d/launch_configuration.html.markdown @@ -41,6 +41,7 @@ This data source exports the following attributes in addition to the arguments a * `http_put_response_hop_limit` - The desired HTTP PUT response hop limit for instance metadata requests. * `security_groups` - List of associated Security Group IDS. 
* `associate_public_ip_address` - Whether a Public IP address is associated with the instance. +* `primary_ipv6` - Whether the first IPv6 GUA will be made the primary IPv6 address. * `user_data` - User Data of the instance. * `enable_monitoring` - Whether Detailed Monitoring is Enabled. * `ebs_optimized` - Whether the launched EC2 instance will be EBS-optimized. diff --git a/website/docs/d/msk_cluster.html.markdown b/website/docs/d/msk_cluster.html.markdown index 42e33e43690..8eddada87fc 100644 --- a/website/docs/d/msk_cluster.html.markdown +++ b/website/docs/d/msk_cluster.html.markdown @@ -38,6 +38,7 @@ This data source exports the following attributes in addition to the arguments a * `bootstrap_brokers_sasl_iam` - One or more DNS names (or IP addresses) and SASL IAM port pairs. For example, `b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9098`. This attribute will have a value if `encryption_info.0.encryption_in_transit.0.client_broker` is set to `TLS_PLAINTEXT` or `TLS` and `client_authentication.0.sasl.0.iam` is set to `true`. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. * `bootstrap_brokers_sasl_scram` - One or more DNS names (or IP addresses) and SASL SCRAM port pairs. For example, `b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9096`. This attribute will have a value if `encryption_info.0.encryption_in_transit.0.client_broker` is set to `TLS_PLAINTEXT` or `TLS` and `client_authentication.0.sasl.0.scram` is set to `true`. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. 
* `bootstrap_brokers_tls` - One or more DNS names (or IP addresses) and TLS port pairs. For example, `b-1.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-2.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094,b-3.exampleClusterName.abcde.c2.kafka.us-east-1.amazonaws.com:9094`. This attribute will have a value if `encryption_info.0.encryption_in_transit.0.client_broker` is set to `TLS_PLAINTEXT` or `TLS`. The resource sorts the list alphabetically. AWS may not always return all endpoints so the values may not be stable across applies. +* `broker_node_group_info` - Configuration block for the broker nodes of the Kafka cluster. * `cluster_uuid` - UUID of the MSK cluster, for use in IAM policies. * `kafka_version` - Apache Kafka version. * `number_of_broker_nodes` - Number of broker nodes in the cluster. diff --git a/website/docs/d/mskconnect_connector.html.markdown b/website/docs/d/mskconnect_connector.html.markdown index 4504eba101b..3b2ac700e8b 100644 --- a/website/docs/d/mskconnect_connector.html.markdown +++ b/website/docs/d/mskconnect_connector.html.markdown @@ -30,4 +30,5 @@ This data source exports the following attributes in addition to the arguments a * `arn` - ARN of the connector. * `description` - Summary description of the connector. +* `tags` - A map of tags assigned to the resource. * `version` - Current version of the connector. diff --git a/website/docs/d/mskconnect_custom_plugin.html.markdown b/website/docs/d/mskconnect_custom_plugin.html.markdown index ec2f1f1ee41..862696f93ff 100644 --- a/website/docs/d/mskconnect_custom_plugin.html.markdown +++ b/website/docs/d/mskconnect_custom_plugin.html.markdown @@ -32,3 +32,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - a summary description of the custom plugin. * `latest_revision` - an ID of the latest successfully created revision of the custom plugin. * `state` - the state of the custom plugin. 
+* `tags` - A map of tags assigned to the resource. diff --git a/website/docs/d/mskconnect_worker_configuration.html.markdown b/website/docs/d/mskconnect_worker_configuration.html.markdown index ebca2b24581..8fa497cf586 100644 --- a/website/docs/d/mskconnect_worker_configuration.html.markdown +++ b/website/docs/d/mskconnect_worker_configuration.html.markdown @@ -32,3 +32,4 @@ This data source exports the following attributes in addition to the arguments a * `description` - a summary description of the worker configuration. * `latest_revision` - an ID of the latest successfully created revision of the worker configuration. * `properties_file_content` - contents of connect-distributed.properties file. +* `tags` - A map of tags assigned to the resource. diff --git a/website/docs/d/networkmanager_core_network_policy_document.html.markdown b/website/docs/d/networkmanager_core_network_policy_document.html.markdown index ebab0508e4f..1d5ea8919bd 100644 --- a/website/docs/d/networkmanager_core_network_policy_document.html.markdown +++ b/website/docs/d/networkmanager_core_network_policy_document.html.markdown @@ -170,6 +170,7 @@ The following arguments are available: * `core_network_configuration` (Required) - The core network configuration section defines the Regions where a core network should operate. For AWS Regions that are defined in the policy, the core network creates a Core Network Edge where you can connect attachments. After it's created, each Core Network Edge is peered with every other defined Region and is configured with consistent segment and routing across all Regions. Regions cannot be removed until the associated attachments are deleted. Detailed below. * `segments` (Required) - Block argument that defines the different segments in the network. Here you can provide descriptions, change defaults, and provide explicit Regional operational and route filters. 
The names defined for each segment are used in the `segment_actions` and `attachment_policies` section. Each segment is created, and operates, as a completely separated routing domain. By default, attachments can only communicate with other attachments in the same segment. Detailed below. * `segment_actions` (Optional) - A block argument, `segment_actions` define how routing works between segments. By default, attachments can only communicate with other attachments in the same segment. Detailed below. +* `network_function_groups` (Optional) - Block argument that defines the service insertion actions you want to include. Detailed below. ### `attachment_policies` @@ -180,15 +181,17 @@ The following arguments are available: * `conditions` (Required) - A block argument. Detailed Below. * `description` (Optional) - A user-defined description that further helps identify the rule. * `rule_number` (Required) - An integer from `1` to `65535` indicating the rule's order number. Rules are processed in order from the lowest numbered rule to the highest. Rules stop processing when a rule is matched. It's important to make sure that you number your rules in the exact order that you want them processed. +* `add_to_network_function_group` (Optional) - The name of the network function group to attach to the attachment policy. ### `action` The following arguments are available: -* `association_method` (Required) - Defines how a segment is mapped. Values can be `constant` or `tag`. `constant` statically defines the segment to associate the attachment to. `tag` uses the value of a tag to dynamically try to map to a segment.reference_policies_elements_condition_operators.html) to evaluate. +* `association_method` (Optional) - Defines how a segment is mapped. Values can be `constant` or `tag`. `constant` statically defines the segment to associate the attachment to. 
`tag` uses the value of a tag to dynamically try to map to a segment. * `segment` (Optional) - Name of the `segment` to share as defined in the `segments` section. This is used only when the `association_method` is `constant`. * `tag_value_of_key` (Optional) - Maps the attachment to the value of a known key. This is used with the `association_method` is `tag`. For example a `tag` of `stage = “test”`, will map to a segment named `test`. The value must exactly match the name of a segment. This allows you to have many segments, but use only a single rule without having to define multiple nearly identical conditions. This prevents creating many similar conditions that all use the same keys to map to segments. * `require_acceptance` (Optional) - Determines if this mapping should override the segment value for `require_attachment_acceptance`. You can only set this to `true`, indicating that this setting applies only to segments that have `require_attachment_acceptance` set to `false`. If the segment already has the default `require_attachment_acceptance`, you can set this to inherit segment’s acceptance value. +* `add_to_network_function_group` (Optional) - The name of the network function group to attach to the attachment policy. ### `conditions` @@ -232,20 +235,33 @@ The following arguments are available: ### `segment_actions` -`segment_actions` have differnet outcomes based on their `action` argument value. There are 2 valid values for `action`: `create-route` & `share`. Behaviors of the below arguments changed depending on the `action` you specify. For more details on their use see the [AWS documentation](https://docs.aws.amazon.com/vpc/latest/cloudwan/cloudwan-policies-json.html#cloudwan-segment-actions-json). +`segment_actions` have different outcomes based on their `action` argument value. Behaviors of the below arguments change depending on the `action` you specify.
For more details on their use see the [AWS documentation](https://docs.aws.amazon.com/vpc/latest/cloudwan/cloudwan-policies-json.html#cloudwan-segment-actions-json). ~> **NOTE:** `share_with` and `share_with_except` break from the AWS API specification. The API has 1 argument `share-with` and it can accept 3 input types as valid (`"*"`, `[""]`, or `{ except: [""]}`). To emulate this behavior, `share_with` is always a list that can accept the argument `["*"]` as valid for `"*"` and `share_with_except` is a that can accept `[""]` as valid for `{ except: [""]}`. You may only specify one of: `share_with` or `share_with_except`. The following arguments are available: -* `action` (Required) - Action to take for the chosen segment. Valid values `create-route` or `share`. +* `action` (Required) - Action to take for the chosen segment. Valid values: `create-route`, `share`, `send-via` and `send-to`. * `description` (Optional) - A user-defined string describing the segment action. * `destination_cidr_blocks` (Optional) - List of strings containing CIDRs. You can define the IPv4 and IPv6 CIDR notation for each AWS Region. For example, `10.1.0.0/16` or `2001:db8::/56`. This is an array of CIDR notation strings. * `destinations` (Optional) - A list of strings. Valid values include `["blackhole"]` or a list of attachment ids. -* `mode` (Optional) - String. This mode places the attachment and return routes in each of the `share_with` segments. Valid values include: `attachment-route`. +* `mode` (Optional) - String. When `action` is `share`, a `mode` value of `attachment-route` places the attachment and return routes in each of the `share_with` segments. When `action` is `send-via`, indicates the mode used for packets. Valid values: `attachment-route`, `single-hop`, `dual-hop`. * `segment` (Optional) - Name of the segment. * `share_with` (Optional) - A list of strings to share with. Must be a substring is all segments. Valid values include: `["*"]` or `[""]`. 
* `share_with_except` (Optional) - A set subtraction of segments to not share with. +* `when_sent_to` (Optional) - The destination segments for the `send-via` or `send-to` `action`. + * `segments` (Optional) - A list of strings. The list of segments that the `send-via` `action` uses. +* `via` (Optional) - The network function groups and any edge overrides associated with the action. + * `network_function_groups` (Optional) - A list of strings. The network function group to use for the service insertion action. + * `with_edge_override` (Optional) - Any edge overrides and the preferred edge to use. + * `edge_sets` (Optional) - A list of strings. The list of edges associated with the network function group. + * `use_edge` (Optional) - The preferred edge to use. + +### `network_function_groups` + +* `name` (Required) - This identifies the network function group container. +* `description` (Optional) - Optional description of the network function group. +* `require_attachment_acceptance` (Required) - This will be either `true`, that attachment acceptance is required, or `false`, that it is not required. ## Attribute Reference diff --git a/website/docs/d/oam_link.html.markdown b/website/docs/d/oam_link.html.markdown index fe7024000d9..2c197ba6162 100644 --- a/website/docs/d/oam_link.html.markdown +++ b/website/docs/d/oam_link.html.markdown @@ -31,8 +31,29 @@ The following arguments are required: This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the link. +* `id` - ARN of the link. * `label` - Label that is assigned to this link. * `label_template` - Human-readable name used to identify this source account when you are viewing data from it in the monitoring account. +* `link_configuration` - Configuration for creating filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account. 
See [`link_configuration` Block](#link_configuration-block) for details. * `link_id` - ID string that AWS generated as part of the link ARN. * `resource_types` - Types of data that the source account shares with the monitoring account. * `sink_arn` - ARN of the sink that is used for this link. + +### `link_configuration` Block + +The `link_configuration` configuration block supports the following arguments: + +* `log_group_configuration` - Configuration for filtering which log groups are to send log events from the source account to the monitoring account. See [`log_group_configuration` Block](#log_group_configuration-block) for details. +* `metric_configuration` - Configuration for filtering which metric namespaces are to be shared from the source account to the monitoring account. See [`metric_configuration` Block](#metric_configuration-block) for details. + +### `log_group_configuration` Block + +The `log_group_configuration` configuration block supports the following arguments: + +* `filter` - Filter string that specifies which log groups are to share their log events with the monitoring account. See [LogGroupConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_LogGroupConfiguration.html) for details. + +### `metric_configuration` Block + +The `metric_configuration` configuration block supports the following arguments: + +* `filter` - Filter string that specifies which metrics are to be shared with the monitoring account. See [MetricConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_MetricConfiguration.html) for details. diff --git a/website/docs/d/oam_sink.html.markdown b/website/docs/d/oam_sink.html.markdown index f251532eac8..c42cdc5c276 100644 --- a/website/docs/d/oam_sink.html.markdown +++ b/website/docs/d/oam_sink.html.markdown @@ -31,6 +31,7 @@ The following arguments are required: This data source exports the following attributes in addition to the arguments above: * `arn` - ARN of the sink. 
+* `id` - ARN of the sink. * `name` - Name of the sink. * `sink_id` - Random ID string that AWS generated as part of the sink ARN. * `tags` - Tags assigned to the sink. diff --git a/website/docs/d/organizations_policy.html.markdown b/website/docs/d/organizations_policy.html.markdown index cc7deeb961b..5cd2cd8e4db 100644 --- a/website/docs/d/organizations_policy.html.markdown +++ b/website/docs/d/organizations_policy.html.markdown @@ -21,8 +21,8 @@ data "aws_organizations_policies_for_target" "current" { target_id = data.aws_organizations_organization.current.roots[0].id filter = "SERVICE_CONTROL_POLICY" } -data "aws_organizational_policies" "test" { - policy_id = data.aws_organizations_organizational_policies.current.policies[0].id +data "aws_organizations_policy" "test" { + policy_id = data.aws_organizations_policies_for_target.current.policies[0].id } ``` diff --git a/website/docs/d/route53_zone.html.markdown b/website/docs/d/route53_zone.html.markdown index 6993f7ee7f7..8ab4fac1b1c 100644 --- a/website/docs/d/route53_zone.html.markdown +++ b/website/docs/d/route53_zone.html.markdown @@ -35,10 +35,9 @@ resource "aws_route53_record" "www" { The arguments of this data source act as filters for querying the available Hosted Zone. You have to use `zone_id` or `name`, not both of them. The given filter must match exactly one -Hosted Zone. If you use `name` field for private Hosted Zone, you need to add `private_zone` field to `true` +Hosted Zone. If you use `name` field for private Hosted Zone, you need to add `private_zone` field to `true`. * `zone_id` - (Optional) Hosted Zone id of the desired Hosted Zone. - * `name` - (Optional) Hosted Zone name of the desired Hosted Zone. * `private_zone` - (Optional) Used with `name` field to get a private Hosted Zone. * `vpc_id` - (Optional) Used with `name` field to get a private Hosted Zone associated with the vpc_id (in this case, private_zone is not mandatory). 
@@ -56,8 +55,12 @@ The following attribute is additionally exported: * `arn` - ARN of the Hosted Zone. * `caller_reference` - Caller Reference of the Hosted Zone. * `comment` - Comment field of the Hosted Zone. +* `linked_service_principal` - The service that created the Hosted Zone (e.g., `servicediscovery.amazonaws.com`). +* `linked_service_description` - The description provided by the service that created the Hosted Zone (e.g., `arn:aws:servicediscovery:us-east-1:1234567890:namespace/ns-xxxxxxxxxxxxxxxx`). +* `name` - The Hosted Zone name. * `name_servers` - List of DNS name servers for the Hosted Zone. * `primary_name_server` - The Route 53 name server that created the SOA record. +* `private_zone` - Indicates whether this is a private hosted zone. * `resource_record_set_count` - The number of Record Set in the Hosted Zone. -* `linked_service_principal` - The service that created the Hosted Zone (e.g., `servicediscovery.amazonaws.com`). -* `linked_service_description` - The description provided by the service that created the Hosted Zone (e.g., `arn:aws:servicediscovery:us-east-1:1234567890:namespace/ns-xxxxxxxxxxxxxxxx`). +* `tags` - A map of tags assigned to the Hosted Zone. +* `zone_id` - The Hosted Zone identifier. diff --git a/website/docs/d/service_discovery_service.html.markdown b/website/docs/d/service_discovery_service.html.markdown index 22907175db7..de8d3a8c996 100644 --- a/website/docs/d/service_discovery_service.html.markdown +++ b/website/docs/d/service_discovery_service.html.markdown @@ -33,37 +33,37 @@ This data source exports the following attributes in addition to the arguments a * `id` - ID of the service. * `arn` - ARN of the service. * `description` - Description of the service. -* `dns_config` - Complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. -* `health_check_config` - Complex type that contains settings for an optional health check. 
Only for Public DNS namespaces. -* `health_check_custom_config` - A complex type that contains settings for ECS managed health checks. +* `dns_config` - Complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. See [`dns_config` Block](#dns_config-block) for details. +* `health_check_config` - Complex type that contains settings for an optional health check. Only for Public DNS namespaces. See [`health_check_config` Block](#health_check_config-block) for details. +* `health_check_custom_config` - A complex type that contains settings for ECS managed health checks. See [`health_check_custom_config` Block](#health_check_custom_config-block) for details. * `tags` - Map of tags to assign to the service. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `tags_all` - (**Deprecated**) Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). -### dns_config +### `dns_config` Block -This argument supports the following arguments: +The `dns_config` configuration block supports the following arguments: * `namespace_id` - ID of the namespace to use for DNS configuration. -* `dns_records` - An array that contains one DnsRecord object for each resource record set. +* `dns_records` - An array that contains one DnsRecord object for each resource record set. See [`dns_records` Block](#dns_records-block) for details. * `routing_policy` - Routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. 
Valid Values: MULTIVALUE, WEIGHTED -#### dns_records +#### `dns_records` Block -This argument supports the following arguments: +The `dns_records` configuration block supports the following arguments: * `ttl` - Amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. * `type` - Type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME -### health_check_config +### `health_check_config` Block -This argument supports the following arguments: +The `health_check_config` configuration block supports the following arguments: * `failure_threshold` - Number of consecutive health checks. Maximum value of 10. * `resource_path` - Path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. * `type` - The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP -### health_check_custom_config +### `health_check_custom_config` Block -This argument supports the following arguments: +The `health_check_custom_config` configuration block supports the following arguments: * `failure_threshold` - The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. diff --git a/website/docs/d/timestreamwrite_database.html.markdown b/website/docs/d/timestreamwrite_database.html.markdown new file mode 100644 index 00000000000..5acacf0bbe9 --- /dev/null +++ b/website/docs/d/timestreamwrite_database.html.markdown @@ -0,0 +1,38 @@ +--- +subcategory: "Timestream Write" +layout: "aws" +page_title: "AWS: aws_timestreamwrite_database" +description: |- + Terraform data source for managing an AWS Timestream Write Database. 
+--- + +# Data Source: aws_timestreamwrite_database + +Terraform data source for managing an AWS Timestream Write Database. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_timestreamwrite_database" "test" { + database_name = "database-example" +} +``` + +## Argument Reference + +The following arguments are required: + +* `database_name` – (Required) The name of the Timestream database. Minimum length of 3. Maximum length of 256. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - The ARN that uniquely identifies this database. +* `created_time` - Creation time of database. +* `database_name` – The name of the Timestream database. Minimum length of 3. Maximum length of 256. +* `kms_key_id` - The ARN of the KMS key used to encrypt the data stored in the database. +* `last_updated_time` - Last time database was updated. +* `table_count` - Total number of tables in the Timestream database. diff --git a/website/docs/d/timestreamwrite_table.html.markdown b/website/docs/d/timestreamwrite_table.html.markdown new file mode 100644 index 00000000000..05e3449ae1f --- /dev/null +++ b/website/docs/d/timestreamwrite_table.html.markdown @@ -0,0 +1,55 @@ +--- +subcategory: "Timestream Write" +layout: "aws" +page_title: "AWS: aws_timestreamwrite_table" +description: |- + Terraform data source for managing an AWS Timestream Write Table. +--- + +# Data Source: aws_timestreamwrite_table + +Terraform data source for managing an AWS Timestream Write Table. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_timestreamwrite_table" "test" { + database_name = aws_timestreamwrite_database.test.database_name + name = aws_timestreamwrite_table.test.table_name +} +``` + +## Argument Reference + +The following arguments are required: + +* `database_name` - (Required) Name of the Timestream database. +* `name` - (Required) Name of the Timestream table. 
+ +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `arn` - ARN that uniquely identifies the table. +* `creation_time` - Time that table was created. +* `database_name` - Name of database. +* `last_updated_time` - Last time table was updated. +* `magnetic_store_write_properties` - Object containing the following attributes to describe magnetic store writes. + * `enable_magnetic_store_writes` - Flag that is set based on if magnetic store writes are enabled. + * `magnetic_store_rejected_data_location` - Object containing the following attributes to describe error reports for records rejected during magnetic store writes. + * `s3_configuration` - Object containing the following attributes to describe the configuration of an s3 location to write error reports for records rejected. + * `bucket_name` - Name of S3 bucket. + * `encryption_object` - Encryption option for S3 location. + * `kms_key_id` - AWS KMS key ID for S3 location with AWS managed key. + * `object_key_prefix` - Object key prefix for S3 location. +* `retention_properties` - Object containing the following attributes to describe the retention duration for the memory and magnetic stores. + * `magnetic_store_retention_period_in_days` - Duration in days in which the data must be stored in magnetic store. + * `memory_store_retention_period_in_hours` - Duration in hours in which the data must be stored in memory store. +* `schema` - Object containing the following attributes to describe the schema of the table. + * `type` - Type of partition key. + * `partition_key` - Level of enforcement for the specification of a dimension key in ingested records. + * `name` - Name of the timestream attribute used for a dimension key. +* `name` - Name of the table. +* `table_status` - Current state of table. 
diff --git a/website/docs/d/transfer_connector.html.markdown b/website/docs/d/transfer_connector.html.markdown new file mode 100644 index 00000000000..bc8ca2c9873 --- /dev/null +++ b/website/docs/d/transfer_connector.html.markdown @@ -0,0 +1,52 @@ +--- +subcategory: "Transfer Family" +layout: "aws" +page_title: "AWS: aws_transfer_connector" +description: |- + Terraform data source for managing an AWS Transfer Family Connector. +--- + +# Data Source: aws_transfer_connector + +Terraform data source for managing an AWS Transfer Family Connector. + +### Basic Usage + +```terraform +data "aws_transfer_connector" "test" { + id = "c-xxxxxxxxxxxxxx" +} +``` + +## Argument Reference + +The following arguments are required: + +* `id` - (Required) Unique identifier for connector + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `access_role` - ARN of the AWS Identity and Access Management role. +* `arn` - ARN of the Connector. +* `as2_config` - Structure containing the parameters for an AS2 connector object. Contains the following attributes: + * `basic_auth_secret_id` - Basic authentication for AS2 connector API. Returns a null value if not set. + * `compression` - Specifies whether AS2 file is compressed. Will be ZLIB or DISABLED + * `encryption_algorithm` - Algorithm used to encrypt file. Will be AES128_CBC or AES192_CBC or AES256_CBC or DES_EDE3_CBC or NONE. + * `local_profile_id` - Unique identifier for AS2 local profile. + * `mdn_response` - Used for outbound requests to tell if response is asynchronous or not. Will be either SYNC or NONE. + * `mdn_signing_algorithm` - Signing algorithm for MDN response. Will be SHA256 or SHA384 or SHA512 or SHA1 or NONE or DEFAULT. + * `message_subject` - Subject HTTP header attribute in outbound AS2 messages to the connector. + * `partner_profile_id` - Unique identifier used by connector for partner profile. 
+ * `signing_algorithm` - Algorithm used for signing AS2 messages sent with the connector. +* `logging_role` - ARN of the IAM role that allows a connector to turn on CloudWatch logging for Amazon S3 events. +* `security_policy_name` - Name of security policy. +* `service_managed_egress_ip_addresses` - List of egress IP addresses. +* `sftp_config` - Object containing the following attributes: + * `trusted_host_keys` - List of the public portions of the host keys that are used to identify the servers the connector is connected to. + * `user_secret_id` - Identifier for the secret in AWS Secrets Manager that contains the SFTP user's private key, and/or password. +* `tags` - Object containing the following attributes: + * `key` - Name of the tag. + * `value` - Values associated with the tags key. +* `url` - URL of the partner's AS2 or SFTP endpoint. diff --git a/website/docs/guides/custom-service-endpoints.html.markdown b/website/docs/guides/custom-service-endpoints.html.markdown index cd8e3f0a645..72e17ce56ae 100644 --- a/website/docs/guides/custom-service-endpoints.html.markdown +++ b/website/docs/guides/custom-service-endpoints.html.markdown @@ -84,6 +84,7 @@ provider "aws" {
  • appflow
  • appintegrations (or appintegrationsservice)
  • applicationinsights
  • +
  • applicationsignals
  • appmesh
  • apprunner
  • appstream
  • @@ -133,6 +134,7 @@ provider "aws" {
  • costoptimizationhub
  • cur (or costandusagereportservice)
  • customerprofiles
  • +
  • databrew (or gluedatabrew)
  • dataexchange
  • datapipeline
  • datasync
  • @@ -226,6 +228,7 @@ provider "aws" {
  • neptunegraph
  • networkfirewall
  • networkmanager
  • +
  • networkmonitor
  • oam (or cloudwatchobservabilityaccessmanager)
  • opensearch (or opensearchservice)
  • opensearchserverless
  • diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 68b1502c0eb..9b97bf1fa30 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -11,7 +11,7 @@ Use the Amazon Web Services (AWS) provider to interact with the many resources supported by AWS. You must configure the provider with the proper credentials before you can use it. -Use the navigation to the left to read about the available resources. There are currently 1375 resources and 559 data sources available in the provider. +Use the navigation to the left to read about the available resources. There are currently 1389 resources and 564 data sources available in the provider. To learn the basics of Terraform using this provider, follow the hands-on [get started tutorials](https://learn.hashicorp.com/tutorials/terraform/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). Interact with AWS services, diff --git a/website/docs/r/amplify_domain_association.html.markdown b/website/docs/r/amplify_domain_association.html.markdown index c8115f82982..912ebaf3a39 100644 --- a/website/docs/r/amplify_domain_association.html.markdown +++ b/website/docs/r/amplify_domain_association.html.markdown @@ -52,11 +52,17 @@ resource "aws_amplify_domain_association" "example" { This resource supports the following arguments: * `app_id` - (Required) Unique ID for an Amplify app. +* `certificate_settings` - (Optional) The type of SSL/TLS certificate to use for your custom domain. If you don't specify a certificate type, Amplify uses the default certificate that it provisions and manages for you. * `domain_name` - (Required) Domain name for the domain association. * `enable_auto_sub_domain` - (Optional) Enables the automated creation of subdomains for branches. * `sub_domain` - (Required) Setting for the subdomain. Documented below. 
* `wait_for_verification` - (Optional) If enabled, the resource will wait for the domain association status to change to `PENDING_DEPLOYMENT` or `AVAILABLE`. Setting this to `false` will skip the process. Default: `true`. +The `certificate_settings` configuration block supports the following arguments: + +* `type` - (Required) The certificate type. Valid values are `AMPLIFY_MANAGED` and `CUSTOM`. +* `custom_certificate_arn` - (Optional) The Amazon resource name (ARN) for the custom certificate. + The `sub_domain` configuration block supports the following arguments: * `branch_name` - (Required) Branch name setting for the subdomain. diff --git a/website/docs/r/api_gateway_integration.html.markdown b/website/docs/r/api_gateway_integration.html.markdown index 7971d235593..5942b846db6 100644 --- a/website/docs/r/api_gateway_integration.html.markdown +++ b/website/docs/r/api_gateway_integration.html.markdown @@ -223,7 +223,7 @@ This resource supports the following arguments: * `cache_key_parameters` - (Optional) List of cache key parameters for the integration. * `cache_namespace` - (Optional) Integration's cache namespace. * `content_handling` - (Optional) How to handle request payload content type conversions. Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT`. If this property is not defined, the request payload will be passed through from the method request to integration request without modification, provided that the passthroughBehaviors is configured to support payload pass-through. -* `timeout_milliseconds` - (Optional) Custom timeout between 50 and 29,000 milliseconds. The default value is 29,000 milliseconds. +* `timeout_milliseconds` - (Optional) Custom timeout between 50 and 300,000 milliseconds. The default value is 29,000 milliseconds. You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase time beyond 29,000 milliseconds. * `tls_config` - (Optional) TLS configuration. 
See below. ### tls_config Configuration Block diff --git a/website/docs/r/appfabric_app_authorization_connection.html.markdown b/website/docs/r/appfabric_app_authorization_connection.html.markdown new file mode 100644 index 00000000000..6a5932af330 --- /dev/null +++ b/website/docs/r/appfabric_app_authorization_connection.html.markdown @@ -0,0 +1,48 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_app_authorization_connection" +description: |- + Terraform resource for managing an AWS AppFabric App Authorization Connection. +--- + +# Resource: aws_appfabric_app_authorization_connection + +Terraform resource for managing an AWS AppFabric App Authorization Connection. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_appfabric_app_authorization_connection" "example" { + app_authorization_arn = aws_appfabric_app_authorization.test.arn + app_bundle_arn = aws_appfabric_app_bundle.test.arn +} +``` + +## Argument Reference + +This resource supports the following arguments: + +* `app_bundle_arn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. +* `app_authorization_arn` - (Required) The Amazon Resource Name (ARN) or Universal Unique Identifier (UUID) of the app authorization to use for the request. +* `auth_request` - (Optional) Contains OAuth2 authorization information. This is required if the app authorization for the request is configured with an OAuth2 (oauth2) authorization type. + +Auth Request supports the following: + +* `code` - (Required) The authorization code returned by the application after permission is granted in the application OAuth page (after clicking on the AuthURL). +* `redirect_uri` - (Optional) The redirect URL that is specified in the AuthURL and the application client. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `app` - The name of the application. 
+* `tenant` - Contains information about an application tenant, such as the application display name and identifier. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) diff --git a/website/docs/r/appfabric_ingestion.html.markdown b/website/docs/r/appfabric_ingestion.html.markdown new file mode 100644 index 00000000000..0d097ea83f6 --- /dev/null +++ b/website/docs/r/appfabric_ingestion.html.markdown @@ -0,0 +1,62 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_ingestion" +description: |- + Terraform resource for managing an AWS AppFabric Ingestion. +--- + +# Resource: aws_appfabric_ingestion + +Terraform resource for managing an AWS AppFabric Ingestion. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_appfabric_ingestion" "example" { + app = "OKTA" + app_bundle_arn = aws_appfabric_app_bundle.example.arn + tenant_id = "example.okta.com" + ingestion_type = "auditLog" + tags = { + Environment = "test" + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `app` - (Required) Name of the application. +Refer to the AWS Documentation for the [list of valid values](https://docs.aws.amazon.com/appfabric/latest/api/API_CreateIngestion.html#appfabric-CreateIngestion-request-app) +* `app_bundle_arn` - (Required) Amazon Resource Name (ARN) of the app bundle to use for the request. +* `ingestion_type` - (Required) Ingestion type. Valid values are `auditLog`. +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `tenant_id` - (Required) ID of the application tenant. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Ingestion. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import AppFabric Ingestion using the `app_bundle_identifier` and `arn` separated by `,`. For example: + +```terraform +import { + to = aws_appfabric_ingestion.example + id = "arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx,arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx/ingestion/32251416-710b-4425-96ca-xxxxxxxxxx" +} +``` + +Using `terraform import`, import AppFabric Ingestion using the `app_bundle_identifier` and `arn` separated by `,`. For example: + +```console +% terraform import aws_appfabric_ingestion.example arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx,arn:aws:appfabric:[region]:[account]:appbundle/a9b91477-8831-43c0-970c-xxxxxxxxxx/ingestion/32251416-710b-4425-96ca-xxxxxxxxxx +``` diff --git a/website/docs/r/appfabric_ingestion_destination.html.markdown b/website/docs/r/appfabric_ingestion_destination.html.markdown new file mode 100644 index 00000000000..43cbc88c3bc --- /dev/null +++ b/website/docs/r/appfabric_ingestion_destination.html.markdown @@ -0,0 +1,95 @@ +--- +subcategory: "AppFabric" +layout: "aws" +page_title: "AWS: aws_appfabric_ingestion_destination" +description: |- + Terraform resource for managing an AWS AppFabric Ingestion Destination. +--- + +# Resource: aws_appfabric_ingestion_destination + +Terraform resource for managing an AWS AppFabric Ingestion Destination. 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_appfabric_ingestion_destination" "example" { + app_bundle_arn = aws_appfabric_app_bundle.example.arn + ingestion_arn = aws_appfabric_ingestion.example.arn + + processing_configuration { + audit_log { + format = "json" + schema = "raw" + } + } + + destination_configuration { + audit_log { + destination { + s3_bucket { + bucket_name = aws_s3_bucket.example.bucket + } + } + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `app_bundle_arn` - (Required) The Amazon Resource Name (ARN) of the app bundle to use for the request. +* `ingestion_arn` - (Required) The Amazon Resource Name (ARN) of the ingestion to use for the request. +* `destination_configuration` - (Required) Contains information about the destination of ingested data. +* `processing_configuration` - (Required) Contains information about how ingested data is processed. +* `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +Destination Configuration supports the following: + +* `audit_log` - (Required) Contains information about an audit log destination configuration. + +Audit Log Destination Configuration supports the following: + +* `destination` - (Required) Contains information about an audit log destination. Only one destination (Firehose Stream) or (S3 Bucket) can be specified. + +Destination supports the following: + +* `firehose_stream` - (Optional) Contains information about an Amazon Data Firehose delivery stream. +* `s3_bucket` - (Optional) Contains information about an Amazon S3 bucket. + +Firehose Stream supports the following: + +* `stream_name` - (Required) The name of the Amazon Data Firehose delivery stream. 
+ +S3 Bucket supports the following: + +* `bucket_name` - (Required) The name of the Amazon S3 bucket. +* `prefix` - (Optional) The object key to use. + +Processing Configuration supports the following: + +* `audit_log` - (Required) Contains information about an audit log processing configuration. + +Audit Log Processing Configuration supports the following: + +* `format` - (Required) The format in which the audit logs need to be formatted. Valid values: `json`, `parquet`. +* `schema` - (Required) The event schema in which the audit logs need to be formatted. Valid values: `ocsf`, `raw`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the Ingestion Destination. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `5m`) +* `update` - (Default `5m`) +* `delete` - (Default `5m`) diff --git a/website/docs/r/appstream_fleet.html.markdown b/website/docs/r/appstream_fleet.html.markdown index 157c1e414d1..c220c946fa2 100644 --- a/website/docs/r/appstream_fleet.html.markdown +++ b/website/docs/r/appstream_fleet.html.markdown @@ -56,7 +56,7 @@ The following arguments are optional: * `enable_default_internet_access` - (Optional) Enables or disables default internet access for the fleet. * `fleet_type` - (Optional) Fleet type. Valid values are: `ON_DEMAND`, `ALWAYS_ON` * `iam_role_arn` - (Optional) ARN of the IAM role to apply to the fleet. -* `idle_disconnect_timeout_in_seconds` - (Optional) Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins. 
Defaults to 60 seconds. +* `idle_disconnect_timeout_in_seconds` - (Optional) Amount of time that users can be idle (inactive) before they are disconnected from their streaming session and the `disconnect_timeout_in_seconds` time interval begins. Defaults to `0`. Valid value is between `60` and `3600 `seconds. * `image_name` - (Optional) Name of the image used to create the fleet. * `image_arn` - (Optional) ARN of the public, private, or shared image to use. * `stream_view` - (Optional) AppStream 2.0 view that is displayed to your users when they stream from the fleet. When `APP` is specified, only the windows of applications opened by users display. When `DESKTOP` is specified, the standard desktop that is provided by the operating system displays. If not specified, defaults to `APP`. diff --git a/website/docs/r/appsync_datasource.html.markdown b/website/docs/r/appsync_datasource.html.markdown index 0fb74dc90a3..8112f624411 100644 --- a/website/docs/r/appsync_datasource.html.markdown +++ b/website/docs/r/appsync_datasource.html.markdown @@ -81,88 +81,90 @@ This resource supports the following arguments: * `name` - (Required) User-supplied name for the data source. * `type` - (Required) Type of the Data Source. Valid values: `AWS_LAMBDA`, `AMAZON_DYNAMODB`, `AMAZON_ELASTICSEARCH`, `HTTP`, `NONE`, `RELATIONAL_DATABASE`, `AMAZON_EVENTBRIDGE`, `AMAZON_OPENSEARCH_SERVICE`. * `description` - (Optional) Description of the data source. -* `dynamodb_config` - (Optional) DynamoDB settings. See [DynamoDB Config](#dynamodb-config) -* `elasticsearch_config` - (Optional) Amazon Elasticsearch settings. See [ElasticSearch Config](#elasticsearch-config) -* `event_bridge_config` - (Optional) AWS EventBridge settings. See [Event Bridge Config](#event-bridge-config) -* `http_config` - (Optional) HTTP settings. See [HTTP Config](#http-config) -* `lambda_config` - (Optional) AWS Lambda settings. 
See [Lambda Config](#lambda-config) -* `opensearchservice_config` - (Optional) Amazon OpenSearch Service settings. See [OpenSearch Service Config](#opensearch-service-config) -* `relational_database_config` (Optional) AWS RDS settings. See [Relational Database Config](#relational-database-config) +* `dynamodb_config` - (Optional) DynamoDB settings. See [`dynamodb_config` Block](#dynamodb_config-block) for details. +* `elasticsearch_config` - (Optional) Amazon Elasticsearch settings. See [`elasticsearch_config` Block](#elasticsearch_config-block) for details. +* `event_bridge_config` - (Optional) AWS EventBridge settings. See [`event_bridge_config` Block](#event_bridge_config-block) for details. +* `http_config` - (Optional) HTTP settings. See [`http_config` Block](#http_config-block) for details. +* `lambda_config` - (Optional) AWS Lambda settings. See [`lambda_config` Block](#lambda_config-block) for details. +* `opensearchservice_config` - (Optional) Amazon OpenSearch Service settings. See [`opensearchservice_config` Block](#opensearchservice_config-block) for details. +* `relational_database_config` (Optional) AWS RDS settings. See [`relational_database_config` Block](#relational_database_config-block) for details. * `service_role_arn` - (Optional) IAM service role ARN for the data source. Required if `type` is specified as `AWS_LAMBDA`, `AMAZON_DYNAMODB`, `AMAZON_ELASTICSEARCH`, `AMAZON_EVENTBRIDGE`, or `AMAZON_OPENSEARCH_SERVICE`. -### DynamoDB Config +### `dynamodb_config` Block -This argument supports the following arguments: +The `dynamodb_config` configuration block supports the following arguments: * `table_name` - (Required) Name of the DynamoDB table. * `region` - (Optional) AWS region of the DynamoDB table. Defaults to current region. * `use_caller_credentials` - (Optional) Set to `true` to use Amazon Cognito credentials with this data source. -* `delta_sync_config` - (Optional) The DeltaSyncConfig for a versioned data source. 
See [Delta Sync Config](#delta-sync-config) +* `delta_sync_config` - (Optional) The DeltaSyncConfig for a versioned data source. See [`delta_sync_config` Block](#delta_sync_config-block) for details. * `versioned` - (Optional) Detects Conflict Detection and Resolution with this data source. -### Delta Sync Config +### `delta_sync_config` Block + +The `delta_sync_config` configuration block supports the following arguments: * `base_table_ttl` - (Optional) The number of minutes that an Item is stored in the data source. * `delta_sync_table_name` - (Required) The table name. * `delta_sync_table_ttl` - (Optional) The number of minutes that a Delta Sync log entry is stored in the Delta Sync table. -### ElasticSearch Config +### `elasticsearch_config` Block -This argument supports the following arguments: +The `elasticsearch_config` configuration block supports the following arguments: * `endpoint` - (Required) HTTP endpoint of the Elasticsearch domain. * `region` - (Optional) AWS region of Elasticsearch domain. Defaults to current region. -### Event Bridge Config +### `event_bridge_config` Block -This argument supports the following arguments: +The `event_bridge_config` configuration block supports the following arguments: * `event_bus_arn` - (Required) ARN for the EventBridge bus. -### HTTP Config +### `http_config` Block -This argument supports the following arguments: +The `http_config` configuration block supports the following arguments: * `endpoint` - (Required) HTTP URL. -* `authorization_config` - (Optional) Authorization configuration in case the HTTP endpoint requires authorization. See [Authorization Config](#authorization-config). +* `authorization_config` - (Optional) Authorization configuration in case the HTTP endpoint requires authorization. See [`authorization_config` Block](#authorization_config-block) for details. 
-#### Authorization Config +### `authorization_config` Block -This argument supports the following arguments: +The `authorization_config` configuration block supports the following arguments: * `authorization_type` - (Optional) Authorization type that the HTTP endpoint requires. Default values is `AWS_IAM`. -* `aws_iam_config` - (Optional) Identity and Access Management (IAM) settings. See [AWS IAM Config](#aws-iam-config). +* `aws_iam_config` - (Optional) Identity and Access Management (IAM) settings. See [`aws_iam_config` Block](#aws_iam_config-block) for details. -##### AWS IAM Config +### `aws_iam_config` Block -This argument supports the following arguments: +The `aws_iam_config` configuration block supports the following arguments: * `signing_region` - (Optional) Signing Amazon Web Services Region for IAM authorization. * `signing_service_name`- (Optional) Signing service name for IAM authorization. -### Lambda Config +### `lambda_config` Block -This argument supports the following arguments: +The `lambda_config` configuration block supports the following arguments: * `function_arn` - (Required) ARN for the Lambda function. -### OpenSearch Service Config +### `opensearchservice_config` Block -This argument supports the following arguments: +The `opensearchservice_config` configuration block supports the following arguments: * `endpoint` - (Required) HTTP endpoint of the OpenSearch domain. * `region` - (Optional) AWS region of the OpenSearch domain. Defaults to current region. -### Relational Database Config +### `relational_database_config` Block -This argument supports the following arguments: +The `relational_database_config` configuration block supports the following arguments: -* `http_endpoint_config` - (Required) Amazon RDS HTTP endpoint configuration. See [HTTP Endpoint Config](#http-endpoint-config). +* `http_endpoint_config` - (Required) Amazon RDS HTTP endpoint configuration. 
See [`http_endpoint_config` Block](#http_endpoint_config-block) for details. * `source_type` - (Optional) Source type for the relational database. Valid values: `RDS_HTTP_ENDPOINT`. -#### HTTP Endpoint Config +### `http_endpoint_config` Block -This argument supports the following arguments: +The `http_endpoint_config` configuration block supports the following arguments: * `db_cluster_identifier` - (Required) Amazon RDS cluster identifier. * `aws_secret_store_arn` - (Required) AWS secret store ARN for database credentials. diff --git a/website/docs/r/appsync_function.html.markdown b/website/docs/r/appsync_function.html.markdown index 04d89ef15fb..fc943d9e3a7 100644 --- a/website/docs/r/appsync_function.html.markdown +++ b/website/docs/r/appsync_function.html.markdown @@ -100,28 +100,28 @@ This resource supports the following arguments: * `request_mapping_template` - (Optional) Function request mapping template. Functions support only the 2018-05-29 version of the request mapping template. * `response_mapping_template` - (Optional) Function response mapping template. * `description` - (Optional) Function description. -* `runtime` - (Optional) Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See [Runtime](#runtime). -* `sync_config` - (Optional) Describes a Sync configuration for a resolver. See [Sync Config](#sync-config). +* `runtime` - (Optional) Describes a runtime used by an AWS AppSync pipeline resolver or AWS AppSync function. Specifies the name and version of the runtime to use. Note that if a runtime is specified, code must also be specified. See [`runtime` Block](#runtime-block) for details. +* `sync_config` - (Optional) Describes a Sync configuration for a resolver. See [`sync_config` Block](#sync_config-block) for details. * `function_version` - (Optional) Version of the request mapping template. 
Currently the supported value is `2018-05-29`. Does not apply when specifying `code`. -### Runtime +### `runtime` Block -This argument supports the following arguments: +The `runtime` configuration block supports the following arguments: * `name` - (Optional) The name of the runtime to use. Currently, the only allowed value is `APPSYNC_JS`. * `runtime_version` - (Optional) The version of the runtime to use. Currently, the only allowed version is `1.0.0`. -### Sync Config +### `sync_config` Block -This argument supports the following arguments: +The `sync_config` configuration block supports the following arguments: * `conflict_detection` - (Optional) Conflict Detection strategy to use. Valid values are `NONE` and `VERSION`. * `conflict_handler` - (Optional) Conflict Resolution strategy to perform in the event of a conflict. Valid values are `NONE`, `OPTIMISTIC_CONCURRENCY`, `AUTOMERGE`, and `LAMBDA`. -* `lambda_conflict_handler_config` - (Optional) Lambda Conflict Handler Config when configuring `LAMBDA` as the Conflict Handler. See [Lambda Conflict Handler Config](#lambda-conflict-handler-config). +* `lambda_conflict_handler_config` - (Optional) Lambda Conflict Handler Config when configuring `LAMBDA` as the Conflict Handler. See [`lambda_conflict_handler_config` Block](#lambda_conflict_handler_config-block) for details. -#### Lambda Conflict Handler Config +#### `lambda_conflict_handler_config` Block -This argument supports the following arguments: +The `lambda_conflict_handler_config` configuration block supports the following arguments: * `lambda_conflict_handler_arn` - (Optional) ARN for the Lambda function to use as the Conflict Handler. 
diff --git a/website/docs/r/appsync_graphql_api.html.markdown b/website/docs/r/appsync_graphql_api.html.markdown index ae04abc1610..bc6c1baf08f 100644 --- a/website/docs/r/appsync_graphql_api.html.markdown +++ b/website/docs/r/appsync_graphql_api.html.markdown @@ -215,13 +215,13 @@ resource "aws_appsync_graphql_api" "example" { This resource supports the following arguments: * `authentication_type` - (Required) Authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA` -* `name` - (Required) User-supplied name for the GraphqlApi. -* `log_config` - (Optional) Nested argument containing logging configuration. Defined below. -* `openid_connect_config` - (Optional) Nested argument containing OpenID Connect configuration. Defined below. -* `user_pool_config` - (Optional) Amazon Cognito User Pool configuration. Defined below. -* `lambda_authorizer_config` - (Optional) Nested argument containing Lambda authorizer configuration. Defined below. +* `name` - (Required) User-supplied name for the GraphQL API. +* `log_config` - (Optional) Nested argument containing logging configuration. See [`log_config` Block](#log_config-block) for details. +* `openid_connect_config` - (Optional) Nested argument containing OpenID Connect configuration. See [`openid_connect_config` Block](#openid_connect_config-block) for details. +* `user_pool_config` - (Optional) Amazon Cognito User Pool configuration. See [`user_pool_config` Block](#user_pool_config-block) for details. +* `lambda_authorizer_config` - (Optional) Nested argument containing Lambda authorizer configuration. See [`lambda_authorizer_config` Block](#lambda_authorizer_config-block) for details. * `schema` - (Optional) Schema definition, in GraphQL schema language format. Terraform cannot perform drift detection of this configuration. -* `additional_authentication_provider` - (Optional) One or more additional authentication providers for the GraphqlApi. Defined below.
+* `additional_authentication_provider` - (Optional) One or more additional authentication providers for the GraphQL API. See [`additional_authentication_provider` Block](#additional_authentication_provider-block) for details. * `introspection_config` - (Optional) Sets the value of the GraphQL API to enable (`ENABLED`) or disable (`DISABLED`) introspection. If no value is provided, the introspection configuration will be set to ENABLED by default. This field will produce an error if the operation attempts to use the introspection feature while this field is disabled. For more information about introspection, see [GraphQL introspection](https://graphql.org/learn/introspection/). * `query_depth_limit` - (Optional) The maximum depth a query can have in a single request. Depth refers to the amount of nested levels allowed in the body of query. The default value is `0` (or unspecified), which indicates there's no depth limit. If you set a limit, it can be between `1` and `75` nested levels. This field will produce a limit error if the operation falls out of bounds. @@ -231,43 +231,43 @@ This resource supports the following arguments: * `xray_enabled` - (Optional) Whether tracing with X-ray is enabled. Defaults to false. * `visibility` - (Optional) Sets the value of the GraphQL API to public (`GLOBAL`) or private (`PRIVATE`). If no value is provided, the visibility will be set to `GLOBAL` by default. This value cannot be changed once the API has been created. -### log_config +### `log_config` Block -This argument supports the following arguments: +The `log_config` configuration block supports the following arguments: * `cloudwatch_logs_role_arn` - (Required) Amazon Resource Name of the service role that AWS AppSync will assume to publish to Amazon CloudWatch logs in your account. * `field_log_level` - (Required) Field logging level. Valid values: `ALL`, `ERROR`, `NONE`.
* `exclude_verbose_content` - (Optional) Set to TRUE to exclude sections that contain information such as headers, context, and evaluated mapping templates, regardless of logging level. Valid values: `true`, `false`. Default value: `false` -### additional_authentication_provider +### `additional_authentication_provider` Block -This argument supports the following arguments: +The `additional_authentication_provider` configuration block supports the following arguments: * `authentication_type` - (Required) Authentication type. Valid values: `API_KEY`, `AWS_IAM`, `AMAZON_COGNITO_USER_POOLS`, `OPENID_CONNECT`, `AWS_LAMBDA` -* `openid_connect_config` - (Optional) Nested argument containing OpenID Connect configuration. Defined below. -* `user_pool_config` - (Optional) Amazon Cognito User Pool configuration. Defined below. +* `openid_connect_config` - (Optional) Nested argument containing OpenID Connect configuration. See [`openid_connect_config` Block](#openid_connect_config-block) for details. +* `user_pool_config` - (Optional) Amazon Cognito User Pool configuration. See [`user_pool_config` Block](#user_pool_config-block) for details. -### openid_connect_config +### `openid_connect_config` Block -This argument supports the following arguments: +The `openid_connect_config` configuration block supports the following arguments: * `issuer` - (Required) Issuer for the OpenID Connect configuration. The issuer returned by discovery MUST exactly match the value of iss in the ID Token. * `auth_ttl` - (Optional) Number of milliseconds a token is valid after being authenticated. * `client_id` - (Optional) Client identifier of the Relying party at the OpenID identity provider. This identifier is typically obtained when the Relying party is registered with the OpenID identity provider. You can specify a regular expression so the AWS AppSync can validate against multiple client identifiers at a time. 
* `iat_ttl` - (Optional) Number of milliseconds a token is valid after being issued to a user. -### user_pool_config +### `user_pool_config` Block -This argument supports the following arguments: +The `user_pool_config` configuration block supports the following arguments: * `default_action` - (Required only if Cognito is used as the default auth provider) Action that you want your GraphQL API to take when a request that uses Amazon Cognito User Pool authentication doesn't match the Amazon Cognito User Pool configuration. Valid: `ALLOW` and `DENY` * `user_pool_id` - (Required) User pool ID. * `app_id_client_regex` - (Optional) Regular expression for validating the incoming Amazon Cognito User Pool app client ID. * `aws_region` - (Optional) AWS region in which the user pool was created. -### lambda_authorizer_config +### `lambda_authorizer_config` Block -This argument supports the following arguments: +The `lambda_authorizer_config` configuration block supports the following arguments: * `authorizer_uri` - (Required) ARN of the Lambda function to be called for authorization. Note: This Lambda function must have a resource-based policy assigned to it, to allow `lambda:InvokeFunction` from service principal `appsync.amazonaws.com`. * `authorizer_result_ttl_in_seconds` - (Optional) Number of seconds a response should be cached for. The default is 5 minutes (300 seconds). The Lambda function can override this by returning a `ttlOverride` key in its response. A value of 0 disables caching of responses. Minimum value of 0. Maximum value of 3600. 
diff --git a/website/docs/r/autoscaling_group.html.markdown b/website/docs/r/autoscaling_group.html.markdown index 64f84a537bc..a1862987a63 100644 --- a/website/docs/r/autoscaling_group.html.markdown +++ b/website/docs/r/autoscaling_group.html.markdown @@ -612,6 +612,7 @@ This configuration block supports the following: * ssd - solid state drive ``` +- `max_spot_price_as_percentage_of_optimal_on_demand_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spot_max_price_percentage_over_lowest_price` - `memory_gib_per_vcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. - `min` - (Optional) Minimum. May be a decimal number, e.g. `0.5`. - `max` - (Optional) Maximum. May be a decimal number, e.g. `0.5`. @@ -629,7 +630,7 @@ This configuration block supports the following: If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. - `require_hibernate_support` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. -- `spot_max_price_percentage_over_lowest_price` - (Optional) Price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. 
When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. +- `spot_max_price_percentage_over_lowest_price` - (Optional) Price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `max_spot_price_as_percentage_of_optimal_on_demand_price` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. diff --git a/website/docs/r/autoscaling_policy.html.markdown b/website/docs/r/autoscaling_policy.html.markdown index 26be6f27e02..c340ba1bc40 100644 --- a/website/docs/r/autoscaling_policy.html.markdown +++ b/website/docs/r/autoscaling_policy.html.markdown @@ -257,14 +257,14 @@ The following fields are available in target tracking configuration: ### predefined_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefined_metric_type` - (Required) Metric type. * `resource_label` - (Optional) Identifies the resource associated with the metric type. 
### customized_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric_dimension` - (Optional) Dimensions of the metric. * `metric_name` - (Optional) Name of the metric. @@ -275,14 +275,14 @@ This argument supports the following arguments: #### metric_dimension -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. #### metrics -This argument supports the following arguments: +This configuration block supports the following arguments: * `expression` - (Optional) Math expression used on the returned metric. You must specify either `expression` or `metric_stat`, but not both. * `id` - (Required) Short name for the metric used in target tracking scaling policy. @@ -292,7 +292,7 @@ This argument supports the following arguments: ##### metric_stat -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric` - (Required) Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. * `stat` - (Required) Statistic of the metrics to return. @@ -300,7 +300,7 @@ This argument supports the following arguments: ##### metric -This argument supports the following arguments: +This configuration block supports the following arguments: * `dimensions` - (Optional) Dimensions of the metric. * `metric_name` - (Required) Name of the metric. @@ -308,14 +308,14 @@ This argument supports the following arguments: ###### dimensions -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. 
### predictive_scaling_configuration -This argument supports the following arguments: +This configuration block supports the following arguments: * `max_capacity_breach_behavior` - (Optional) Defines the behavior that should be applied if the forecast capacity approaches or exceeds the maximum capacity of the Auto Scaling group. Valid values are `HonorMaxCapacity` or `IncreaseMaxCapacity`. Default is `HonorMaxCapacity`. * `max_capacity_buffer` - (Optional) Size of the capacity buffer to use when the forecast capacity is close to or exceeds the maximum capacity. Valid range is `0` to `100`. If set to `0`, Amazon EC2 Auto Scaling may scale capacity higher than the maximum capacity to equal but not exceed forecast capacity. @@ -325,7 +325,7 @@ This argument supports the following arguments: #### metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `customized_capacity_metric_specification` - (Optional) Customized capacity metric specification. The field is only valid when you use `customized_load_metric_specification` * `customized_load_metric_specification` - (Optional) Customized load metric specification. @@ -336,46 +336,46 @@ This argument supports the following arguments: ##### predefined_load_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefined_metric_type` - (Required) Metric type. Valid values are `ASGTotalCPUUtilization`, `ASGTotalNetworkIn`, `ASGTotalNetworkOut`, or `ALBTargetGroupRequestCount`. * `resource_label` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). 
Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. ##### predefined_metric_pair_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefined_metric_type` - (Required) Which metrics to use. There are two different types of metrics for each metric type: one is a load metric and one is a scaling metric. For example, if the metric type is `ASGCPUUtilization`, the Auto Scaling group's total CPU metric is used as the load metric, and the average CPU metric is used for the scaling metric. Valid values are `ASGCPUUtilization`, `ASGNetworkIn`, `ASGNetworkOut`, or `ALBRequestCount`. * `resource_label` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. ##### predefined_scaling_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `predefined_metric_type` - (Required) Describes a scaling metric for a predictive scaling policy. Valid values are `ASGAverageCPUUtilization`, `ASGAverageNetworkIn`, `ASGAverageNetworkOut`, or `ALBRequestCountPerTarget`. * `resource_label` - (Required) Label that uniquely identifies a specific Application Load Balancer target group from which to determine the request count served by your Auto Scaling group. 
You create the resource label by appending the final portion of the load balancer ARN and the final portion of the target group ARN into a single value, separated by a forward slash (/). Refer to [PredefinedMetricSpecification](https://docs.aws.amazon.com/autoscaling/ec2/APIReference/API_PredefinedMetricSpecification.html) for more information. ##### customized_scaling_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric_data_queries` - (Required) List of up to 10 structures that defines custom scaling metric in predictive scaling policy ##### customized_load_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric_data_queries` - (Required) List of up to 10 structures that defines custom load metric in predictive scaling policy ##### customized_capacity_metric_specification -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric_data_queries` - (Required) List of up to 10 structures that defines custom capacity metric in predictive scaling policy ##### metric_data_queries -This argument supports the following arguments: +This configuration block supports the following arguments: * `expression` - (Optional) Math expression used on the returned metric. You must specify either `expression` or `metric_stat`, but not both. * `id` - (Required) Short name for the metric used in predictive scaling policy. @@ -385,7 +385,7 @@ This argument supports the following arguments: ##### metric_stat -This argument supports the following arguments: +This configuration block supports the following arguments: * `metric` - (Required) Structure that defines the CloudWatch metric to return, including the metric name, namespace, and dimensions. * `stat` - (Required) Statistic of the metrics to return. 
@@ -393,7 +393,7 @@ This argument supports the following arguments: ##### metric -This argument supports the following arguments: +This configuration block supports the following arguments: * `dimensions` - (Optional) Dimensions of the metric. * `metric_name` - (Required) Name of the metric. @@ -401,7 +401,7 @@ This argument supports the following arguments: ##### dimensions -This argument supports the following arguments: +This configuration block supports the following arguments: * `name` - (Required) Name of the dimension. * `value` - (Required) Value of the dimension. diff --git a/website/docs/r/bedrock_custom_model.html.markdown b/website/docs/r/bedrock_custom_model.html.markdown index 631e8cb21dc..07a891b92e4 100644 --- a/website/docs/r/bedrock_custom_model.html.markdown +++ b/website/docs/r/bedrock_custom_model.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_custom_model" description: |- diff --git a/website/docs/r/bedrock_model_invocation_logging_configuration.html.markdown b/website/docs/r/bedrock_model_invocation_logging_configuration.html.markdown index fd322e4b654..ee06edcb175 100644 --- a/website/docs/r/bedrock_model_invocation_logging_configuration.html.markdown +++ b/website/docs/r/bedrock_model_invocation_logging_configuration.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: aws_bedrock_model_invocation_logging_configuration" description: |- diff --git a/website/docs/r/bedrock_provisioned_model_throughput.html.markdown b/website/docs/r/bedrock_provisioned_model_throughput.html.markdown index 4f910f572d9..52c96abe9ed 100644 --- a/website/docs/r/bedrock_provisioned_model_throughput.html.markdown +++ b/website/docs/r/bedrock_provisioned_model_throughput.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Amazon Bedrock" +subcategory: "Bedrock" layout: "aws" page_title: "AWS: 
aws_bedrock_provisioned_model_throughput" description: |- diff --git a/website/docs/r/bedrockagent_agent.html.markdown b/website/docs/r/bedrockagent_agent.html.markdown index 326cdaeaaa5..5fd34efb50e 100644 --- a/website/docs/r/bedrockagent_agent.html.markdown +++ b/website/docs/r/bedrockagent_agent.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent" description: |- diff --git a/website/docs/r/bedrockagent_agent_action_group.html.markdown b/website/docs/r/bedrockagent_agent_action_group.html.markdown index c0fea52bafe..c5211124eab 100644 --- a/website/docs/r/bedrockagent_agent_action_group.html.markdown +++ b/website/docs/r/bedrockagent_agent_action_group.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent_action_group" description: |- diff --git a/website/docs/r/bedrockagent_agent_alias.html.markdown b/website/docs/r/bedrockagent_agent_alias.html.markdown index 106b7117faa..24d1d05608e 100644 --- a/website/docs/r/bedrockagent_agent_alias.html.markdown +++ b/website/docs/r/bedrockagent_agent_alias.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent_alias" description: |- diff --git a/website/docs/r/bedrockagent_agent_knowledge_base_association.html.markdown b/website/docs/r/bedrockagent_agent_knowledge_base_association.html.markdown index e017c9a7ccd..3fbb4d839fe 100644 --- a/website/docs/r/bedrockagent_agent_knowledge_base_association.html.markdown +++ b/website/docs/r/bedrockagent_agent_knowledge_base_association.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_agent_knowledge_base_association" description: |- diff --git 
a/website/docs/r/bedrockagent_data_source.html.markdown b/website/docs/r/bedrockagent_data_source.html.markdown index 14ee41df7e6..3d72c1d3248 100644 --- a/website/docs/r/bedrockagent_data_source.html.markdown +++ b/website/docs/r/bedrockagent_data_source.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_data_source" description: |- diff --git a/website/docs/r/bedrockagent_knowledge_base.html.markdown b/website/docs/r/bedrockagent_knowledge_base.html.markdown index cbf3a50aaba..c093ac73b2a 100644 --- a/website/docs/r/bedrockagent_knowledge_base.html.markdown +++ b/website/docs/r/bedrockagent_knowledge_base.html.markdown @@ -1,5 +1,5 @@ --- -subcategory: "Agents for Amazon Bedrock" +subcategory: "Bedrock Agents" layout: "aws" page_title: "AWS: aws_bedrockagent_knowledge_base" description: |- diff --git a/website/docs/r/cloudformation_stack_set_instance.html.markdown b/website/docs/r/cloudformation_stack_set_instance.html.markdown index 9fed412cb58..e180b086d57 100644 --- a/website/docs/r/cloudformation_stack_set_instance.html.markdown +++ b/website/docs/r/cloudformation_stack_set_instance.html.markdown @@ -87,7 +87,7 @@ This resource supports the following arguments: * `stack_set_name` - (Required) Name of the StackSet. * `account_id` - (Optional) Target AWS Account ID to create a Stack based on the StackSet. Defaults to current account. -* `deployment_targets` - (Optional) The AWS Organizations accounts to which StackSets deploys. StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. See [deployment_targets](#deployment_targets-argument-reference) below. +* `deployment_targets` - (Optional) AWS Organizations accounts to which StackSets deploys. 
StackSets doesn't deploy stack instances to the organization management account, even if the organization management account is in your organization or in an OU in your organization. Drift detection is not possible for this argument. See [deployment_targets](#deployment_targets-argument-reference) below. * `parameter_overrides` - (Optional) Key-value map of input parameters to override from the StackSet for this Instance. * `region` - (Optional) Target AWS Region to create a Stack based on the StackSet. Defaults to current region. * `retain_stack` - (Optional) During Terraform resource destroy, remove Instance from StackSet while keeping the Stack and its associated resources. Must be enabled in Terraform state _before_ destroy operation to take effect. You cannot reassociate a retained Stack or add an existing, saved Stack to a new StackSet. Defaults to `false`. @@ -98,25 +98,28 @@ This resource supports the following arguments: The `deployment_targets` configuration block supports the following arguments: -* `organizational_unit_ids` - (Optional) The organization root ID or organizational unit (OU) IDs to which StackSets deploys. +* `organizational_unit_ids` - (Optional) Organization root ID or organizational unit (OU) IDs to which StackSets deploys. +* `account_filter_type` - (Optional) Limit deployment targets to individual accounts or include additional accounts with provided OUs. Valid values: `INTERSECTION`, `DIFFERENCE`, `UNION`, `NONE`. +* `accounts` - (Optional) List of accounts to deploy stack set updates. +* `accounts_url` - (Optional) S3 URL of the file containing the list of accounts. ### `operation_preferences` Argument Reference The `operation_preferences` configuration block supports the following arguments: -* `failure_tolerance_count` - (Optional) The number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. 
-* `failure_tolerance_percentage` - (Optional) The percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. -* `max_concurrent_count` - (Optional) The maximum number of accounts in which to perform this operation at one time. -* `max_concurrent_percentage` - (Optional) The maximum percentage of accounts in which to perform this operation at one time. -* `region_concurrency_type` - (Optional) The concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are `SEQUENTIAL` and `PARALLEL`. -* `region_order` - (Optional) The order of the Regions in where you want to perform the stack operation. +* `failure_tolerance_count` - (Optional) Number of accounts, per Region, for which this operation can fail before AWS CloudFormation stops the operation in that Region. +* `failure_tolerance_percentage` - (Optional) Percentage of accounts, per Region, for which this stack operation can fail before AWS CloudFormation stops the operation in that Region. +* `max_concurrent_count` - (Optional) Maximum number of accounts in which to perform this operation at one time. +* `max_concurrent_percentage` - (Optional) Maximum percentage of accounts in which to perform this operation at one time. +* `region_concurrency_type` - (Optional) Concurrency type of deploying StackSets operations in Regions, could be in parallel or one Region at a time. Valid values are `SEQUENTIAL` and `PARALLEL`. +* `region_order` - (Optional) Order of the Regions in where you want to perform the stack operation. ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `id` - Unique identifier for the resource. If `deployment_targets` is set, this is a comma-delimited string combining stack set name, organizational unit IDs (`/`-delimited), and region (ie. `mystack,ou-123/ou-456,us-east-1`). 
Otherwise, this is a comma-delimited string combining stack set name, AWS account ID, and region (ie. `mystack,123456789012,us-east-1`). -* `organizational_unit_id` - The organization root ID or organizational unit (OU) ID in which the stack is deployed. +* `organizational_unit_id` - Organization root ID or organizational unit (OU) ID in which the stack is deployed. * `stack_id` - Stack identifier. * `stack_instance_summaries` - List of stack instances created from an organizational unit deployment target. This will only be populated when `deployment_targets` is set. See [`stack_instance_summaries`](#stack_instance_summaries-attribute-reference). diff --git a/website/docs/r/cloudtrail_event_data_store.html.markdown b/website/docs/r/cloudtrail_event_data_store.html.markdown index a057e81bf83..54e8b8270ed 100644 --- a/website/docs/r/cloudtrail_event_data_store.html.markdown +++ b/website/docs/r/cloudtrail_event_data_store.html.markdown @@ -79,6 +79,7 @@ resource "aws_cloudtrail_event_data_store" "example" { This resource supports the following arguments: - `name` - (Required) The name of the event data store. +- `billing_mode` - (Optional) The billing mode for the event data store. The valid values are `EXTENDABLE_RETENTION_PRICING` and `FIXED_RETENTION_PRICING`. Defaults to `EXTENDABLE_RETENTION_PRICING`. - `advanced_event_selector` - (Optional) The advanced event selectors to use to select the events for the data store. For more information about how to use advanced event selectors, see [Log events by using advanced event selectors](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/logging-data-events-with-cloudtrail.html#creating-data-event-selectors-advanced) in the CloudTrail User Guide. - `multi_region_enabled` - (Optional) Specifies whether the event data store includes events from all regions, or only from the region in which the event data store is created. Default: `true`. 
- `organization_enabled` - (Optional) Specifies whether an event data store collects events logged for an organization in AWS Organizations. Default: `false`. diff --git a/website/docs/r/cloudwatch_log_account_policy.html.markdown b/website/docs/r/cloudwatch_log_account_policy.html.markdown new file mode 100644 index 00000000000..55fb7e3faf8 --- /dev/null +++ b/website/docs/r/cloudwatch_log_account_policy.html.markdown @@ -0,0 +1,94 @@ +--- +subcategory: "CloudWatch Logs" +layout: "aws" +page_title: "AWS: aws_cloudwatch_log_account_policy" +description: |- + Provides a CloudWatch Log Account Policy resource. +--- + +# Resource: aws_cloudwatch_log_account_policy + +Provides a CloudWatch Log Account Policy resource. + +## Example Usage + +### Account Data Protection Policy + +```terraform +resource "aws_cloudwatch_log_account_policy" "data_protection" { + policy_name = "data-protection" + policy_type = "DATA_PROTECTION_POLICY" + policy_document = jsonencode({ + Name = "DataProtection" + Version = "2021-06-01" + + Statement = [ + { + Sid = "Audit" + DataIdentifier = ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"] + Operation = { + Audit = { + FindingsDestination = {} + } + } + }, + { + Sid = "Redact" + DataIdentifier = ["arn:aws:dataprotection::aws:data-identifier/EmailAddress"] + Operation = { + Deidentify = { + MaskConfig = {} + } + } + } + ] + }) +} +``` + +### Subscription Filter Policy + +```terraform +resource "aws_cloudwatch_log_account_policy" "subscription_filter" { + policy_name = "subscription-filter" + policy_type = "SUBSCRIPTION_FILTER_POLICY" + policy_document = jsonencode( + { + DestinationArn = "${aws_lambda_function.test.arn}" + FilterPattern = "test" + } + ) + selection_criteria = "LogGroupName NOT IN [\"excluded_log_group_name\"]" +} +``` + +## Argument Reference + +This resource supports the following arguments: + +* `policy_document` - (Required) Text of the account policy. 
Refer to the [AWS docs](https://docs.aws.amazon.com/cli/latest/reference/logs/put-account-policy.html) for more information. +* `policy_type` - (Required) Type of account policy. Either `DATA_PROTECTION_POLICY` or `SUBSCRIPTION_FILTER_POLICY`. You can have one account policy per type in an account. +* `policy_name` - (Required) Name of the account policy. +* `scope` - (Optional) Currently defaults to and only accepts the value: `ALL`. +* `selection_criteria` - (Optional) - Criteria for applying a subscription filter policy to a selection of log groups. The only allowable criteria selector is `LogGroupName NOT IN []`. + +## Attribute Reference + +This resource exports no additional attributes. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import this resource using the `policy_name` and `policy_type` fields separated by `:`. For example: + +```terraform +import { + to = aws_cloudwatch_log_account_policy.example + id = "my-account-policy:SUBSCRIPTION_FILTER_POLICY" +} +``` + +Using `terraform import`, import this resource using the `policy_name` and `policy_type` separated by `:`. For example: + +```console +% terraform import aws_cloudwatch_log_account_policy.example "my-account-policy:SUBSCRIPTION_FILTER_POLICY" +``` diff --git a/website/docs/r/cloudwatch_metric_alarm.html.markdown b/website/docs/r/cloudwatch_metric_alarm.html.markdown index b468e10f65b..983b72154f9 100644 --- a/website/docs/r/cloudwatch_metric_alarm.html.markdown +++ b/website/docs/r/cloudwatch_metric_alarm.html.markdown @@ -174,7 +174,7 @@ You must choose one or the other See [related part of AWS Docs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html) for details about valid values. -This argument supports the following arguments: +This resource supports the following arguments: * `alarm_name` - (Required) The descriptive name for the alarm. 
This name must be unique within the user's AWS account * `comparison_operator` - (Required) The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Either of the following is supported: `GreaterThanOrEqualToThreshold`, `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`. Additionally, the values `LessThanLowerOrGreaterThanUpperThreshold`, `LessThanLowerThreshold`, and `GreaterThanUpperThreshold` are used only for alarms based on anomaly detection models. diff --git a/website/docs/r/codebuild_webhook.html.markdown b/website/docs/r/codebuild_webhook.html.markdown index 974744511ab..03e12f8ff18 100644 --- a/website/docs/r/codebuild_webhook.html.markdown +++ b/website/docs/r/codebuild_webhook.html.markdown @@ -72,6 +72,7 @@ This resource supports the following arguments: * `build_type` - (Optional) The type of build this webhook will trigger. Valid values for this parameter are: `BUILD`, `BUILD_BATCH`. * `branch_filter` - (Optional) A regular expression used to determine which branches get built. Default is all branches are built. We recommend using `filter_group` over `branch_filter`. * `filter_group` - (Optional) Information about the webhook's trigger. Filter group blocks are documented below. +* `scope_configuration` - (Optional) Scope configuration for global or organization webhooks. Scope configuration blocks are documented below. `filter_group` supports the following: @@ -80,9 +81,15 @@ This resource supports the following arguments: `filter` supports the following: * `type` - (Required) The webhook filter group's type. Valid values for this parameter are: `EVENT`, `BASE_REF`, `HEAD_REF`, `ACTOR_ACCOUNT_ID`, `FILE_PATH`, `COMMIT_MESSAGE`, `WORKFLOW_NAME`, `TAG_NAME`, `RELEASE_NAME`. At least one filter group must specify `EVENT` as its type. 
-* `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED` works with GitHub & GitHub Enterprise only. For a filter that uses any of the other filter types, a regular expression. +* `pattern` - (Required) For a filter that uses `EVENT` type, a comma-separated string that specifies one event: `PUSH`, `PULL_REQUEST_CREATED`, `PULL_REQUEST_UPDATED`, `PULL_REQUEST_REOPENED`. `PULL_REQUEST_MERGED`, `WORKFLOW_JOB_QUEUED` works with GitHub & GitHub Enterprise only. For a filter that uses any of the other filter types, a regular expression. * `exclude_matched_pattern` - (Optional) If set to `true`, the specified filter does *not* trigger a build. Defaults to `false`. +`scope_configuration` supports the following: + +* `name` - (Required) The name of either the enterprise or organization. +* `scope` - (Required) The type of scope for a GitHub webhook. Valid values for this parameter are: `GITHUB_ORGANIZATION`, `GITHUB_GLOBAL`. +* `domain` - (Optional) The domain of the GitHub Enterprise organization. Required if your project's source type is GITHUB_ENTERPRISE. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/config_conformance_pack.html.markdown b/website/docs/r/config_conformance_pack.html.markdown index 8fd8ba3ee0b..a936e0e637d 100644 --- a/website/docs/r/config_conformance_pack.html.markdown +++ b/website/docs/r/config_conformance_pack.html.markdown @@ -82,7 +82,7 @@ EOT ~> **Note:** If both `template_body` and `template_s3_uri` are specified, AWS Config uses the `template_s3_uri` and ignores the `template_body`. -This argument supports the following arguments: +This resource supports the following arguments: * `name` - (Required, Forces new resource) The name of the conformance pack. 
Must begin with a letter and contain from 1 to 256 alphanumeric characters and hyphens. * `delivery_s3_bucket` - (Optional) Amazon S3 bucket where AWS Config stores conformance pack templates. Maximum length of 63. diff --git a/website/docs/r/controltower_control.html.markdown b/website/docs/r/controltower_control.html.markdown index 786a962a715..22d1a1d2502 100644 --- a/website/docs/r/controltower_control.html.markdown +++ b/website/docs/r/controltower_control.html.markdown @@ -28,20 +28,35 @@ resource "aws_controltower_control" "example" { for x in data.aws_organizations_organizational_units.example.children : x.arn if x.name == "Infrastructure" ][0] + + parameters { + key = "AllowedRegions" + value = jsonencode(["us-east-1"]) + } } ``` ## Argument Reference -This resource supports the following arguments: +The following arguments are required: * `control_identifier` - (Required) The ARN of the control. Only Strongly recommended and Elective controls are permitted, with the exception of the Region deny guardrail. * `target_identifier` - (Required) The ARN of the organizational unit. +The following arguments are optional: + +* `parameters` - (Optional) Parameter values which are specified to configure the control when you enable it. See [Parameters](#parameters) for more details. + +### Parameters + +* `key` - (Required) The name of the parameter. +* `value` - (Required) The value of the parameter. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: +* `arn` - The ARN of the EnabledControl resource. * `id` - The ARN of the organizational unit.
## Import diff --git a/website/docs/r/datazone_project.html.markdown b/website/docs/r/datazone_project.html.markdown new file mode 100644 index 00000000000..0a2880090df --- /dev/null +++ b/website/docs/r/datazone_project.html.markdown @@ -0,0 +1,83 @@ +--- +subcategory: "DataZone" +layout: "aws" +page_title: "AWS: aws_datazone_project" +description: |- + Terraform resource for managing an Amazon DataZone Project. +--- +# Resource: aws_datazone_project + +Terraform resource for managing an AWS DataZone Project. + +## Example Usage + +```terraform +resource "aws_datazone_project" "test" { + domain_id = aws_datazone_domain.test.id + glossary_terms = ["2N8w6XJCwZf"] + name = "name" + description = "desc" + skip_deletion_check = true +} +``` + +### Basic Usage + +```terraform +resource "aws_datazone_project" "test" { + domain_identifier = aws_datazone_domain.test.id + name = "name" +} +``` + +## Argument Reference + +The following arguments are required: + +* `domain_identifier` - (Required) Identifier of domain which the project is part of. Must follow the regex of ^dzd[-_][a-zA-Z0-9_-]{1,36}$. +* `name` - (Required) Name of the project. Must follow the regex of ^[\w -]+$. and have a length of at most 64. + +The following arguments are optional: + +* `skip_deletion_check` - (Optional) Optional flag to delete all child entities within the project. +* `description` - (Optional) Description of project. +* `glossary_terms` - (Optional) List of glossary terms that can be used in the project. The list cannot be empty or include over 20 values. Each value must follow the regex of [a-zA-Z0-9_-]{1,36}$. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `created_by` - Creator of the project. +* `domain_id` - Id of the project's DataZone domain. +* `id` - Id of the project. +* `name` - Name of the project. +* `created_at` - Timestamp of when the project was made. +* `description` - Description of the project. 
+* `failure_reasons` - List of error messages if operation cannot be completed. +* `glossary_terms` - Business glossary terms that can be used in the project. +* `last_updated_at` - Timestamp of when the project was last updated. +* `project_status` - Enum that conveys state of project. Can be ACTIVE, DELETING, or DELETE_FAILED. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `10m`) +* `delete` - (Default `10m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DataZone Project using the `id`. For example: + +```terraform +import { + to = aws_datazone_project.example + id = "projectid123" +} +``` + +Using `terraform import`, import DataZone Project using the `id`. For example: + +```console +% terraform import aws_datazone_project.example projectid123 +``` diff --git a/website/docs/r/db_instance.html.markdown b/website/docs/r/db_instance.html.markdown index 241fa52092f..d18ae8831f0 100644 --- a/website/docs/r/db_instance.html.markdown +++ b/website/docs/r/db_instance.html.markdown @@ -277,7 +277,7 @@ resource "aws_db_instance" "default" { For more detailed documentation about each argument, refer to the [AWS official documentation](http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). -This argument supports the following arguments: +This resource supports the following arguments: * `allocated_storage` - (Required unless a `snapshot_identifier` or `replicate_source_db` is provided) The allocated storage in gibibytes. If `max_allocated_storage` is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs. If `replicate_source_db` is set, the value is ignored during the creation of the instance. 
* `allow_major_version_upgrade` - (Optional) Indicates that major version @@ -330,6 +330,7 @@ for additional read replica constraints. * `enabled_cloudwatch_logs_exports` - (Optional) Set of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. For supported values, see the EnableCloudwatchLogsExports.member.N parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). * `engine` - (Required unless a `snapshot_identifier` or `replicate_source_db` is provided) The database engine to use. For supported values, see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). Note that for Amazon Aurora instances the engine must match the [DB cluster](/docs/providers/aws/r/rds_cluster.html)'s engine'. For information on the difference between the available Aurora MySQL engines see [Comparison between Aurora MySQL 1 and Aurora MySQL 2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AuroraMySQL.Updates.20180206.html) in the Amazon RDS User Guide. * `engine_version` - (Optional) The engine version to use. If `auto_minor_version_upgrade` is enabled, you can provide a prefix of the version such as `8.0` (for `8.0.36`). The actual engine version used is returned in the attribute `engine_version_actual`, see [Attribute Reference](#attribute-reference) below. For supported values, see the EngineVersion parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html). Note that for Amazon Aurora instances the engine version must match the [DB cluster](/docs/providers/aws/r/rds_cluster.html)'s engine version'. +* `engine_lifecycle_support` - (Optional) The life cycle type for this DB instance. This setting applies only to RDS for MySQL and RDS for PostgreSQL. 
Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. [Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `final_snapshot_identifier` - (Optional) The name of your final DB snapshot when this DB instance is deleted. Must be provided if `skip_final_snapshot` is set to `false`. The value must begin with a letter, only contain alphanumeric characters and hyphens, and not end with a hyphen or contain two consecutive hyphens. Must not be provided when deleting a read replica. diff --git a/website/docs/r/db_proxy_default_target_group.html.markdown b/website/docs/r/db_proxy_default_target_group.html.markdown index 41aed35dfa2..889caac9624 100644 --- a/website/docs/r/db_proxy_default_target_group.html.markdown +++ b/website/docs/r/db_proxy_default_target_group.html.markdown @@ -64,7 +64,7 @@ This resource supports the following arguments: * `init_query` - (Optional) One or more SQL statements for the proxy to run when opening each new database connection. Typically used with `SET` statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single `SET` statement, such as `SET x=1, y=2`. * `max_connections_percent` - (Optional) The maximum size of the connection pool for each target in a target group. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. * `max_idle_connections_percent` - (Optional) Controls how actively the proxy closes idle database connections in the connection pool. A high value enables the proxy to leave a high percentage of idle connections open. 
A low value causes the proxy to close idle client connections and return the underlying database connections to the connection pool. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group. -* `session_pinning_filters` - (Optional) Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is `EXCLUDE_VARIABLE_SETS`. +* `session_pinning_filters` - (Optional) Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. This setting is only supported for MySQL engine family databases. Currently, the only allowed value is `EXCLUDE_VARIABLE_SETS`. ## Attribute Reference diff --git a/website/docs/r/docdb_cluster.html.markdown b/website/docs/r/docdb_cluster.html.markdown index 8309fd16228..f54c6c9690a 100644 --- a/website/docs/r/docdb_cluster.html.markdown +++ b/website/docs/r/docdb_cluster.html.markdown @@ -40,7 +40,7 @@ resource "aws_docdb_cluster" "docdb" { For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/docdb/create-db-cluster.html). -This argument supports the following arguments: +This resource supports the following arguments: * `allow_major_version_upgrade` - (Optional) A value that indicates whether major version upgrades are allowed. Constraints: You must allow major version upgrades when specifying a value for the EngineVersion parameter that is a different major version than the DB cluster's current version. 
* `apply_immediately` - (Optional) Specifies whether any cluster modifications @@ -53,7 +53,7 @@ This argument supports the following arguments: * `cluster_identifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `db_subnet_group_name` - (Optional) A DB subnet group to associate with this DB instance. * `db_cluster_parameter_group_name` - (Optional) A cluster parameter group to associate with the cluster. -* `deletion_protection` - (Optional) A value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. By default, deletion protection is disabled. +* `deletion_protection` - (Optional) A boolean value that indicates whether the DB cluster has deletion protection enabled. The database can't be deleted when deletion protection is enabled. Defaults to `false`. * `enabled_cloudwatch_logs_exports` - (Optional) List of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `profiler`. * `engine_version` - (Optional) The database engine version. Updating this argument results in an outage. diff --git a/website/docs/r/docdb_cluster_instance.html.markdown b/website/docs/r/docdb_cluster_instance.html.markdown index 1d213d1895d..70724eb6fa9 100644 --- a/website/docs/r/docdb_cluster_instance.html.markdown +++ b/website/docs/r/docdb_cluster_instance.html.markdown @@ -40,7 +40,7 @@ resource "aws_docdb_cluster" "default" { For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/docdb/create-db-instance.html). -This argument supports the following arguments: +This resource supports the following arguments: * `apply_immediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. 
diff --git a/website/docs/r/drs_replication_configuration_template.html.markdown b/website/docs/r/drs_replication_configuration_template.html.markdown new file mode 100644 index 00000000000..a0b4a7408e4 --- /dev/null +++ b/website/docs/r/drs_replication_configuration_template.html.markdown @@ -0,0 +1,123 @@ +--- +subcategory: "DRS (Elastic Disaster Recovery)" +layout: "aws" +page_title: "AWS: aws_drs_replication_configuration_template" +description: |- + Provides an Elastic Disaster Recovery replication configuration template resource. +--- + +# Resource: aws_drs_replication_configuration_template + +Provides an Elastic Disaster Recovery replication configuration template resource. Before using DRS, your account must be [initialized](https://docs.aws.amazon.com/drs/latest/userguide/getting-started-initializing.html). + +~> **NOTE:** Your configuration must use the PIT policy shown in the [basic configuration](#basic-configuration) due to AWS rules. The only value that you can change is the `retention_duration` of `rule_id` 3.
+ +## Example Usage + +### Basic configuration + +```terraform +resource "aws_drs_replication_configuration_template" "example" { + associate_default_security_group = false + bandwidth_throttling = 12 + create_public_ip = false + data_plane_routing = "PRIVATE_IP" + default_large_staging_disk_type = "GP2" + ebs_encryption = "DEFAULT" + ebs_encryption_key_arn = "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + replication_server_instance_type = "t3.small" + replication_servers_security_groups_ids = aws_security_group.example[*].id + staging_area_subnet_id = aws_subnet.example.id + use_dedicated_replication_server = false + + pit_policy { + enabled = true + interval = 10 + retention_duration = 60 + units = "MINUTE" + rule_id = 1 + } + + pit_policy { + enabled = true + interval = 1 + retention_duration = 24 + units = "HOUR" + rule_id = 2 + } + + pit_policy { + enabled = true + interval = 1 + retention_duration = 3 + units = "DAY" + rule_id = 3 + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `associate_default_security_group` - (Required) Whether to associate the default Elastic Disaster Recovery Security group with the Replication Configuration Template. +* `bandwidth_throttling` - (Required) Configure bandwidth throttling for the outbound data transfer rate of the Source Server in Mbps. +* `create_public_ip` - (Required) Whether to create a Public IP for the Recovery Instance by default. +* `data_plane_routing` - (Required) Data plane routing mechanism that will be used for replication. Valid values are `PUBLIC_IP` and `PRIVATE_IP`. +* `default_large_staging_disk_type` - (Required) Staging Disk EBS volume type to be used during replication. Valid values are `GP2`, `GP3`, `ST1`, or `AUTO`. +* `ebs_encryption` - (Required) Type of EBS encryption to be used during replication. Valid values are `DEFAULT` and `CUSTOM`.
+* `ebs_encryption_key_arn` - (Required) ARN of the EBS encryption key to be used during replication. +* `pit_policy` - (Required) Configuration block for Point in time (PIT) policy to manage snapshots taken during replication. [See below](#pit_policy). +* `replication_server_instance_type` - (Required) Instance type to be used for the replication server. +* `replication_servers_security_groups_ids` - (Required) Security group IDs that will be used by the replication server. +* `staging_area_subnet_id` - (Required) Subnet to be used by the replication staging area. +* `staging_area_tags` - (Required) Set of tags to be associated with all resources created in the replication staging area: EC2 replication server, EBS volumes, EBS snapshots, etc. +* `use_dedicated_replication_server` - (Required) Whether to use a dedicated Replication Server in the replication staging area. + +The following arguments are optional: + +* `auto_replicate_new_disks` - (Optional) Whether to allow the AWS replication agent to automatically replicate newly added disks. +* `tags` - (Optional) Set of tags to be associated with the Replication Configuration Template resource. + +### `pit_policy` + +The PIT policies _must_ be specified as shown in the [basic configuration example](#basic-configuration) above. The only value that you can change is the `retention_duration` of `rule_id` 3. + +* `enabled` - (Optional) Whether this rule is enabled or not. +* `interval` - (Required) How often, in the chosen units, a snapshot should be taken. +* `retention_duration` - (Required) Duration to retain a snapshot for, in the chosen `units`. +* `rule_id` - (Optional) ID of the rule. Valid values are integers. +* `units` - (Required) Units used to measure the `interval` and `retention_duration`. Valid values are `MINUTE`, `HOUR`, and `DAY`. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Replication configuration template ARN. 
+* `id` - Replication configuration template ID. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `20m`) +- `update` - (Default `20m`) +- `delete` - (Default `20m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import DRS Replication Configuration Template using the `id`. For example: + +```terraform +import { + to = aws_drs_replication_configuration_template.example + id = "templateid" +} +``` + +Using `terraform import`, import DRS Replication Configuration Template using the `id`. For example: + +```console +% terraform import aws_drs_replication_configuration_template.example templateid +``` diff --git a/website/docs/r/dx_gateway_association.html.markdown b/website/docs/r/dx_gateway_association.html.markdown index 570a59952ff..4a0a42f55de 100644 --- a/website/docs/r/dx_gateway_association.html.markdown +++ b/website/docs/r/dx_gateway_association.html.markdown @@ -95,7 +95,7 @@ A full example of how to create a VPN Gateway in one AWS account, create a Direc ~> **NOTE:** If the `associated_gateway_id` is in another region, an [alias](https://developer.hashicorp.com/terraform/language/providers/configuration#alias-multiple-provider-configurations) in a new provider block for that region should be specified. -This argument supports the following arguments: +This resource supports the following arguments: * `dx_gateway_id` - (Required) The ID of the Direct Connect gateway. * `associated_gateway_id` - (Optional) The ID of the VGW or transit gateway with which to associate the Direct Connect gateway. 
diff --git a/website/docs/r/dynamodb_table.html.markdown b/website/docs/r/dynamodb_table.html.markdown index a708e401e50..ab4672b0dbe 100644 --- a/website/docs/r/dynamodb_table.html.markdown +++ b/website/docs/r/dynamodb_table.html.markdown @@ -55,7 +55,7 @@ resource "aws_dynamodb_table" "basic-dynamodb-table" { ttl { attribute_name = "TimeToExist" - enabled = false + enabled = true } global_secondary_index { @@ -261,8 +261,10 @@ Optional arguments: ### `ttl` -* `enabled` - (Required) Whether TTL is enabled. -* `attribute_name` - (Required) Name of the table attribute to store the TTL timestamp in. +* `attribute_name` - (Optional) Name of the table attribute to store the TTL timestamp in. + Required if `enabled` is `true`, must not be set otherwise. +* `enabled` - (Optional) Whether TTL is enabled. + Default value is `false`. ## Attribute Reference diff --git a/website/docs/r/dynamodb_table_item.html.markdown b/website/docs/r/dynamodb_table_item.html.markdown index cedde5c7f2a..e5f43bced9d 100644 --- a/website/docs/r/dynamodb_table_item.html.markdown +++ b/website/docs/r/dynamodb_table_item.html.markdown @@ -48,7 +48,7 @@ resource "aws_dynamodb_table" "example" { ~> **Note:** Names included in `item` are represented internally with everything but letters removed. There is the possibility of collisions if two names, once filtered, are the same. For example, the names `your-name-here` and `yournamehere` will overlap and cause an error. -This argument supports the following arguments: +This resource supports the following arguments: * `hash_key` - (Required) Hash key to use for lookups and identification of the item * `item` - (Required) JSON representation of a map of attribute name/value pairs, one for each attribute. Only the primary key attributes are required; you can optionally provide other attribute name-value pairs for the item. 
diff --git a/website/docs/r/ec2_capacity_reservation.html.markdown b/website/docs/r/ec2_capacity_reservation.html.markdown index 62f4afcb473..2ef940ffa54 100644 --- a/website/docs/r/ec2_capacity_reservation.html.markdown +++ b/website/docs/r/ec2_capacity_reservation.html.markdown @@ -48,6 +48,14 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - The ARN of the Capacity Reservation. * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `10m`) +- `update` - (Default `10m`) +- `delete` - (Default `10m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Capacity Reservations using the `id`. For example: diff --git a/website/docs/r/ec2_fleet.html.markdown b/website/docs/r/ec2_fleet.html.markdown index cd02229e1e0..16563a632ff 100644 --- a/website/docs/r/ec2_fleet.html.markdown +++ b/website/docs/r/ec2_fleet.html.markdown @@ -130,6 +130,7 @@ This configuration block supports the following: * `instance_generations` - (Optional) Indicates whether current or previous generation instance types are included. The current generation instance types are recommended for use. Valid values are `current` and `previous`. Default is `current` and `previous` generation instance types. * `local_storage` - (Optional) Indicate whether instance types with local storage volumes are `included`, `excluded`, or `required`. Default is `included`. * `local_storage_types` - (Optional) List of local storage type names. Valid values are `hdd` and `ssd`. Default any storage type. 
+* `max_spot_price_as_percentage_of_optimal_on_demand_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spot_max_price_percentage_over_lowest_price` * `memory_gib_per_vcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. * `min` - (Optional) The minimum amount of memory per vCPU, in GiB. To specify no minimum limit, omit this parameter. * `max` - (Optional) The maximum amount of memory per vCPU, in GiB. To specify no maximum limit, omit this parameter. @@ -147,7 +148,7 @@ This configuration block supports the following: If you set `target_capacity_unit_type` to `vcpu` or `memory-mib`, the price protection threshold is applied based on the per-vCPU or per-memory price instead of the per-instance price. * `require_hibernate_support` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. -* `spot_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. 
To turn off price protection, specify a high value, such as 999999. Default is 100. +* `spot_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `max_spot_price_as_percentage_of_optimal_on_demand_price` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. diff --git a/website/docs/r/ec2_network_insights_path.html.markdown b/website/docs/r/ec2_network_insights_path.html.markdown index a76f57a9557..76382d10f75 100644 --- a/website/docs/r/ec2_network_insights_path.html.markdown +++ b/website/docs/r/ec2_network_insights_path.html.markdown @@ -25,7 +25,7 @@ resource "aws_ec2_network_insights_path" "test" { The following arguments are required: * `source` - (Required) ID or ARN of the resource which is the source of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. -* `destination` - (Required) ID or ARN of the resource which is the destination of the path. Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. +* `destination` - (Optional) ID or ARN of the resource which is the destination of the path. 
Can be an Instance, Internet Gateway, Network Interface, Transit Gateway, VPC Endpoint, VPC Peering Connection or VPN Gateway. If the resource is in another account, you must specify an ARN. * `protocol` - (Required) Protocol to use for analysis. Valid options are `tcp` or `udp`. The following arguments are optional: diff --git a/website/docs/r/ec2_transit_gateway_peering_attachment.html.markdown b/website/docs/r/ec2_transit_gateway_peering_attachment.html.markdown index 6710ef02c97..10073831213 100644 --- a/website/docs/r/ec2_transit_gateway_peering_attachment.html.markdown +++ b/website/docs/r/ec2_transit_gateway_peering_attachment.html.markdown @@ -65,9 +65,16 @@ This resource supports the following arguments: * `peer_account_id` - (Optional) Account ID of EC2 Transit Gateway to peer with. Defaults to the account ID the [AWS provider][1] is currently connected to. * `peer_region` - (Required) Region of EC2 Transit Gateway to peer with. * `peer_transit_gateway_id` - (Required) Identifier of EC2 Transit Gateway to peer with. +* `options` - (Optional) Describes whether dynamic routing is enabled or disabled for the transit gateway peering request. See [options](#options) below for more details. * `tags` - (Optional) Key-value tags for the EC2 Transit Gateway Peering Attachment. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `transit_gateway_id` - (Required) Identifier of EC2 Transit Gateway. +### options + +The `options` block supports the following: + +* `dynamic_routing` - (Optional) Indicates whether dynamic routing is enabled or disabled. Supports `enable` and `disable`.
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/ecs_cluster.html.markdown b/website/docs/r/ecs_cluster.html.markdown index 1fa1a01a290..d318da77f63 100644 --- a/website/docs/r/ecs_cluster.html.markdown +++ b/website/docs/r/ecs_cluster.html.markdown @@ -23,7 +23,7 @@ resource "aws_ecs_cluster" "foo" { } ``` -### Example with Log Configuration +### Execute Command Configuration with Override Logging ```terraform resource "aws_kms_key" "example" { @@ -52,42 +52,153 @@ resource "aws_ecs_cluster" "test" { } ``` +### Fargate Ephemeral Storage Encryption with Customer-Managed KMS Key + +```terraform +data "aws_caller_identity" "current" {} + +resource "aws_kms_key" "example" { + description = "example" + deletion_window_in_days = 7 +} + +resource "aws_kms_key_policy" "example" { + key_id = aws_kms_key.example.id + policy = jsonencode({ + Id = "ECSClusterFargatePolicy" + Statement = [ + { + Sid = "Enable IAM User Permissions" + Effect = "Allow" + Principal = { + "AWS" : "*" + } + Action = "kms:*" + Resource = "*" + }, + { + Sid = "Allow generate data key access for Fargate tasks." + Effect = "Allow" + Principal = { + Service = "fargate.amazonaws.com" + } + Action = [ + "kms:GenerateDataKeyWithoutPlaintext" + ] + Condition = { + StringEquals = { + "kms:EncryptionContext:aws:ecs:clusterAccount" = [ + data.aws_caller_identity.current.account_id + ] + "kms:EncryptionContext:aws:ecs:clusterName" = [ + "example" + ] + } + } + Resource = "*" + }, + { + Sid = "Allow grant creation permission for Fargate tasks." 
+ Effect = "Allow" + Principal = { + Service = "fargate.amazonaws.com" + } + Action = [ + "kms:CreateGrant" + ] + Condition = { + StringEquals = { + "kms:EncryptionContext:aws:ecs:clusterAccount" = [ + data.aws_caller_identity.current.account_id + ] + "kms:EncryptionContext:aws:ecs:clusterName" = [ + "example" + ] + } + "ForAllValues:StringEquals" = { + "kms:GrantOperations" = [ + "Decrypt" + ] + } + } + Resource = "*" + } + ] + Version = "2012-10-17" + }) +} + +resource "aws_ecs_cluster" "test" { + name = "example" + + configuration { + managed_storage_configuration { + fargate_ephemeral_storage_kms_key_id = aws_kms_key.example.id + } + } + depends_on = [ + aws_kms_key_policy.example + ] +} +``` + ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `configuration` - (Optional) The execute command configuration for the cluster. Detailed below. * `name` - (Required) Name of the cluster (up to 255 letters, numbers, hyphens, and underscores) -* `service_connect_defaults` - (Optional) Configures a default Service Connect namespace. Detailed below. -* `setting` - (Optional) Configuration block(s) with cluster settings. For example, this can be used to enable CloudWatch Container Insights for a cluster. Detailed below. + +The following arguments are optional: + +* `configuration` - (Optional) Execute command configuration for the cluster. See [`configuration` Block](#configuration-block) for details. +* `service_connect_defaults` - (Optional) Default Service Connect namespace. See [`service_connect_defaults` Block](#service_connect_defaults-block) for details. +* `setting` - (Optional) Configuration block(s) with cluster settings. For example, this can be used to enable CloudWatch Container Insights for a cluster. See [`setting` Block](#setting-block) for details. * `tags` - (Optional) Key-value map of resource tags.
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### `configuration` +### `configuration` Block + +The `configuration` configuration block supports the following arguments: + +* `execute_command_configuration` - (Optional) Details of the execute command configuration. See [`execute_command_configuration` Block](#execute_command_configuration-block) for details. +* `managed_storage_configuration` - (Optional) Details of the managed storage configuration. See [`managed_storage_configuration` Block](#managed_storage_configuration-block) for details. -* `execute_command_configuration` - (Optional) The details of the execute command configuration. Detailed below. +### `execute_command_configuration` Block -#### `execute_command_configuration` +The `execute_command_configuration` configuration block supports the following arguments: -* `kms_key_id` - (Optional) The AWS Key Management Service key ID to encrypt the data between the local client and the container. -* `log_configuration` - (Optional) The log configuration for the results of the execute command actions Required when `logging` is `OVERRIDE`. Detailed below. -* `logging` - (Optional) The log setting to use for redirecting logs for your execute command results. Valid values are `NONE`, `DEFAULT`, and `OVERRIDE`. +* `kms_key_id` - (Optional) AWS Key Management Service key ID to encrypt the data between the local client and the container. +* `log_configuration` - (Optional) Log configuration for the results of the execute command actions. Required when `logging` is `OVERRIDE`. See [`log_configuration` Block](#log_configuration-block) for details. +* `logging` - (Optional) Log setting to use for redirecting logs for your execute command results. Valid values: `NONE`, `DEFAULT`, `OVERRIDE`. 
-##### `log_configuration` +#### `log_configuration` Block -* `cloud_watch_encryption_enabled` - (Optional) Whether or not to enable encryption on the CloudWatch logs. If not specified, encryption will be disabled. +The `log_configuration` configuration block supports the following arguments: + +* `cloud_watch_encryption_enabled` - (Optional) Whether to enable encryption on the CloudWatch logs. If not specified, encryption will be disabled. * `cloud_watch_log_group_name` - (Optional) The name of the CloudWatch log group to send logs to. -* `s3_bucket_name` - (Optional) The name of the S3 bucket to send logs to. -* `s3_bucket_encryption_enabled` - (Optional) Whether or not to enable encryption on the logs sent to S3. If not specified, encryption will be disabled. -* `s3_key_prefix` - (Optional) An optional folder in the S3 bucket to place logs in. +* `s3_bucket_name` - (Optional) Name of the S3 bucket to send logs to. +* `s3_bucket_encryption_enabled` - (Optional) Whether to enable encryption on the logs sent to S3. If not specified, encryption will be disabled. +* `s3_key_prefix` - (Optional) Optional folder in the S3 bucket to place logs in. -### `setting` +### `managed_storage_configuration` Block -* `name` - (Required) Name of the setting to manage. Valid values: `containerInsights`. -* `value` - (Required) The value to assign to the setting. Valid values are `enabled` and `disabled`. +The `managed_storage_configuration` configuration block supports the following arguments: + +* `fargate_ephemeral_storage_kms_key_id` - (Optional) AWS Key Management Service key ID for the Fargate ephemeral storage. +* `kms_key_id` - (Optional) AWS Key Management Service key ID to encrypt the managed storage. 
+ +### `service_connect_defaults` Block -### `service_connect_defaults` +The `service_connect_defaults` configuration block supports the following arguments: -* `namespace` - (Required) The ARN of the [`aws_service_discovery_http_namespace`](/docs/providers/aws/r/service_discovery_http_namespace.html) that's used when you create a service and don't specify a Service Connect configuration. +* `namespace` - (Required) ARN of the [`aws_service_discovery_http_namespace`](/docs/providers/aws/r/service_discovery_http_namespace.html) that's used when you create a service and don't specify a Service Connect configuration. + +### `setting` Block + +The `setting` configuration block supports the following arguments: + +* `name` - (Required) Name of the setting to manage. Valid values: `containerInsights`. +* `value` - (Required) Value to assign to the setting. Valid values: `enabled`, `disabled`. ## Attribute Reference @@ -99,7 +210,7 @@ This resource exports the following attributes in addition to the arguments abov ## Import -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECS clusters using the `name`. For example: +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import ECS clusters using the cluster name. For example: ```terraform import { @@ -108,7 +219,7 @@ import { } ``` -Using `terraform import`, import ECS clusters using the `name`. For example: +Using `terraform import`, import ECS clusters using the cluster name. 
For example: ```console % terraform import aws_ecs_cluster.stateless stateless-app diff --git a/website/docs/r/eks_cluster.html.markdown b/website/docs/r/eks_cluster.html.markdown index de11b20772e..7a298d616ad 100644 --- a/website/docs/r/eks_cluster.html.markdown +++ b/website/docs/r/eks_cluster.html.markdown @@ -109,7 +109,7 @@ resource "aws_cloudwatch_log_group" "example" { ### Enabling IAM Roles for Service Accounts -Only available on Kubernetes version 1.13 and 1.14 clusters created or upgraded on or after September 3, 2019. For more information about this feature, see the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). +For more information about this feature, see the [EKS User Guide](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). ```terraform resource "aws_eks_cluster" "example" { @@ -215,6 +215,7 @@ The following arguments are required: The following arguments are optional: * `access_config` - (Optional) Configuration block for the access config associated with your cluster, see [Amazon EKS Access Entries](https://docs.aws.amazon.com/eks/latest/userguide/access-entries.html). +* `bootstrap_self_managed_addons` - (Optional) Install default unmanaged add-ons, such as `aws-cni`, `kube-proxy`, and CoreDNS during cluster creation. If `false`, you must manually install desired add-ons. Changing this value will force a new cluster to be created. Defaults to `true`. * `enabled_cluster_log_types` - (Optional) List of the desired control plane logging to enable. For more information, see [Amazon EKS Control Plane Logging](https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html). * `encryption_config` - (Optional) Configuration block with encryption configuration for the cluster. Only available on Kubernetes 1.13 and above clusters created after March 6, 2020. Detailed below. 
* `kubernetes_network_config` - (Optional) Configuration block with kubernetes network configuration for the cluster. Detailed below. If removed, Terraform will only perform drift detection if a configuration value is provided. @@ -227,7 +228,7 @@ The following arguments are optional: The `access_config` configuration block supports the following arguments: * `authentication_mode` - (Optional) The authentication mode for the cluster. Valid values are `CONFIG_MAP`, `API` or `API_AND_CONFIG_MAP` -* `bootstrap_cluster_creator_admin_permissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `true`. +* `bootstrap_cluster_creator_admin_permissions` - (Optional) Whether or not to bootstrap the access config values to the cluster. Default is `false`. ### encryption_config diff --git a/website/docs/r/elastictranscoder_pipeline.html.markdown b/website/docs/r/elastictranscoder_pipeline.html.markdown index d0248a13508..e16f00e56f1 100644 --- a/website/docs/r/elastictranscoder_pipeline.html.markdown +++ b/website/docs/r/elastictranscoder_pipeline.html.markdown @@ -34,7 +34,7 @@ resource "aws_elastictranscoder_pipeline" "bar" { See ["Create Pipeline"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-pipeline.html) in the AWS docs for reference. -This argument supports the following arguments: +This resource supports the following arguments: * `aws_kms_key_arn` - (Optional) The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline. * `content_config` - (Optional) The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. 
(documented below) diff --git a/website/docs/r/elastictranscoder_preset.html.markdown b/website/docs/r/elastictranscoder_preset.html.markdown index 018470cb85c..e230cc32c22 100644 --- a/website/docs/r/elastictranscoder_preset.html.markdown +++ b/website/docs/r/elastictranscoder_preset.html.markdown @@ -80,7 +80,7 @@ resource "aws_elastictranscoder_preset" "bar" { See ["Create Preset"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-preset.html) in the AWS docs for reference. -This argument supports the following arguments: +This resource supports the following arguments: * `audio` - (Optional, Forces new resource) Audio parameters object (documented below). * `audio_codec_options` - (Optional, Forces new resource) Codec options for the audio parameters (documented below) diff --git a/website/docs/r/emrserverless_application.html.markdown b/website/docs/r/emrserverless_application.html.markdown index d6d7dc90744..aebec5bac01 100644 --- a/website/docs/r/emrserverless_application.html.markdown +++ b/website/docs/r/emrserverless_application.html.markdown @@ -68,6 +68,7 @@ The following arguments are required: * `auto_stop_configuration` – (Optional) The configuration for an application to automatically stop after a certain amount of time being idle. * `image_configuration` – (Optional) The image configuration applied to all worker types. * `initial_capacity` – (Optional) The capacity to initialize when the application is created. +* `interactive_configuration` – (Optional) Enables the interactive use cases to use when running an application. * `maximum_capacity` – (Optional) The maximum capacity to allocate when the application is created. This is cumulative across all workers at any given point in time, not just when an application is created. No new resources will be created once any one of the defined limits is hit. * `name` – (Required) The name of the application. 
* `network_configuration` – (Optional) The network configuration for customer VPC connectivity. @@ -109,6 +110,11 @@ The following arguments are required: * `worker_configuration` - (Optional) The resource configuration of the initial capacity configuration. * `worker_count` - (Required) The number of workers in the initial capacity configuration. +### interactive_configuration Arguments + +* `livy_endpoint_enabled` - (Optional) Enables an Apache Livy endpoint that you can connect to and run interactive jobs. +* `studio_enabled` - (Optional) Enables you to connect an application to Amazon EMR Studio to run interactive workloads in a notebook. + ##### worker_configuration Arguments * `cpu` - (Required) The CPU requirements for every worker instance of the worker type. diff --git a/website/docs/r/fis_experiment_template.html.markdown b/website/docs/r/fis_experiment_template.html.markdown index e825d3b5212..e6d9fd8130c 100644 --- a/website/docs/r/fis_experiment_template.html.markdown +++ b/website/docs/r/fis_experiment_template.html.markdown @@ -59,10 +59,18 @@ The following arguments are required: The following arguments are optional: +* `experiment_options` - (Optional) The experiment options for the experiment template. See [experiment_options](#experiment_options) below for more details. * `tags` - (Optional) Key-value mapping of tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `target` - (Optional) Target of an action. See below. * `log_configuration` - (Optional) The configuration for experiment logging. See below. +### experiment_options + +The `experiment_options` block supports the following: + +* `account_targeting` - (Optional) Specifies the account targeting setting for experiment options. Supports `single-account` and `multi-account`.
+* `empty_target_resolution_mode` - (Optional) Specifies the empty target resolution mode for experiment options. Supports `fail` and `skip`. + ### `action` * `action_id` - (Required) ID of the action. To find out what actions are supported see [AWS FIS actions reference](https://docs.aws.amazon.com/fis/latest/userguide/fis-actions-reference.html). diff --git a/website/docs/r/flow_log.html.markdown b/website/docs/r/flow_log.html.markdown index 6f12e22a7d5..b0da2610623 100644 --- a/website/docs/r/flow_log.html.markdown +++ b/website/docs/r/flow_log.html.markdown @@ -178,7 +178,7 @@ resource "aws_s3_bucket" "example" { ~> **NOTE:** One of `eni_id`, `subnet_id`, `transit_gateway_id`, `transit_gateway_attachment_id`, or `vpc_id` must be specified. -This argument supports the following arguments: +This resource supports the following arguments: * `traffic_type` - (Required) The type of traffic to capture. Valid values: `ACCEPT`,`REJECT`, `ALL`. * `deliver_cross_account_role` - (Optional) ARN of the IAM role that allows Amazon EC2 to publish flow logs across accounts. 
diff --git a/website/docs/r/fsx_ontap_file_system.html.markdown b/website/docs/r/fsx_ontap_file_system.html.markdown index 7b5946ae093..398a306b6b1 100644 --- a/website/docs/r/fsx_ontap_file_system.html.markdown +++ b/website/docs/r/fsx_ontap_file_system.html.markdown @@ -27,8 +27,31 @@ resource "aws_fsx_ontap_file_system" "test" { resource "aws_fsx_ontap_file_system" "testhapairs" { storage_capacity = 2048 subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + ha_pairs = 2 + throughput_capacity_per_ha_pair = 128 + preferred_subnet_id = aws_subnet.test1.id +} +``` + +```terraform +resource "aws_fsx_ontap_file_system" "testsingleazgen2" { + storage_capacity = 4096 + subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_2" - throughput_capacity_per_ha_pair = 3072 + ha_pairs = 4 + throughput_capacity_per_ha_pair = 384 + preferred_subnet_id = aws_subnet.test1.id +} +``` + +```terraform +resource "aws_fsx_ontap_file_system" "testmultiazgen2" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] + deployment_type = "MULTI_AZ_2" + ha_pairs = 1 + throughput_capacity_per_ha_pair = 384 preferred_subnet_id = aws_subnet.test1.id } ``` @@ -37,24 +60,24 @@ resource "aws_fsx_ontap_file_system" "testhapairs" { This resource supports the following arguments: -* `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `1024` and `196608` for file systems with deployment_type `SINGLE_AZ_1` and `MULTI_AZ_1`. Valid values between `2048` (`1024` per ha pair) and `1048576` for file systems with deployment_type `SINGLE_AZ_2`. +* `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `1024` and `196608` for file systems with deployment_type `SINGLE_AZ_1` and `MULTI_AZ_1`. Valid values are between `1024` and `524288` for `MULTI_AZ_2`. 
Valid values between `1024` (`1024` per ha pair) and `1048576` for file systems with deployment_type `SINGLE_AZ_2`. For `SINGLE_AZ_2`, the `1048576` (1PB) maximum is only supported when using 2 or more ha_pairs, the maximum is `524288` (512TB) when using 1 ha_pair. * `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. Up to 2 subnets can be provided. * `preferred_subnet_id` - (Required) The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. * `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. -* `deployment_type` - (Optional) - The filesystem deployment type. Supports `MULTI_AZ_1`, `SINGLE_AZ_1`, and `SINGLE_AZ_2`. +* `deployment_type` - (Optional) - The filesystem deployment type. Supports `MULTI_AZ_1`, `MULTI_AZ_2`, `SINGLE_AZ_1`, and `SINGLE_AZ_2`. * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days. * `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set. * `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system. See [Disk Iops Configuration](#disk-iops-configuration) below. 
* `endpoint_ip_address_range` - (Optional) Specifies the IP address range in which the endpoints to access your file system will be created. By default, Amazon FSx selects an unused IP address range for you from the 198.19.* range. -* `ha_pairs` - (Optional) - The number of ha_pairs to deploy for the file system. Valid values are 1 through 12. Value of 2 or greater required for `SINGLE_AZ_2`. Only value of 1 is supported with `SINGLE_AZ_1` or `MULTI_AZ_1` but not required. +* `ha_pairs` - (Optional) - The number of ha_pairs to deploy for the file system. Valid value is 1 for `SINGLE_AZ_1` or `MULTI_AZ_1` and `MULTI_AZ_2`. Valid values are 1 through 12 for `SINGLE_AZ_2`. * `storage_type` - (Optional) - The filesystem storage type. defaults to `SSD`. * `fsx_admin_password` - (Optional) The ONTAP administrative password for the fsxadmin user that you can use to administer your file system using the ONTAP CLI and REST API. * `route_table_ids` - (Optional) Specifies the VPC route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `throughput_capacity` - (Optional) Sets the throughput capacity (in MBps) for the file system that you're creating. Valid values are `128`, `256`, `512`, `1024`, `2048`, and `4096`. This parameter is only supported when not using the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. 
-* `throughput_capacity_per_ha_pair` - (Optional) Sets the throughput capacity (in MBps) for the file system that you're creating. Valid value when using 1 ha_pair are `128`, `256`, `512`, `1024`, `2048`, and `4096`. Valid values when using 2 or more ha_pairs are `3072`,`6144`. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. +* `throughput_capacity_per_ha_pair` - (Optional) Sets the per-HA-pair throughput capacity (in MBps) for the file system that you're creating, as opposed to `throughput_capacity` which specifies the total throughput capacity for the file system. Valid value for `MULTI_AZ_1` and `SINGLE_AZ_1` are `128`, `256`, `512`, `1024`, `2048`, and `4096`. Valid values for deployment type `MULTI_AZ_2` and `SINGLE_AZ_2` are `384`,`768`,`1536`,`3072`,`6144` where `ha_pairs` is `1`. Valid values for deployment type `SINGLE_AZ_2` are `1536`, `3072`, and `6144` where `ha_pairs` is greater than 1. This parameter is only supported when specifying the ha_pairs parameter. Either throughput_capacity or throughput_capacity_per_ha_pair must be specified. ### Disk Iops Configuration @@ -66,7 +89,9 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - Amazon Resource Name of the file system. -* `dns_name` - DNS name for the file system, e.g., `fs-12345678.fsx.us-west-2.amazonaws.com` +* `dns_name` - DNS name for the file system. + + **Note:** This attribute does not apply to FSx for ONTAP file systems and is consequently not set. You can access your FSx for ONTAP file system and volumes via a [Storage Virtual Machine (SVM)](fsx_ontap_storage_virtual_machine.html) using its DNS name or IP address. * `endpoints` - The endpoints that are used to access data or to manage the file system using the NetApp ONTAP CLI, REST API, or NetApp SnapMirror. See [Endpoints](#endpoints) below. 
* `id` - Identifier of the file system, e.g., `fs-12345678` * `network_interface_ids` - Set of Elastic Network Interface identifiers from which the file system is accessible The first network interface returned is the primary network interface. diff --git a/website/docs/r/fsx_ontap_storage_virtual_machine.html.markdown b/website/docs/r/fsx_ontap_storage_virtual_machine.html.markdown index 5c7ae45ff16..eff28c5eeed 100644 --- a/website/docs/r/fsx_ontap_storage_virtual_machine.html.markdown +++ b/website/docs/r/fsx_ontap_storage_virtual_machine.html.markdown @@ -51,6 +51,7 @@ This resource supports the following arguments: * `file_system_id` - (Required) The ID of the Amazon FSx ONTAP File System that this SVM will be created on. * `name` - (Required) The name of the SVM. You can use a maximum of 47 alphanumeric characters, plus the underscore (_) special character. * `root_volume_security_style` - (Optional) Specifies the root volume security style, Valid values are `UNIX`, `NTFS`, and `MIXED`. All volumes created under this SVM will inherit the root security style unless the security style is specified on the volume. Default value is `UNIX`. +* `svm_admin_password` - (Optional) Specifies the password to use when logging on to the SVM using a secure shell (SSH) connection to the SVM's management endpoint. Doing so enables you to manage the SVM using the NetApp ONTAP CLI or REST API. If you do not specify a password, you can still use the file system's fsxadmin user to manage the SVM. * `tags` - (Optional) A map of tags to assign to the storage virtual machine. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### active_directory_configuration diff --git a/website/docs/r/globalaccelerator_cross_account_attachment.html.markdown b/website/docs/r/globalaccelerator_cross_account_attachment.html.markdown index 7a7edd54f8c..9c4bbaa8969 100644 --- a/website/docs/r/globalaccelerator_cross_account_attachment.html.markdown +++ b/website/docs/r/globalaccelerator_cross_account_attachment.html.markdown @@ -44,6 +44,7 @@ The following arguments are optional: * `principals` - (Optional) List of AWS account IDs that are allowed to associate resources with the accelerator. * `resource` - (Optional) List of resources to be associated with the accelerator. + * `cidr_block` - (Optional) IP address range, in CIDR format, that is specified as resource. * `endpoint_id` - (Optional) The endpoint ID for the endpoint that is specified as a AWS resource. * `region` - (Optional) The AWS Region where a shared endpoint resource is located. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
diff --git a/website/docs/r/glue_connection.html.markdown b/website/docs/r/glue_connection.html.markdown index d1684448610..6482bca4d93 100644 --- a/website/docs/r/glue_connection.html.markdown +++ b/website/docs/r/glue_connection.html.markdown @@ -30,14 +30,14 @@ resource "aws_glue_connection" "example" { ```terraform -data "aws_secretmanager_secret" "example" { +data "aws_secretsmanager_secret" "example" { name = "example-secret" } resource "aws_glue_connection" "example" { connection_properties = { JDBC_CONNECTION_URL = "jdbc:mysql://example.com/exampledatabase" - SECRET_ID = data.aws_secretmanager_secret.example.name + SECRET_ID = data.aws_secretsmanager_secret.example.name } name = "example" @@ -72,7 +72,7 @@ resource "aws_glue_connection" "example" { # Define the custom connector using the connection_type of `CUSTOM` with the match_criteria of `template_connection` # Example here being a snowflake jdbc connector with a secret having user and password as keys -data "aws_secretmanager_secret" "example" { +data "aws_secretsmanager_secret" "example" { name = "example-secret" } @@ -101,7 +101,7 @@ resource "aws_glue_connection" "example_connection" { CONNECTION_TYPE = "Jdbc" CONNECTOR_URL = "s3://example/snowflake-jdbc.jar" JDBC_CONNECTION_URL = "jdbc:snowflake://example.com/?user=$${user}&password=$${password}" - SECRET_ID = data.aws_secretmanager_secret.example.name + SECRET_ID = data.aws_secretsmanager_secret.example.name } name = "example" match_criteria = ["Connection", aws_glue_connection.example_connector.name] diff --git a/website/docs/r/glue_crawler.html.markdown b/website/docs/r/glue_crawler.html.markdown index b33117b8148..ae50387c5de 100644 --- a/website/docs/r/glue_crawler.html.markdown +++ b/website/docs/r/glue_crawler.html.markdown @@ -130,7 +130,7 @@ resource "aws_glue_crawler" "events_crawler" { ~> **NOTE:** Must specify at least one of `dynamodb_target`, `jdbc_target`, `s3_target`, `mongodb_target` or `catalog_target`. 
-This argument supports the following arguments: +This resource supports the following arguments: * `database_name` (Required) Glue database where results are written. * `name` (Required) Name of the crawler. diff --git a/website/docs/r/glue_job.html.markdown b/website/docs/r/glue_job.html.markdown index 1fd6f3d3f35..7420edd3223 100644 --- a/website/docs/r/glue_job.html.markdown +++ b/website/docs/r/glue_job.html.markdown @@ -109,6 +109,7 @@ This resource supports the following arguments: * `execution_property` – (Optional) Execution property of the job. Defined below. * `glue_version` - (Optional) The version of glue to use, for example "1.0". Ray jobs should set this to 4.0 or greater. For information about available versions, see the [AWS Glue Release Notes](https://docs.aws.amazon.com/glue/latest/dg/release-notes.html). * `execution_class` - (Optional) Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources. Valid value: `FLEX`, `STANDARD`. +* `maintenance_window` – (Optional) Specifies the day of the week and hour for the maintenance window for streaming jobs. * `max_capacity` – (Optional) The maximum number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. `Required` when `pythonshell` is set, accept either `0.0625` or `1.0`. Use `number_of_workers` and `worker_type` arguments instead with `glue_version` `2.0` and above. * `max_retries` – (Optional) The maximum number of times to retry this job if it fails. * `name` – (Required) The name you assign to this job. It must be unique in your account. 
diff --git a/website/docs/r/grafana_workspace_service_account.html.markdown b/website/docs/r/grafana_workspace_service_account.html.markdown new file mode 100644 index 00000000000..309bee2eeb3 --- /dev/null +++ b/website/docs/r/grafana_workspace_service_account.html.markdown @@ -0,0 +1,57 @@ +--- +subcategory: "Managed Grafana" +layout: "aws" +page_title: "AWS: aws_grafana_workspace_service_account" +description: |- + Terraform resource for managing an Amazon Managed Grafana Workspace Service Account. +--- + +# Resource: aws_grafana_workspace_service_account + +-> **Note:** You cannot update a service account. If you change any attribute, Terraform +will delete the current and create a new one. + +Read about Service Accounts in the [Amazon Managed Grafana user guide](https://docs.aws.amazon.com/grafana/latest/userguide/service-accounts.html). + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_grafana_workspace_service_account" "example" { + name = "example-admin" + grafana_role = "ADMIN" + workspace_id = aws_grafana_workspace.example.id +} +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the service account. The name must be unique within the workspace, as it determines the ID associated with the service account. +* `grafana_role` - (Required) The permission level to use for this service account. For more information about the roles and the permissions each has, see the [User roles](https://docs.aws.amazon.com/grafana/latest/userguide/Grafana-user-roles.html) documentation. +* `workspace_id` - (Required) The Grafana workspace with which the service account is associated. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `service_account_id` - Identifier of the service account in the given Grafana workspace + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Managed Grafana Workspace Service Account using the `workspace_id` and `service_account_id` separated by a comma (`,`). For example: + +```terraform +import { + to = aws_grafana_workspace_service_account.example + id = "g-abc12345,1" +} +``` + +Using `terraform import`, import Managed Grafana Workspace Service Account using the `workspace_id` and `service_account_id` separated by a comma (`,`). For example: + +```console +% terraform import aws_grafana_workspace_service_account.example g-abc12345,1 +``` diff --git a/website/docs/r/grafana_workspace_service_account_token.html.markdown b/website/docs/r/grafana_workspace_service_account_token.html.markdown new file mode 100644 index 00000000000..87cba4631b1 --- /dev/null +++ b/website/docs/r/grafana_workspace_service_account_token.html.markdown @@ -0,0 +1,51 @@ +--- +subcategory: "Managed Grafana" +layout: "aws" +page_title: "AWS: aws_grafana_workspace_service_account_token" +description: |- + Terraform resource for managing an Amazon Managed Grafana Workspace Service Account Token. +--- + +# Resource: aws_grafana_workspace_service_account_token + +-> **Note:** You cannot update a service account token. If you change any attribute, Terraform +will delete the current and create a new one. + +Read about Service Accounts Tokens in the [Amazon Managed Grafana user guide](https://docs.aws.amazon.com/grafana/latest/userguide/service-accounts.html#service-account-tokens). 
+ +## Example Usage + +### Basic Usage + +```terraform +resource "aws_grafana_workspace_service_account" "example" { + name = "example-admin" + grafana_role = "ADMIN" + workspace_id = aws_grafana_workspace.example.id +} + +resource "aws_grafana_workspace_service_account_token" "example" { + name = "example-key" + service_account_id = aws_grafana_workspace_service_account.example.service_account_id + seconds_to_live = 3600 + workspace_id = aws_grafana_workspace.example.id +} +``` + +## Argument Reference + +The following arguments are required: + +* `name` - (Required) A name for the token to create. The name must be unique within the workspace. +* `seconds_to_live` - (Required) Sets how long the token will be valid, in seconds. You can set the time up to 30 days in the future. +* `service_account_id` - (Required) The ID of the service account for which to create a token. +* `workspace_id` - (Required) The Grafana workspace with which the service account token is associated. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `service_account_token_id` - Identifier of the service account token in the given Grafana workspace. +* `created_at` - Specifies when the service account token was created. +* `expires_at` - Specifies when the service account token will expire. +* `key` - The key for the service account token. Used when making calls to the Grafana HTTP APIs to authenticate and authorize the requests. diff --git a/website/docs/r/guardduty_organization_configuration.html.markdown b/website/docs/r/guardduty_organization_configuration.html.markdown index 886e55194b3..a29841981e3 100644 --- a/website/docs/r/guardduty_organization_configuration.html.markdown +++ b/website/docs/r/guardduty_organization_configuration.html.markdown @@ -48,7 +48,7 @@ resource "aws_guardduty_organization_configuration" "example" { ~> **NOTE:** One of `auto_enable` or `auto_enable_organization_members` must be specified. 
-This argument supports the following arguments: +This resource supports the following arguments: * `auto_enable` - (Optional) *Deprecated:* Use `auto_enable_organization_members` instead. When this setting is enabled, all new accounts that are created in, or added to, the organization are added as a member accounts of the organization’s GuardDuty delegated administrator and GuardDuty is enabled in that AWS Region. * `auto_enable_organization_members` - (Optional) Indicates the auto-enablement configuration of GuardDuty for the member accounts in the organization. Valid values are `ALL`, `NEW`, `NONE`. diff --git a/website/docs/r/iam_server_certificate.html.markdown b/website/docs/r/iam_server_certificate.html.markdown index 6903a92dafb..0e8994fa094 100644 --- a/website/docs/r/iam_server_certificate.html.markdown +++ b/website/docs/r/iam_server_certificate.html.markdown @@ -122,6 +122,12 @@ This resource exports the following attributes in addition to the arguments abov * `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `upload_date` - Date and time in [RFC3339 format](https://tools.ietf.org/html/rfc3339#section-5.8) when the server certificate was uploaded. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `delete` - (Default `15m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import IAM Server Certificates using the `name`. 
For example: diff --git a/website/docs/r/imagebuilder_image_pipeline.html.markdown b/website/docs/r/imagebuilder_image_pipeline.html.markdown index 21a064637fd..4907a4e371a 100644 --- a/website/docs/r/imagebuilder_image_pipeline.html.markdown +++ b/website/docs/r/imagebuilder_image_pipeline.html.markdown @@ -37,11 +37,13 @@ The following arguments are optional: * `description` - (Optional) Description of the image pipeline. * `distribution_configuration_arn` - (Optional) Amazon Resource Name (ARN) of the Image Builder Distribution Configuration. * `enhanced_image_metadata_enabled` - (Optional) Whether additional information about the image being created is collected. Defaults to `true`. +* `execution_role` - (Optional) Amazon Resource Name (ARN) of the service-linked role to be used by Image Builder to [execute workflows](https://docs.aws.amazon.com/imagebuilder/latest/userguide/manage-image-workflows.html). * `image_recipe_arn` - (Optional) Amazon Resource Name (ARN) of the image recipe. * `image_scanning_configuration` - (Optional) Configuration block with image scanning configuration. Detailed below. * `image_tests_configuration` - (Optional) Configuration block with image tests configuration. Detailed below. * `schedule` - (Optional) Configuration block with schedule settings. Detailed below. * `status` - (Optional) Status of the image pipeline. Valid values are `DISABLED` and `ENABLED`. Defaults to `ENABLED`. +* `workflow` - (Optional) Configuration block with the workflow configuration. Detailed below. * `tags` - (Optional) Key-value map of resource tags for the image pipeline. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
### image_scanning_configuration @@ -77,6 +79,25 @@ The following arguments are optional: * `timezone` - (Optional) The timezone that applies to the scheduling expression. For example, "Etc/UTC", "America/Los_Angeles" in the [IANA timezone format](https://www.joda.org/joda-time/timezones.html). If not specified this defaults to UTC. +### workflow + +The following arguments are required: + +* `workflow_arn` - (Required) Amazon Resource Name (ARN) of the Image Builder Workflow. + +The following arguments are optional: + +* `on_failure` - (Optional) The action to take if the workflow fails. Must be one of `CONTINUE` or `ABORT`. +* `parallel_group` - (Optional) The parallel group in which to run a test Workflow. +* `parameter` - (Optional) Configuration block for the workflow parameters. Detailed below. + +### parameter + +The following arguments are required: + +* `name` - (Required) The name of the Workflow parameter. +* `value` - (Required) The value of the Workflow parameter. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/iot_authorizer.html.markdown b/website/docs/r/iot_authorizer.html.markdown index c129f2efe56..5e5750c5c6d 100644 --- a/website/docs/r/iot_authorizer.html.markdown +++ b/website/docs/r/iot_authorizer.html.markdown @@ -23,6 +23,10 @@ resource "aws_iot_authorizer" "example" { token_signing_public_keys = { Key1 = file("test-fixtures/iot-authorizer-signing-key.pem") } + + tags = { + Name = "example" + } } ``` @@ -33,6 +37,7 @@ resource "aws_iot_authorizer" "example" { * `name` - (Required) The name of the authorizer. * `signing_disabled` - (Optional) Specifies whether AWS IoT validates the token signature in an authorization request. Default: `false`. * `status` - (Optional) The status of Authorizer request at creation. Valid values: `ACTIVE`, `INACTIVE`. Default: `ACTIVE`. +* `tags` - (Optional) Map of tags to assign to this resource. 
If configured with a provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `token_key_name` - (Optional) The name of the token key used to extract the token from the HTTP headers. This value is required if signing is enabled in your authorizer. * `token_signing_public_keys` - (Optional) The public keys used to verify the digital signature returned by your custom authentication service. This value is required if signing is enabled in your authorizer. @@ -41,6 +46,7 @@ resource "aws_iot_authorizer" "example" { This resource exports the following attributes in addition to the arguments above: * `arn` - The ARN of the authorizer. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://www.terraform.io/docs/providers/aws/index.html#default_tags-configuration-block). ## Import diff --git a/website/docs/r/iot_topic_rule.html.markdown b/website/docs/r/iot_topic_rule.html.markdown index a18e5593928..d8b04c1b4ea 100644 --- a/website/docs/r/iot_topic_rule.html.markdown +++ b/website/docs/r/iot_topic_rule.html.markdown @@ -95,6 +95,7 @@ The `cloudwatch_alarm` object takes the following arguments: The `cloudwatch_logs` object takes the following arguments: +* `batch_mode` - (Optional) The payload that contains a JSON array of records will be sent to CloudWatch via a batch call. * `log_group_name` - (Required) The CloudWatch log group name. * `role_arn` - (Required) The IAM role ARN that allows access to the CloudWatch alarm. 
diff --git a/website/docs/r/kinesisanalyticsv2_application.html.markdown b/website/docs/r/kinesisanalyticsv2_application.html.markdown index 6f14e483e69..981f9a071d1 100644 --- a/website/docs/r/kinesisanalyticsv2_application.html.markdown +++ b/website/docs/r/kinesisanalyticsv2_application.html.markdown @@ -256,7 +256,7 @@ resource "aws_kinesisanalyticsv2_application" "example" { This resource supports the following arguments: * `name` - (Required) The name of the application. -* `runtime_environment` - (Required) The runtime environment for the application. Valid values: `SQL-1_0`, `FLINK-1_6`, `FLINK-1_8`, `FLINK-1_11`, `FLINK-1_13`, `FLINK-1_15`, `FLINK-1_18`. +* `runtime_environment` - (Required) The runtime environment for the application. Valid values: `SQL-1_0`, `FLINK-1_6`, `FLINK-1_8`, `FLINK-1_11`, `FLINK-1_13`, `FLINK-1_15`, `FLINK-1_18`, `FLINK-1_19`. * `service_execution_role` - (Required) The ARN of the [IAM role](/docs/providers/aws/r/iam_role.html) used by the application to access Kinesis data streams, Kinesis Data Firehose delivery streams, Amazon S3 objects, and other external resources. * `application_configuration` - (Optional) The application's configuration * `application_mode` - (Optional) The application's mode. Valid values are `STREAMING`, `INTERACTIVE`. 
diff --git a/website/docs/r/lakeformation_data_lake_settings.html.markdown b/website/docs/r/lakeformation_data_lake_settings.html.markdown index db938537c48..ed601b0af57 100644 --- a/website/docs/r/lakeformation_data_lake_settings.html.markdown +++ b/website/docs/r/lakeformation_data_lake_settings.html.markdown @@ -56,9 +56,10 @@ resource "aws_lakeformation_data_lake_settings" "example" { principal = aws_iam_role.test.arn } - allow_external_data_filtering = true - external_data_filtering_allow_list = [data.aws_caller_identity.current.account_id, data.aws_caller_identity.third_party.account_id] - authorized_session_tag_value_list = ["Amazon EMR"] + allow_external_data_filtering = true + external_data_filtering_allow_list = [data.aws_caller_identity.current.account_id, data.aws_caller_identity.third_party.account_id] + authorized_session_tag_value_list = ["Amazon EMR"] + allow_full_table_external_data_access = true } ``` @@ -75,6 +76,7 @@ The following arguments are optional: * `allow_external_data_filtering` - (Optional) Whether to allow Amazon EMR clusters to access data managed by Lake Formation. * `external_data_filtering_allow_list` - (Optional) A list of the account IDs of Amazon Web Services accounts with Amazon EMR clusters that are to perform data filtering. * `authorized_session_tag_value_list` - (Optional) Lake Formation relies on a privileged process secured by Amazon EMR or the third party integrator to tag the user's role while assuming it. +* `allow_full_table_external_data_access` - (Optional) Whether to allow a third-party query engine to get data access credentials without session tags when a caller has full data access permissions. ~> **NOTE:** Although optional, not including `admins`, `create_database_default_permissions`, `create_table_default_permissions`, and/or `trusted_resource_owners` results in the setting being cleared. 
diff --git a/website/docs/r/lambda_event_source_mapping.html.markdown b/website/docs/r/lambda_event_source_mapping.html.markdown index 948d8d9719e..4e67f1d41c4 100644 --- a/website/docs/r/lambda_event_source_mapping.html.markdown +++ b/website/docs/r/lambda_event_source_mapping.html.markdown @@ -199,7 +199,7 @@ resource "aws_lambda_event_source_mapping" "example" { ### scaling_config Configuration Block -* `maximum_concurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be between `2` and `1000`. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). +* `maximum_concurrency` - (Optional) Limits the number of concurrent instances that the Amazon SQS event source can invoke. Must be greater than or equal to `2`. See [Configuring maximum concurrency for Amazon SQS event sources](https://docs.aws.amazon.com/lambda/latest/dg/with-sqs.html#events-sqs-max-concurrency). You need to raise a [Service Quota Ticket](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) to increase the concurrency beyond 1000. ### self_managed_event_source Configuration Block diff --git a/website/docs/r/launch_template.html.markdown b/website/docs/r/launch_template.html.markdown index cd717279bc5..5320ef055ee 100644 --- a/website/docs/r/launch_template.html.markdown +++ b/website/docs/r/launch_template.html.markdown @@ -351,6 +351,7 @@ This configuration block supports the following: * ssd - solid state drive ``` +* `max_spot_price_as_percentage_of_optimal_on_demand_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. 
When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Conflicts with `spot_max_price_percentage_over_lowest_price` * `memory_gib_per_vcpu` - (Optional) Block describing the minimum and maximum amount of memory (GiB) per vCPU. Default is no minimum or maximum. * `min` - (Optional) Minimum. May be a decimal number, e.g. `0.5`. * `max` - (Optional) Maximum. May be a decimal number, e.g. `0.5`. @@ -367,7 +368,7 @@ This configuration block supports the following: If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. * `require_hibernate_support` - (Optional) Indicate whether instance types must support On-Demand Instance Hibernation, either `true` or `false`. Default is `false`. -* `spot_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. +* `spot_max_price_percentage_over_lowest_price` - (Optional) The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a percentage higher than the cheapest M, C, or R instance type with your specified attributes. 
When Amazon EC2 Auto Scaling selects instance types with your attributes, we will exclude instance types whose price is higher than your threshold. The parameter accepts an integer, which Amazon EC2 Auto Scaling interprets as a percentage. To turn off price protection, specify a high value, such as 999999. Default is 100. Conflicts with `max_spot_price_as_percentage_of_optimal_on_demand_price` If you set DesiredCapacityType to vcpu or memory-mib, the price protection threshold is applied based on the per vCPU or per memory price instead of the per instance price. * `total_local_storage_gb` - (Optional) Block describing the minimum and maximum total local storage (GB). Default is no minimum or maximum. diff --git a/website/docs/r/lb_target_group.html.markdown b/website/docs/r/lb_target_group.html.markdown index b9e25f741e3..d3abf64af70 100644 --- a/website/docs/r/lb_target_group.html.markdown +++ b/website/docs/r/lb_target_group.html.markdown @@ -81,6 +81,29 @@ resource "aws_lb_target_group" "tcp-example" { } ``` +### Target group with health requirements + +```terraform +resource "aws_lb_target_group" "tcp-example" { + name = "tf-example-lb-nlb-tg" + port = 80 + protocol = "TCP" + vpc_id = aws_vpc.main.id + + target_group_health { + dns_failover { + minimum_healthy_targets_count = "1" + minimum_healthy_targets_percentage = "off" + } + + unhealthy_state_routing { + minimum_healthy_targets_count = "1" + minimum_healthy_targets_percentage = "off" + } + } +} +``` + ## Argument Reference This resource supports the following arguments: @@ -107,6 +130,7 @@ This resource supports the following arguments: * `tags` - (Optional) Map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `target_failover` - (Optional) Target failover block. 
Only applicable for Gateway Load Balancer target groups. See [target_failover](#target_failover) for more information. * `target_health_state` - (Optional) Target health state block. Only applicable for Network Load Balancer target groups when `protocol` is `TCP` or `TLS`. See [target_health_state](#target_health_state) for more information. +* `target_group_health` - (Optional) Target health requirements block. See [target_group_health](#target_group_health) for more information. * `target_type` - (Optional, Forces new resource) Type of target that you must specify when registering targets with this target group. See [doc](https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_CreateTargetGroup.html) for supported values. The default is `instance`. @@ -171,6 +195,29 @@ This resource supports the following arguments: * `enable_unhealthy_connection_termination` - (Optional) Indicates whether the load balancer terminates connections to unhealthy targets. Possible values are `true` or `false`. Default: `true`. +### target_group_health + +~> **NOTE:** This block is only supported by Application Load Balancers and Network Load Balancers. + +The `target_group_health` block supports the following: + +* `dns_failover` - (Optional) Block to configure DNS Failover requirements. See [DNS Failover](#dns_failover) below for details on attributes. +* `unhealthy_state_routing` - (Optional) Block to configure Unhealthy State Routing requirements. See [Unhealthy State Routing](#unhealthy_state_routing) below for details on attributes. + +### dns_failover + +The `dns_failover` block supports the following: + +* `minimum_healthy_targets_count` - (Optional) The minimum number of targets that must be healthy. If the number of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from `1` to the maximum number of targets. The default is `off`. 
+* `minimum_healthy_targets_percentage` - (Optional) The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, mark the zone as unhealthy in DNS, so that traffic is routed only to healthy zones. The possible values are `off` or an integer from `1` to `100`. The default is `off`. + +### unhealthy_state_routing + +The `unhealthy_state_routing` block supports the following: + +* `minimum_healthy_targets_count` - (Optional) The minimum number of targets that must be healthy. If the number of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `1` to the maximum number of targets. The default is `1`. +* `minimum_healthy_targets_percentage` - (Optional) The minimum percentage of targets that must be healthy. If the percentage of healthy targets is below this value, send traffic to all targets, including unhealthy targets. The possible values are `off` or an integer from `1` to `100`. The default is `off`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/lightsail_container_service.html.markdown b/website/docs/r/lightsail_container_service.html.markdown index f91ef77d3c4..dbcffe984d1 100644 --- a/website/docs/r/lightsail_container_service.html.markdown +++ b/website/docs/r/lightsail_container_service.html.markdown @@ -96,7 +96,7 @@ resource "aws_ecr_repository_policy" "default" { container service. For more information, see [Enabling and managing custom domains for your Amazon Lightsail container services](https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-creating-container-services-certificates). -This argument supports the following arguments: +This resource supports the following arguments: * `name` - (Required) The name for the container service. 
Names must be of length 1 to 63, and be unique within each AWS Region in your Lightsail account. diff --git a/website/docs/r/medialive_channel.html.markdown b/website/docs/r/medialive_channel.html.markdown index 02f9cd5a28a..aba4dd454d8 100644 --- a/website/docs/r/medialive_channel.html.markdown +++ b/website/docs/r/medialive_channel.html.markdown @@ -143,8 +143,8 @@ The following arguments are optional: ### Input Settings -* `audio_selectors` - (Optional) Used to select the audio stream to decode for inputs that have multiple. See [Audio Selectors](#audio-selectors) for more details. -* `caption_selectors` - (Optional) Used to select the caption input to use for inputs that have multiple available. See [Caption Selectors](#caption-selectors) for more details. +* `audio_selector` - (Optional) Used to select the audio stream to decode for inputs that have multiple. See [Audio Selectors](#audio-selectors) for more details. +* `caption_selector` - (Optional) Used to select the caption input to use for inputs that have multiple available. See [Caption Selectors](#caption-selectors) for more details. * `deblock_filter` - (Optional) Enable or disable the deblock filter when filtering. * `denoise_filter` - (Optional) Enable or disable the denoise filter when filtering. * `filter_strength` - (Optional) Adjusts the magnitude of filtering from 1 (minimal) to 5 (strongest). diff --git a/website/docs/r/msk_replicator.html.markdown b/website/docs/r/msk_replicator.html.markdown index f8ff06fc914..4f31eeb3e4c 100644 --- a/website/docs/r/msk_replicator.html.markdown +++ b/website/docs/r/msk_replicator.html.markdown @@ -89,7 +89,8 @@ The following arguments are required: * `target_kafka_cluster_arn` - (Required) The ARN of the target Kafka cluster. * `target_compression_type` - (Required) The type of compression to use writing records to target Kafka cluster. * `topic_replication` - (Required) Configuration relating to topic replication. 
-* `consumer_group_replication` - (Required) Confguration relating to consumer group replication. +* `starting_position` - (Optional) Configuration for specifying the position in the topics to start replicating from. +* `consumer_group_replication` - (Required) Configuration relating to consumer group replication. ### topic_replication Argument Reference @@ -106,6 +107,10 @@ The following arguments are required: * `detect_and_copy_new_consumer_groups` - (Optional) Whether to periodically check for new consumer groups. * `synchronise_consumer_group_offsets` - (Optional) Whether to periodically write the translated offsets to __consumer_offsets topic in target cluster. +### starting_position + +* `type` - (Optional) The type of replication starting position. Supports `LATEST` and `EARLIEST`. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/mskconnect_connector.html.markdown b/website/docs/r/mskconnect_connector.html.markdown index 7af8c023e19..b43dd3c5817 100644 --- a/website/docs/r/mskconnect_connector.html.markdown +++ b/website/docs/r/mskconnect_connector.html.markdown @@ -74,105 +74,145 @@ resource "aws_mskconnect_connector" "example" { ## Argument Reference -This resource supports the following arguments: +The following arguments are required: -* `capacity` - (Required) Information about the capacity allocated to the connector. See below. +* `capacity` - (Required) Information about the capacity allocated to the connector. See [`capacity` Block](#capacity-block) for details. * `connector_configuration` - (Required) A map of keys to values that represent the configuration for the connector. -* `description` - (Optional) A summary description of the connector. -* `kafka_cluster` - (Required) Specifies which Apache Kafka cluster to connect to. See below. -* `kafka_cluster_client_authentication` - (Required) Details of the client authentication used by the Apache Kafka cluster. 
See below. -* `kafka_cluster_encryption_in_transit` - (Required) Details of encryption in transit to the Apache Kafka cluster. See below. +* `kafka_cluster` - (Required) Specifies which Apache Kafka cluster to connect to. See [`kafka_cluster` Block](#kafka_cluster-block) for details. +* `kafka_cluster_client_authentication` - (Required) Details of the client authentication used by the Apache Kafka cluster. See [`kafka_cluster_client_authentication` Block](#kafka_cluster_client_authentication-block) for details. +* `kafka_cluster_encryption_in_transit` - (Required) Details of encryption in transit to the Apache Kafka cluster. See [`kafka_cluster_encryption_in_transit` Block](#kafka_cluster_encryption_in_transit-block) for details. * `kafkaconnect_version` - (Required) The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins. -* `log_delivery` - (Optional) Details about log delivery. See below. * `name` - (Required) The name of the connector. -* `plugin` - (Required) Specifies which plugins to use for the connector. See below. +* `plugin` - (Required) Specifies which plugins to use for the connector. See [`plugin` Block](#plugin-block) for details. * `service_execution_role_arn` - (Required) The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket. -* `worker_configuration` - (Optional) Specifies which worker configuration to use with the connector. See below. -### capacity Configuration Block +The following arguments are optional: + +* `description` - (Optional) A summary description of the connector. +* `log_delivery` - (Optional) Details about log delivery. See [`log_delivery` Block](#log_delivery-block) for details. 
+* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `worker_configuration` - (Optional) Specifies which worker configuration to use with the connector. See [`worker_configuration` Block](#worker_configuration-block) for details. + +### `capacity` Block + +The `capacity` configuration block supports the following arguments: + +* `autoscaling` - (Optional) Information about the auto scaling parameters for the connector. See [`autoscaling` Block](#autoscaling-block) for details. +* `provisioned_capacity` - (Optional) Details about a fixed capacity allocated to a connector. See [`provisioned_capacity` Block](#provisioned_capacity-block) for details. -* `autoscaling` - (Optional) Information about the auto scaling parameters for the connector. See below. -* `provisioned_capacity` - (Optional) Details about a fixed capacity allocated to a connector. See below. +### `autoscaling` Block -### autoscaling Configuration Block +The `autoscaling` configuration block supports the following arguments: * `max_worker_count` - (Required) The maximum number of workers allocated to the connector. * `mcu_count` - (Optional) The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: `1`, `2`, `4`, `8`. The default value is `1`. * `min_worker_count` - (Required) The minimum number of workers allocated to the connector. -* `scale_in_policy` - (Optional) The scale-in policy for the connector. See below. -* `scale_out_policy` - (Optional) The scale-out policy for the connector. See below. +* `scale_in_policy` - (Optional) The scale-in policy for the connector. See [`scale_in_policy` Block](#scale_in_policy-block) for details. 
+* `scale_out_policy` - (Optional) The scale-out policy for the connector. See [`scale_out_policy` Block](#scale_out_policy-block) for details. -### scale_in_policy Configuration Block +### `scale_in_policy` Block + +The `scale_in_policy` configuration block supports the following arguments: * `cpu_utilization_percentage` - (Required) Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered. -### scale_out_policy Configuration Block +### `scale_out_policy` Block + +The `scale_out_policy` configuration block supports the following arguments: * `cpu_utilization_percentage` - (Required) The CPU utilization percentage threshold at which you want connector scale out to be triggered. -### provisioned_capacity Configuration Block +### `provisioned_capacity` Block + +The `provisioned_capacity` configuration block supports the following arguments: * `mcu_count` - (Optional) The number of microcontroller units (MCUs) allocated to each connector worker. Valid values: `1`, `2`, `4`, `8`. The default value is `1`. * `worker_count` - (Required) The number of workers that are allocated to the connector. -### kafka_cluster Configuration Block +### `kafka_cluster` Block + +The `kafka_cluster` configuration block supports the following arguments: + +* `apache_kafka_cluster` - (Required) The Apache Kafka cluster to which the connector is connected. See [`apache_kafka_cluster` Block](#apache_kafka_cluster-block) for details. -* `apache_kafka_cluster` - (Required) The Apache Kafka cluster to which the connector is connected. +### `apache_kafka_cluster` Block -### apache_kafka_cluster Configuration Block +The `apache_kafka_cluster` configuration block supports the following arguments: * `bootstrap_servers` - (Required) The bootstrap servers of the cluster. -* `vpc` - (Required) Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. 
+* `vpc` - (Required) Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster. See [`vpc` Block](#vpc-block) for details. -### vpc Configuration Block +### `vpc` Block + +The `vpc` configuration block supports the following arguments: * `security_groups` - (Required) The security groups for the connector. * `subnets` - (Required) The subnets for the connector. -### kafka_cluster_client_authentication Configuration Block +### `kafka_cluster_client_authentication` Block + +The `kafka_cluster_client_authentication` configuration block supports the following arguments: * `authentication_type` - (Optional) The type of client authentication used to connect to the Apache Kafka cluster. Valid values: `IAM`, `NONE`. A value of `NONE` means that no client authentication is used. The default value is `NONE`. -### kafka_cluster_encryption_in_transit Configuration Block +### `kafka_cluster_encryption_in_transit` Block + +The `kafka_cluster_encryption_in_transit` configuration block supports the following arguments: * `encryption_type` - (Optional) The type of encryption in transit to the Apache Kafka cluster. Valid values: `PLAINTEXT`, `TLS`. The default values is `PLAINTEXT`. -### log_delivery Configuration Block +### `log_delivery` Block + +The `log_delivery` configuration block supports the following arguments: + +* `worker_log_delivery` - (Required) The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See [`worker_log_delivery` Block](#worker_log_delivery-block) for details. + +### `worker_log_delivery` Block -* `worker_log_delivery` - (Required) The workers can send worker logs to different destination types. This configuration specifies the details of these destinations. See below. 
+The `worker_log_delivery` configuration block supports the following arguments: -### worker_log_delivery Configuration Block +* `cloudwatch_logs` - (Optional) Details about delivering logs to Amazon CloudWatch Logs. See [`cloudwatch_logs` Block](#cloudwatch_logs-block) for details. +* `firehose` - (Optional) Details about delivering logs to Amazon Kinesis Data Firehose. See [`firehose` Block](#firehose-block) for details. +* `s3` - (Optional) Details about delivering logs to Amazon S3. See [`s3` Block](#s3-block) for details. -* `cloudwatch_logs` - (Optional) Details about delivering logs to Amazon CloudWatch Logs. See below. -* `firehose` - (Optional) Details about delivering logs to Amazon Kinesis Data Firehose. See below. -* `s3` - (Optional) Details about delivering logs to Amazon S3. See below. +### `cloudwatch_logs` Block -### cloudwatch_logs Configuration Block +The `cloudwatch_logs` configuration block supports the following arguments: * `enabled` - (Optional) Whether log delivery to Amazon CloudWatch Logs is enabled. * `log_group` - (Required) The name of the CloudWatch log group that is the destination for log delivery. -### firehose Configuration Block +### `firehose` Block + +The `firehose` configuration block supports the following arguments: * `delivery_stream` - (Optional) The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery. * `enabled` - (Required) Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose. -### s3 Configuration Block +### `s3` Block + +The `s3` configuration block supports the following arguments: * `bucket` - (Optional) The name of the S3 bucket that is the destination for log delivery. * `enabled` - (Required) Specifies whether connector logs get sent to the specified Amazon S3 destination. * `prefix` - (Optional) The S3 prefix that is the destination for log delivery.
-### plugin Configuration Block +### `plugin` Block + +The `plugin` configuration block supports the following arguments: -* `custom_plugin` - (Required) Details about a custom plugin. See below. +* `custom_plugin` - (Required) Details about a custom plugin. See [`custom_plugin` Block](#custom_plugin-block) for details. -### custom_plugin Configuration Block +### `custom_plugin` Block + +The `custom_plugin` configuration block supports the following arguments: * `arn` - (Required) The Amazon Resource Name (ARN) of the custom plugin. * `revision` - (Required) The revision of the custom plugin. -### worker_configuration Configuration Block +### `worker_configuration` Block + +The `worker_configuration` configuration block supports the following arguments: * `arn` - (Required) The Amazon Resource Name (ARN) of the worker configuration. * `revision` - (Required) The revision of the worker configuration. @@ -182,6 +222,7 @@ This resource supports the following arguments: This resource exports the following attributes in addition to the arguments above: * `arn` - The Amazon Resource Name (ARN) of the connector. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). * `version` - The current version of the connector. ## Timeouts diff --git a/website/docs/r/mskconnect_custom_plugin.html.markdown b/website/docs/r/mskconnect_custom_plugin.html.markdown index cb965af9f54..735c9924906 100644 --- a/website/docs/r/mskconnect_custom_plugin.html.markdown +++ b/website/docs/r/mskconnect_custom_plugin.html.markdown @@ -41,23 +41,28 @@ The following arguments are required: -* `name` - (Required) The name of the custom plugin.. -* `content_type` - (Required) The type of the plugin file. Allowed values are `ZIP` and `JAR`.
-* `location` - (Required) Information about the location of a custom plugin. See below. +* `name` - (Required, Forces new resource) The name of the custom plugin. +* `content_type` - (Required, Forces new resource) The type of the plugin file. Allowed values are `ZIP` and `JAR`. +* `location` - (Required, Forces new resource) Information about the location of a custom plugin. See [`location` Block](#location-block) for details. +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. The following arguments are optional: -* `description` - (Optional) A summary description of the custom plugin. +* `description` - (Optional, Forces new resource) A summary description of the custom plugin. -### location Argument Reference +### `location` Block -* `s3` - (Required) Information of the plugin file stored in Amazon S3. See below. +The `location` configuration block supports the following arguments: -#### location s3 Argument Reference +* `s3` - (Required, Forces new resource) Information of the plugin file stored in Amazon S3. See [`s3` Block](#s3-block) for details. -* `bucket_arn` - (Required) The Amazon Resource Name (ARN) of an S3 bucket. -* `file_key` - (Required) The file key for an object in an S3 bucket. -* `object_version` - (Optional) The version of an object in an S3 bucket. +### `s3` Block + +The `s3` configuration block supports the following arguments: + +* `bucket_arn` - (Required, Forces new resource) The Amazon Resource Name (ARN) of an S3 bucket. +* `file_key` - (Required, Forces new resource) The file key for an object in an S3 bucket. +* `object_version` - (Optional, Forces new resource) The version of an object in an S3 bucket.
## Attribute Reference @@ -66,6 +71,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - the Amazon Resource Name (ARN) of the custom plugin. * `latest_revision` - an ID of the latest successfully created revision of the custom plugin. * `state` - the state of the custom plugin. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Timeouts diff --git a/website/docs/r/mskconnect_worker_configuration.html.markdown b/website/docs/r/mskconnect_worker_configuration.html.markdown index 3542f68d328..c0beb2d43e7 100644 --- a/website/docs/r/mskconnect_worker_configuration.html.markdown +++ b/website/docs/r/mskconnect_worker_configuration.html.markdown @@ -28,12 +28,13 @@ EOT The following arguments are required: -* `name` - (Required) The name of the worker configuration. -* `properties_file_content` - (Required) Contents of connect-distributed.properties file. The value can be either base64 encoded or in raw format. +* `name` - (Required, Forces new resource) The name of the worker configuration. +* `properties_file_content` - (Required, Forces new resource) Contents of connect-distributed.properties file. The value can be either base64 encoded or in raw format. The following arguments are optional: -* `description` - (Optional) A summary description of the worker configuration. +* `description` - (Optional, Forces new resource) A summary description of the worker configuration. +* `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
## Attribute Reference @@ -41,6 +42,13 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - the Amazon Resource Name (ARN) of the worker configuration. * `latest_revision` - an ID of the latest successfully created revision of the worker configuration. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `delete` - (Default `10m`) ## Import diff --git a/website/docs/r/mwaa_environment.html.markdown b/website/docs/r/mwaa_environment.html.markdown index 7689b6fe815..85199a2a8d8 100644 --- a/website/docs/r/mwaa_environment.html.markdown +++ b/website/docs/r/mwaa_environment.html.markdown @@ -129,6 +129,7 @@ This resource supports the following arguments: * `airflow_configuration_options` - (Optional) The `airflow_configuration_options` parameter specifies airflow override options. Check the [Official documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-env-variables.html#configuring-env-variables-reference) for all possible configuration options. * `airflow_version` - (Optional) Airflow version of your environment, will be set by default to the latest version that MWAA supports. * `dag_s3_path` - (Required) The relative path to the DAG folder on your Amazon S3 storage bucket. For example, dags. For more information, see [Importing DAGs on Amazon MWAA](https://docs.aws.amazon.com/mwaa/latest/userguide/configuring-dag-import.html). +* `endpoint_management` - (Optional) Defines whether the VPC endpoints configured for the environment are created and managed by the customer or by AWS. If set to `SERVICE`, Amazon MWAA will create and manage the required VPC endpoints in your VPC. 
If set to `CUSTOMER`, you must create, and manage, the VPC endpoints for your VPC. Defaults to `SERVICE` if not set. * `environment_class` - (Optional) Environment class for the cluster. Possible options are `mw1.small`, `mw1.medium`, `mw1.large`. Will be set by default to `mw1.small`. Please check the [AWS Pricing](https://aws.amazon.com/de/managed-workflows-for-apache-airflow/pricing/) for more information about the environment classes. * `execution_role_arn` - (Required) The Amazon Resource Name (ARN) of the task execution role that the Amazon MWAA and its environment can assume. Check the [official AWS documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/mwaa-create-role.html) for the detailed role specification. * `kms_key` - (Optional) The Amazon Resource Name (ARN) of your KMS key that you want to use for encryption. Will be set to the ARN of the managed KMS key `aws/airflow` by default. Please check the [Official Documentation](https://docs.aws.amazon.com/mwaa/latest/userguide/custom-keys-certs.html) for more information. diff --git a/website/docs/r/networkfirewall_tls_inspection_configuration.html.markdown b/website/docs/r/networkfirewall_tls_inspection_configuration.html.markdown new file mode 100644 index 00000000000..170291d1413 --- /dev/null +++ b/website/docs/r/networkfirewall_tls_inspection_configuration.html.markdown @@ -0,0 +1,354 @@ +--- +subcategory: "Network Firewall" +layout: "aws" +page_title: "AWS: aws_networkfirewall_tls_inspection_configuration" +description: |- + Terraform resource for managing an AWS Network Firewall TLS Inspection Configuration. +--- + +# Resource: aws_networkfirewall_tls_inspection_configuration + +Terraform resource for managing an AWS Network Firewall TLS Inspection Configuration. + +## Example Usage + +~> **NOTE:** You must configure either inbound inspection, outbound inspection, or both. 
+ +### Basic inbound/ingress inspection + +``` +resource "aws_networkfirewall_tls_inspection_configuration" "example" { + name = "example" + description = "example" + encryption_configuration { + key_id = "AWS_OWNED_KMS_KEY" + type = "AWS_OWNED_KMS_KEY" + } + tls_inspection_configuration { + server_certificate_configuration { + server_certificate { + resource_arn = aws_acm_certificate.example_1.arn + } + scope { + protocols = [6] + destination_ports { + from_port = 443 + to_port = 443 + } + destination { + address_definition = "0.0.0.0/0" + } + source_ports { + from_port = 0 + to_port = 65535 + } + source { + address_definition = "0.0.0.0/0" + } + } + } + } +} +``` + +### Basic outbound/egress inspection + +``` +resource "aws_networkfirewall_tls_inspection_configuration" "example" { + name = "example" + description = "example" + encryption_configuration { + key_id = "AWS_OWNED_KMS_KEY" + type = "AWS_OWNED_KMS_KEY" + } + tls_inspection_configuration { + server_certificate_configuration { + certificate_authority_arn = aws_acm_certificate.example_1.arn + check_certificate_revocation_status { + revoked_status_action = "REJECT" + unknown_status_action = "PASS" + } + scope { + protocols = [6] + destination_ports { + from_port = 443 + to_port = 443 + } + destination { + address_definition = "0.0.0.0/0" + } + source_ports { + from_port = 0 + to_port = 65535 + } + source { + address_definition = "0.0.0.0/0" + } + } + } + } +} +``` + +### Inbound with encryption configuration + +``` +resource "aws_kms_key" "example" { + description = "example" + deletion_window_in_days = 7 +} + +resource "aws_networkfirewall_tls_inspection_configuration" "example" { + name = "example" + description = "example" + encryption_configuration { + key_id = aws_kms_key.example.arn + type = "CUSTOMER_KMS" + } + tls_inspection_configuration { + server_certificate_configuration { + server_certificate { + resource_arn = aws_acm_certificate.example_1.arn + } + scopes { + protocols = [6] +
destination_ports { + from_port = 443 + to_port = 443 + } + destinations { + address_definition = "0.0.0.0/0" + } + source_ports { + from_port = 0 + to_port = 65535 + } + sources { + address_definition = "0.0.0.0/0" + } + } + } + } +} +``` + +### Outbound with encryption configuration + +``` +resource "aws_kms_key" "example" { + description = "example" + deletion_window_in_days = 7 +} + +resource "aws_networkfirewall_tls_inspection_configuration" "example" { + name = "example" + description = "example" + encryption_configuration { + key_id = aws_kms_key.example.arn + type = "CUSTOMER_KMS" + } + tls_inspection_configuration { + server_certificate_configurations { + certificate_authority_arn = aws_acm_certificate.example_1.arn + check_certificate_revocation_status { + revoked_status_action = "REJECT" + unknown_status_action = "PASS" + } + scope { + protocols = [6] + destination_ports { + from_port = 443 + to_port = 443 + } + destination { + address_definition = "0.0.0.0/0" + } + source_ports { + from_port = 0 + to_port = 65535 + } + source { + address_definition = "0.0.0.0/0" + } + } + } + } +} +``` + +### Combined inbound and outbound + +```terraform +resource "aws_networkfirewall_tls_inspection_configuration" "example" { + name = "example" + description = "example" + encryption_configuration { + key_id = "AWS_OWNED_KMS_KEY" + type = "AWS_OWNED_KMS_KEY" + } + tls_inspection_configuration { + server_certificate_configuration { + certificate_authority_arn = aws_acm_certificate.example_1.arn + check_certificate_revocation_status { + revoked_status_action = "REJECT" + unknown_status_action = "PASS" + } + server_certificate { + resource_arn = aws_acm_certificate.example_2.arn + } + scope { + protocols = [6] + destination_ports { + from_port = 443 + to_port = 443 + } + destination { + address_definition = "0.0.0.0/0" + } + source_ports { + from_port = 0 + to_port = 65535 + } + source { + address_definition = "0.0.0.0/0" + } + } + } + } +} +``` + +## Argument Reference + 
+The following arguments are required: + +* `name` - (Required, Forces new resource) Descriptive name of the TLS inspection configuration. +* `tls_inspection_configuration` - (Required) TLS inspection configuration block. Detailed below. + +The following arguments are optional: + +* `description` - (Optional) Description of the TLS inspection configuration. +* `encryption_configuration` - (Optional) Encryption configuration block. Detailed below. + +### Encryption Configuration + +* `key_id` - (Optional) ARN of the Amazon Web Services Key Management Service (KMS) customer managed key. +* `type` - (Optional) Type of KMS key to use for encryption of your Network Firewall resources. Valid values: `AWS_OWNED_KMS_KEY`, `CUSTOMER_KMS`. + +### TLS Inspection Configuration + +* `server_certificate_configuration` - (Required) Server certificate configurations that are associated with the TLS configuration. Detailed below. + +### Server Certificate Configuration + +The `server_certificate_configuration` block supports the following arguments: + +* `certificate_authority_arn` - (Optional) ARN of the imported certificate authority (CA) certificate within Certificate Manager (ACM) to use for outbound SSL/TLS inspection. See [Using SSL/TLS certificates with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html) for limitations on CA certificates. +* `check_certificate_revocation_status` (Optional) - Check Certificate Revocation Status block. Detailed below. +* `scope` (Required) - Scope block. Detailed below. +* `server_certificate` - (Optional) Server certificates to use for inbound SSL/TLS inspection. See [Using SSL/TLS certificates with TLS inspection configurations](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html). 
+ +### Check Certificate Revocation Status + +The `check_certificate_revocation_status` block supports the following arguments: + +~> **NOTE:** To check the certificate revocation status, you must also specify a `certificate_authority_arn` in `server_certificate_configuration`. + +* `revoked_status_action` - (Optional) How Network Firewall processes traffic when it determines that the certificate presented by the server in the SSL/TLS connection has a revoked status. See [Checking certificate revocation status](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html#tls-inspection-check-certificate-revocation-status) for details. Valid values: `PASS`, `DROP`, `REJECT`. +* `unknown_status_action` - (Optional) How Network Firewall processes traffic when it determines that the certificate presented by the server in the SSL/TLS connection has an unknown status, or a status that cannot be determined for any other reason, including when the service is unable to connect to the OCSP and CRL endpoints for the certificate. See [Checking certificate revocation status](https://docs.aws.amazon.com/network-firewall/latest/developerguide/tls-inspection-certificate-requirements.html#tls-inspection-check-certificate-revocation-status) for details. Valid values: `PASS`, `DROP`, `REJECT`. + +### Scopes + +The `scope` block supports the following arguments: + +* `destination` - (Optional) Set of configuration blocks describing the destination IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any destination address. See [Destination](#destination) below for details. +* `destination_ports` - (Optional) Set of configuration blocks describing the destination ports to inspect for. If not specified, this matches with any destination port. See [Destination Ports](#destination-ports) below for details.
+* `protocols` - (Optional) Set of protocols to inspect for, specified using the protocol's assigned internet protocol number (IANA). Network Firewall currently supports TCP only. Valid values: `6` +* `source` - (Optional) Set of configuration blocks describing the source IP address and address ranges to inspect for, in CIDR notation. If not specified, this matches with any source address. See [Source](#source) below for details. +* `source_ports` - (Optional) Set of configuration blocks describing the source ports to inspect for. If not specified, this matches with any source port. See [Source Ports](#source-ports) below for details. + +### Destination + +The `destination` block supports the following argument: + +* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. + +### Destination Ports + +The `destination_ports` block supports the following arguments: + +* `from_port` - (Required) The lower limit of the port range. This must be less than or equal to the `to_port`. +* `to_port` - (Optional) The upper limit of the port range. This must be greater than or equal to the `from_port`. + +### Source + +The `source` block supports the following argument: + +* `address_definition` - (Required) An IP address or a block of IP addresses in CIDR notation. AWS Network Firewall supports all address ranges for IPv4. + +### Source Ports + +The `source_ports` block supports the following arguments: + +* `from_port` - (Required) The lower limit of the port range. This must be less than or equal to the `to_port`. +* `to_port` - (Optional) The upper limit of the port range. This must be greater than or equal to the `from_port`. + +### Server Certificates + +The `server_certificate` block supports the following arguments: + +* `resource_arn` - (Optional) ARN of the Certificate Manager SSL/TLS server certificate that's used for inbound SSL/TLS inspection.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `arn` - ARN of the TLS Inspection Configuration.
+* `certificate_authority` - Certificate Manager certificate block. See [Certificate Authority](#certificate-authority) below for details.
+* `certificates` - List of certificate blocks describing certificates associated with the TLS inspection configuration. See [Certificates](#certificates) below for details.
+* `number_of_associations` - Number of firewall policies that use this TLS inspection configuration.
+* `tls_inspection_configuration_id` - A unique identifier for the TLS inspection configuration.
+* `update_token` - String token used when updating the TLS inspection configuration.
+
+### Certificate Authority
+
+The `certificate_authority` block exports the following attributes:
+
+* `certificate_arn` - ARN of the certificate.
+* `certificate_serial` - Serial number of the certificate.
+* `status` - Status of the certificate.
+* `status_message` - Details about the certificate status, including information about certificate errors.
+
+### Certificates
+
+The `certificates` block exports the following attributes:
+
+* `certificate_arn` - ARN of the certificate.
+* `certificate_serial` - Serial number of the certificate.
+* `status` - Status of the certificate.
+* `status_message` - Details about the certificate status, including information about certificate errors.
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `30m`)
+* `update` - (Default `30m`)
+* `delete` - (Default `30m`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall TLS Inspection Configuration using the `arn`. 
For example: + +```terraform +import { + to = aws_networkfirewall_tls_inspection_configuration.example + id = "arn:aws:network-firewall::::tls-configuration/example" +} +``` + +Using `terraform import`, import Network Firewall TLS Inspection Configuration using the `arn`. For example: + +```console +% terraform import aws_networkfirewall_tls_inspection_configuration.example arn:aws:network-firewall::::tls-configuration/example +``` diff --git a/website/docs/r/networkmonitor_monitor.html.markdown b/website/docs/r/networkmonitor_monitor.html.markdown new file mode 100644 index 00000000000..4402cf33e35 --- /dev/null +++ b/website/docs/r/networkmonitor_monitor.html.markdown @@ -0,0 +1,57 @@ +--- +subcategory: "CloudWatch Network Monitor" +layout: "aws" +page_title: "AWS: aws_networkmonitor_monitor" +description: |- + Terraform resource for managing an Amazon Network Monitor Monitor. +--- + +# Resource: aws_networkmonitor_monitor + +Terraform resource for managing an AWS Network Monitor Monitor. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_networkmonitor_monitor" "example" { + aggregation_period = 30 + monitor_name = "example" +} +``` + +## Argument Reference + +The following arguments are required: + +- `monitor_name` - (Required) The name of the monitor. + +The following arguments are optional: + +- `aggregation_period` - (Optional) The time, in seconds, that metrics are aggregated and sent to Amazon CloudWatch. Valid values are either 30 or 60. +- `tags` - (Optional) Key-value tags for the monitor. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +- `arn` - The ARN of the monitor. 
+- `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmonitor_monitor` using the monitor name. For example: + +```terraform +import { + to = aws_networkmonitor_monitor.example + id = "monitor-7786087912324693644" +} +``` + +Using `terraform import`, import `aws_networkmonitor_monitor` using the monitor name. For example: + +```console +% terraform import aws_networkmonitor_monitor.example monitor-7786087912324693644 +``` diff --git a/website/docs/r/networkmonitor_probe.html.markdown b/website/docs/r/networkmonitor_probe.html.markdown new file mode 100644 index 00000000000..0220b85cba1 --- /dev/null +++ b/website/docs/r/networkmonitor_probe.html.markdown @@ -0,0 +1,71 @@ +--- +subcategory: "CloudWatch Network Monitor" +layout: "aws" +page_title: "AWS: aws_networkmonitor_probe" +description: |- + Terraform resource for managing an Amazon Network Monitor Probe. +--- + +# Resource: aws_networkmonitor_probe + +Terraform resource for managing an AWS Network Monitor Probe. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_networkmonitor_monitor" "example" { + aggregation_period = 30 + monitor_name = "example" +} + +resource "aws_networkmonitor_probe" "example" { + monitor_name = aws_networkmonitor_monitor.example.monitor_name + destination = "127.0.0.1" + destination_port = 80 + protocol = "TCP" + source_arn = aws_subnet.example.arn + packet_size = 200 +} +``` + +## Argument Reference + +The following arguments are required: + +- `destination` - (Required) The destination IP address. This must be either IPV4 or IPV6. +- `destination_port` - (Optional) The port associated with the destination. 
This is required only if the protocol is TCP and must be a number between 1 and 65536.
+- `monitor_name` - (Required) The name of the monitor.
+- `protocol` - (Required) The protocol used for the network traffic between the source and destination. This must be either TCP or ICMP.
+- `source_arn` - (Required) The ARN of the subnet.
+- `packet_size` - (Optional) The size of the packets sent between the source and destination. This must be a number between 56 and 8500.
+
+The following arguments are optional:
+
+- `tags` - (Optional) Key-value tags for the monitor. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+- `arn` - The ARN of the probe.
+- `source_arn` - The ARN of the subnet.
+- `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block).
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import `aws_networkmonitor_probe` using the monitor name and probe id. For example:
+
+```terraform
+import {
+  to = aws_networkmonitor_probe.example
+  id = "monitor-7786087912324693644,probe-3qm8p693i4fi1h8lqylzkbp42e"
+}
+```
+
+Using `terraform import`, import `aws_networkmonitor_probe` using the monitor name and probe id. 
For example: + +```console +% terraform import aws_networkmonitor_probe.example monitor-7786087912324693644,probe-3qm8p693i4fi1h8lqylzkbp42e +``` diff --git a/website/docs/r/oam_link.html.markdown b/website/docs/r/oam_link.html.markdown index 404a252d84f..298aef9caf1 100644 --- a/website/docs/r/oam_link.html.markdown +++ b/website/docs/r/oam_link.html.markdown @@ -25,6 +25,36 @@ resource "aws_oam_link" "example" { } ``` +### Log Group Filtering + +```terraform +resource "aws_oam_link" "example" { + label_template = "$AccountName" + link_configuration { + log_group_configuration { + filter = "LogGroupName LIKE 'aws/lambda/%' OR LogGroupName LIKE 'AWSLogs%'" + } + } + resource_types = ["AWS::Logs::LogGroup"] + sink_identifier = aws_oam_sink.test.id +} +``` + +### Metric Filtering + +```terraform +resource "aws_oam_link" "example" { + label_template = "$AccountName" + link_configuration { + metric_configuration { + filter = "Namespace IN ('AWS/EC2', 'AWS/ELB', 'AWS/S3')" + } + } + resource_types = ["AWS::CloudWatch::Metric"] + sink_identifier = aws_oam_sink.test.id +} +``` + ## Argument Reference The following arguments are required: @@ -35,13 +65,34 @@ The following arguments are required: The following arguments are optional: +* `link_configuration` - (Optional) Configuration for creating filters that specify that only some metric namespaces or log groups are to be shared from the source account to the monitoring account. See [`link_configuration` Block](#link_configuration-block) for details. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. 
+### `link_configuration` Block + +The `link_configuration` configuration block supports the following arguments: + +* `log_group_configuration` - (Optional) Configuration for filtering which log groups are to send log events from the source account to the monitoring account. See [`log_group_configuration` Block](#log_group_configuration-block) for details. +* `metric_configuration` - (Optional) Configuration for filtering which metric namespaces are to be shared from the source account to the monitoring account. See [`metric_configuration` Block](#metric_configuration-block) for details. + +### `log_group_configuration` Block + +The `log_group_configuration` configuration block supports the following arguments: + +* `filter` - (Required) Filter string that specifies which log groups are to share their log events with the monitoring account. See [LogGroupConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_LogGroupConfiguration.html) for details. + +### `metric_configuration` Block + +The `metric_configuration` configuration block supports the following arguments: + +* `filter` - (Required) Filter string that specifies which metrics are to be shared with the monitoring account. See [MetricConfiguration](https://docs.aws.amazon.com/OAM/latest/APIReference/API_MetricConfiguration.html) for details. + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the link. +* `id` - ARN of the link. * `label` - Label that is assigned to this link. * `link_id` - ID string that AWS generated as part of the link ARN. * `sink_arn` - ARN of the sink that is used for this link. 
diff --git a/website/docs/r/oam_sink.html.markdown b/website/docs/r/oam_sink.html.markdown index 4c0ee34e3c3..12336403eac 100644 --- a/website/docs/r/oam_sink.html.markdown +++ b/website/docs/r/oam_sink.html.markdown @@ -39,6 +39,7 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Sink. +* `id` - ARN of the Sink. * `sink_id` - ID string that AWS generated as part of the sink ARN. ## Timeouts diff --git a/website/docs/r/oam_sink_policy.html.markdown b/website/docs/r/oam_sink_policy.html.markdown index da18fd94a95..fa3475e8980 100644 --- a/website/docs/r/oam_sink_policy.html.markdown +++ b/website/docs/r/oam_sink_policy.html.markdown @@ -54,6 +54,7 @@ The following arguments are required: This resource exports the following attributes in addition to the arguments above: * `arn` - ARN of the Sink. +* `id` - ARN of the sink to attach this policy to. * `sink_id` - ID string that AWS generated as part of the sink ARN. ## Timeouts diff --git a/website/docs/r/quicksight_account_subscription.html.markdown b/website/docs/r/quicksight_account_subscription.html.markdown index 8b880d2570b..2ed3e96cbe5 100644 --- a/website/docs/r/quicksight_account_subscription.html.markdown +++ b/website/docs/r/quicksight_account_subscription.html.markdown @@ -40,6 +40,7 @@ The following arguments are optional: * `directory_id` - (Optional) Active Directory ID that is associated with your Amazon QuickSight account. * `email_address` - (Optional) Email address of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `first_name` - (Optional) First name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. 
+* `iam_identity_center_instance_arn` - (Optional) The Amazon Resource Name (ARN) for the IAM Identity Center instance. * `last_name` - (Optional) Last name of the author of the Amazon QuickSight account to use for future communications. This field is required if `ENTERPPRISE_AND_Q` is the selected edition of the new Amazon QuickSight account. * `reader_group` - (Optional) Reader group associated with your Active Direcrtory. * `realm` - (Optional) Realm of the Active Directory that is associated with your Amazon QuickSight account. diff --git a/website/docs/r/rds_certificate.html.markdown b/website/docs/r/rds_certificate.html.markdown new file mode 100644 index 00000000000..839a279c568 --- /dev/null +++ b/website/docs/r/rds_certificate.html.markdown @@ -0,0 +1,48 @@ +--- +subcategory: "RDS (Relational Database)" +layout: "aws" +page_title: "AWS: aws_rds_certificate" +description: |- + Terraform resource for managing an AWS RDS (Relational Database) Certificate. +--- + +# Resource: aws_rds_certificate + +Provides a resource to override the system-default Secure Sockets Layer/Transport Layer Security (SSL/TLS) certificate for Amazon RDS for new DB instances in the current AWS region. + +~> **NOTE:** Removing this Terraform resource removes the override. New DB instances will use the system-default certificate for the current AWS region. + +## Example Usage + +```terraform +resource "aws_rds_certificate" "example" { + certificate_identifier = "rds-ca-rsa4096-g1" +} +``` + +## Argument Reference + +The following arguments are required: + +* `certificate_identifier` - (Required) Certificate identifier. For example, `rds-ca-rsa4096-g1`. Refer to [AWS RDS (Relational Database) Certificate Identifier](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.CertificateIdentifier) for more information. + +## Attribute Reference + +This resource exports no additional attributes. 
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import the RDS certificate override. For example:
+
+```terraform
+import {
+  to = aws_rds_certificate.example
+  id = "default"
+}
+```
+
+Using `terraform import`, import the RDS certificate override. For example:
+
+```console
+% terraform import aws_rds_certificate.example default
+```
diff --git a/website/docs/r/rds_cluster.html.markdown b/website/docs/r/rds_cluster.html.markdown
index f5088524160..76f798c8d2b 100644
--- a/website/docs/r/rds_cluster.html.markdown
+++ b/website/docs/r/rds_cluster.html.markdown
@@ -16,6 +16,8 @@ Changes to an RDS Cluster can occur when you manually change a parameter, such a
 
 ~> **Note:** Multi-AZ DB clusters are supported only for the MySQL and PostgreSQL DB engines.
 
+~> **Note:** `ca_certificate_identifier` is only supported for Multi-AZ DB clusters.
+
 ~> **Note:** using `apply_immediately` can result in a brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][4] for more information.
 
 ~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text.
@@ -197,7 +199,7 @@ the AWS official documentation :
 * [create-db-cluster](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-cluster.html)
 * [modify-db-cluster](https://docs.aws.amazon.com/cli/latest/reference/rds/modify-db-cluster.html)
 
-This argument supports the following arguments:
+This resource supports the following arguments:
 
 * `allocated_storage` - (Optional, Required for Multi-AZ DB cluster) The amount of storage in gibibytes (GiB) to allocate to each DB instance in the Multi-AZ DB cluster.
 * `allow_major_version_upgrade` - (Optional) Enable to allow major engine version upgrades when changing engine versions. Defaults to `false`.
@@ -208,6 +210,7 @@ This argument supports the following arguments:
 A maximum of 3 AZs can be configured.
* `backtrack_window` - (Optional) Target backtrack window, in seconds. Only available for `aurora` and `aurora-mysql` engines currently. To disable backtracking, set this value to `0`. Defaults to `0`. Must be between `0` and `259200` (72 hours) * `backup_retention_period` - (Optional) Days to retain backups for. Default `1` +* `ca_certificate_identifier` - (Optional) The CA certificate identifier to use for the DB cluster's server certificate. * `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifier`. * `cluster_identifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier. * `copy_tags_to_snapshot` – (Optional, boolean) Copy all Cluster `tags` to snapshots. Default is `false`. @@ -229,6 +232,7 @@ This argument supports the following arguments: * `enable_local_write_forwarding` - (Optional) Whether read replicas can forward write operations to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances.. See the [User Guide for Aurora](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-mysql-write-forwarding.html) for more information. **NOTE:** Local write forwarding requires Aurora MySQL version 3.04 or higher. * `enabled_cloudwatch_logs_exports` - (Optional) Set of log types to export to cloudwatch. If omitted, no logs will be exported. The following log types are supported: `audit`, `error`, `general`, `slowquery`, `postgresql` (PostgreSQL). * `engine_mode` - (Optional) Database engine mode. Valid values: `global` (only valid for Aurora MySQL 1.21 and earlier), `parallelquery`, `provisioned`, `serverless`. Defaults to: `provisioned`. See the [RDS User Guide](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.html) for limitations when using `serverless`. 
+* `engine_lifecycle_support` - (Optional) The life cycle type for this DB instance. This setting is valid for cluster types Aurora DB clusters and Multi-AZ DB clusters. Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. [Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `engine_version` - (Optional) Database engine version. Updating this argument results in an outage. See the [Aurora MySQL](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Updates.html) and [Aurora Postgres](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraPostgreSQL.Updates.html) documentation for your configured engine to determine this value, or by running `aws rds describe-db-engine-versions`. For example with Aurora MySQL 2, a potential value for this argument is `5.7.mysql_aurora.2.03.2`. The value can contain a partial version where supported by the API. The actual engine version used is returned in the attribute `engine_version_actual`, , see [Attribute Reference](#attribute-reference) below. * `engine` - (Required) Name of the database engine to be used for this DB cluster. Valid Values: `aurora-mysql`, `aurora-postgresql`, `mysql`, `postgres`. (Note that `mysql` and `postgres` are Multi-AZ RDS clusters). * `final_snapshot_identifier` - (Optional) Name of your final DB snapshot when this DB cluster is deleted. If omitted, no final snapshot will be made. 
@@ -368,6 +372,8 @@ This resource exports the following attributes in addition to the arguments abov * `cluster_members` – List of RDS Instances that are a part of this cluster * `availability_zones` - Availability zone of the instance * `backup_retention_period` - Backup retention period +* `ca_certificate_identifier` - CA identifier of the CA certificate used for the DB instance's server certificate +* `ca_certificate_valid_till` - Expiration date of the DB instance’s server certificate * `preferred_backup_window` - Daily time range during which the backups happen * `preferred_maintenance_window` - Maintenance window * `endpoint` - DNS address of the RDS instance diff --git a/website/docs/r/rds_cluster_activity_stream.html.markdown b/website/docs/r/rds_cluster_activity_stream.html.markdown index 8a9f3928cfe..b553b7d7ebf 100644 --- a/website/docs/r/rds_cluster_activity_stream.html.markdown +++ b/website/docs/r/rds_cluster_activity_stream.html.markdown @@ -56,7 +56,7 @@ resource "aws_rds_cluster_activity_stream" "default" { For more detailed documentation about each argument, refer to the [AWS official documentation][3]. -This argument supports the following arguments: +This resource supports the following arguments: * `resource_arn` - (Required, Forces new resources) The Amazon Resource Name (ARN) of the DB cluster. * `mode` - (Required, Forces new resources) Specifies the mode of the database activity stream. Database events such as a change or access generate an activity stream event. The database session can handle these events either synchronously or asynchronously. One of: `sync`, `async`. diff --git a/website/docs/r/rds_cluster_endpoint.html.markdown b/website/docs/r/rds_cluster_endpoint.html.markdown index 76770cdb60a..e5bd3b282bd 100644 --- a/website/docs/r/rds_cluster_endpoint.html.markdown +++ b/website/docs/r/rds_cluster_endpoint.html.markdown @@ -8,7 +8,7 @@ description: |- # Resource: aws_rds_cluster_endpoint -Manages an RDS Aurora Cluster Endpoint. 
+Manages an RDS Aurora Cluster Custom Endpoint. You can refer to the [User Guide][1]. ## Example Usage @@ -79,7 +79,7 @@ resource "aws_rds_cluster_endpoint" "static" { For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-cluster-endpoint.html). -This argument supports the following arguments: +This resource supports the following arguments: * `cluster_identifier` - (Required, Forces new resources) The cluster identifier. * `cluster_endpoint_identifier` - (Required, Forces new resources) The identifier to use for the new endpoint. This parameter is stored as a lowercase string. diff --git a/website/docs/r/rds_cluster_instance.html.markdown b/website/docs/r/rds_cluster_instance.html.markdown index 38450823d39..5b91b8b2287 100644 --- a/website/docs/r/rds_cluster_instance.html.markdown +++ b/website/docs/r/rds_cluster_instance.html.markdown @@ -51,7 +51,7 @@ resource "aws_rds_cluster" "default" { For more detailed documentation about each argument, refer to the [AWS official documentation](https://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html). -This argument supports the following arguments: +This resource supports the following arguments: * `apply_immediately` - (Optional) Specifies whether any database modifications are applied immediately, or during the next maintenance window. Default is`false`. * `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Default `true`. 
diff --git a/website/docs/r/rds_global_cluster.html.markdown b/website/docs/r/rds_global_cluster.html.markdown index eaf790883db..7c8b5d5ae74 100644 --- a/website/docs/r/rds_global_cluster.html.markdown +++ b/website/docs/r/rds_global_cluster.html.markdown @@ -206,6 +206,7 @@ This resource supports the following arguments: * `database_name` - (Optional, Forces new resources) Name for an automatically created database on cluster creation. * `deletion_protection` - (Optional) If the Global Cluster should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`. * `engine` - (Optional, Forces new resources) Name of the database engine to be used for this DB cluster. Terraform will only perform drift detection if a configuration value is provided. Valid values: `aurora`, `aurora-mysql`, `aurora-postgresql`. Defaults to `aurora`. Conflicts with `source_db_cluster_identifier`. +* `engine_lifecycle_support` - (Optional) The life cycle type for this DB instance. This setting applies only to Aurora PostgreSQL-based global databases. Valid values are `open-source-rds-extended-support`, `open-source-rds-extended-support-disabled`. Default value is `open-source-rds-extended-support`. [Using Amazon RDS Extended Support]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/extended-support.html * `engine_version` - (Optional) Engine version of the Aurora global database. The `engine`, `engine_version`, and `instance_class` (on the `aws_rds_cluster_instance`) must together support global databases. See [Using Amazon Aurora global databases](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-global-database.html) for more information. By upgrading the engine version, Terraform will upgrade cluster members. 
**NOTE:** To avoid an `inconsistent final plan` error while upgrading, use the `lifecycle` `ignore_changes` for `engine_version` meta argument on the associated `aws_rds_cluster` resource as shown above in [Upgrading Engine Versions](#upgrading-engine-versions) example. * `force_destroy` - (Optional) Enable to remove DB Cluster members from Global Cluster on destroy. Required with `source_db_cluster_identifier`. * `source_db_cluster_identifier` - (Optional) Amazon Resource Name (ARN) to use as the primary DB Cluster of the Global Cluster on creation. Terraform cannot perform drift detection of this value. diff --git a/website/docs/r/route53_resolver_firewall_rule.html.markdown b/website/docs/r/route53_resolver_firewall_rule.html.markdown index cb1c128b7e2..779cc33939a 100644 --- a/website/docs/r/route53_resolver_firewall_rule.html.markdown +++ b/website/docs/r/route53_resolver_firewall_rule.html.markdown @@ -48,8 +48,10 @@ This resource supports the following arguments: * `block_override_ttl` - (Required if `block_response` is `OVERRIDE`) The recommended amount of time, in seconds, for the DNS resolver or web browser to cache the provided override record. Minimum value of 0. Maximum value of 604800. * `block_response` - (Required if `action` is `BLOCK`) The way that you want DNS Firewall to block the request. Valid values: `NODATA`, `NXDOMAIN`, `OVERRIDE`. * `firewall_domain_list_id` - (Required) The ID of the domain list that you want to use in the rule. +* `firewall_domain_redirection_action` - (Optional) Evaluate DNS redirection in the DNS redirection chain, such as CNAME, DNAME, ot ALIAS. Valid values are `INSPECT_REDIRECTION_DOMAIN` and `TRUST_REDIRECTION_DOMAIN`. Default value is `INSPECT_REDIRECTION_DOMAIN`. * `firewall_rule_group_id` - (Required) The unique identifier of the firewall rule group where you want to create the rule. * `priority` - (Required) The setting that determines the processing order of the rule in the rule group. 
DNS Firewall processes the rules in a rule group by order of priority, starting from the lowest setting. +* `q_type` - (Optional) The query type you want the rule to evaluate. Additional details can be found [here](https://en.wikipedia.org/wiki/List_of_DNS_record_types) ## Attribute Reference diff --git a/website/docs/r/route53domains_delegation_signer_record.html.markdown b/website/docs/r/route53domains_delegation_signer_record.html.markdown index 4d8a57ce0e3..9e3d3c1f8f3 100644 --- a/website/docs/r/route53domains_delegation_signer_record.html.markdown +++ b/website/docs/r/route53domains_delegation_signer_record.html.markdown @@ -106,7 +106,7 @@ resource "aws_route53domains_delegation_signer_record" "example" { ## Argument Reference -This argument supports the following arguments: +This resource supports the following arguments: * `domain_name` - (Required) The name of the domain that will have its parent DNS zone updated with the Delegation Signer record. * `signing_attributes` - (Required) The information about a key, including the algorithm, public key-value, and flags. diff --git a/website/docs/r/route53domains_registered_domain.html.markdown b/website/docs/r/route53domains_registered_domain.html.markdown index 2eb15af8363..2c6a5646f63 100644 --- a/website/docs/r/route53domains_registered_domain.html.markdown +++ b/website/docs/r/route53domains_registered_domain.html.markdown @@ -38,7 +38,7 @@ resource "aws_route53domains_registered_domain" "example" { ~> **NOTE:** You must specify the same privacy setting for `admin_privacy`, `registrant_privacy` and `tech_privacy`. -This argument supports the following arguments: +This resource supports the following arguments: * `admin_contact` - (Optional) Details about the domain administrative contact. See [Contact Blocks](#contact-blocks) for more details. * `admin_privacy` - (Optional) Whether domain administrative contact information is concealed from WHOIS queries. Default: `true`. 
diff --git a/website/docs/r/route_table_association.html.markdown b/website/docs/r/route_table_association.html.markdown index ef72816b466..c87e59ed32f 100644 --- a/website/docs/r/route_table_association.html.markdown +++ b/website/docs/r/route_table_association.html.markdown @@ -31,7 +31,7 @@ resource "aws_route_table_association" "b" { ~> **NOTE:** Please note that one of either `subnet_id` or `gateway_id` is required. -This argument supports the following arguments: +This resource supports the following arguments: * `subnet_id` - (Optional) The subnet ID to create an association. Conflicts with `gateway_id`. * `gateway_id` - (Optional) The gateway ID to create an association. Conflicts with `subnet_id`. diff --git a/website/docs/r/s3_bucket_object_lock_configuration.html.markdown b/website/docs/r/s3_bucket_object_lock_configuration.html.markdown index 7dab081ee84..e3b5d067d7a 100644 --- a/website/docs/r/s3_bucket_object_lock_configuration.html.markdown +++ b/website/docs/r/s3_bucket_object_lock_configuration.html.markdown @@ -51,8 +51,8 @@ This resource supports the following arguments: * `expected_bucket_owner` - (Optional, Forces new resource) Account ID of the expected bucket owner. * `object_lock_enabled` - (Optional, Forces new resource) Indicates whether this bucket has an Object Lock configuration enabled. Defaults to `Enabled`. Valid values: `Enabled`. * `rule` - (Optional) Configuration block for specifying the Object Lock rule for the specified object. [See below](#rule). -* `token` - (Optional) Token to allow Object Lock to be enabled for an existing bucket. You must contact AWS support for the bucket's "Object Lock token". -The token is generated in the back-end when [versioning](https://docs.aws.amazon.com/AmazonS3/latest/userguide/manage-versioning-examples.html) is enabled on a bucket. For more details on versioning, see the [`aws_s3_bucket_versioning` resource](s3_bucket_versioning.html.markdown). 
+* `token` - (Optional,Deprecated) This argument is deprecated and no longer needed to enable Object Lock. +To enable Object Lock for an existing bucket, you must first enable versioning on the bucket and then enable Object Lock. For more details on versioning, see the [`aws_s3_bucket_versioning` resource](s3_bucket_versioning.html.markdown). ### rule diff --git a/website/docs/r/sagemaker_domain.html.markdown b/website/docs/r/sagemaker_domain.html.markdown index 2ac14e33afd..2b94d3793a7 100644 --- a/website/docs/r/sagemaker_domain.html.markdown +++ b/website/docs/r/sagemaker_domain.html.markdown @@ -221,6 +221,7 @@ The following arguments are optional: * `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [`default_resource_spec` Block](#default_resource_spec-block) below. * `lifecycle_config_arns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. +* `custom_image` - (Optional) A list of custom SageMaker images that are configured to run as a CodeEditor app. see [`custom_image` Block](#custom_image-block) below. ##### `code_repository` Block diff --git a/website/docs/r/sagemaker_endpoint_configuration.html.markdown b/website/docs/r/sagemaker_endpoint_configuration.html.markdown index 75e799d31a9..0cf7337ccfa 100644 --- a/website/docs/r/sagemaker_endpoint_configuration.html.markdown +++ b/website/docs/r/sagemaker_endpoint_configuration.html.markdown @@ -50,6 +50,7 @@ This resource supports the following arguments: * `container_startup_health_check_timeout_in_seconds` - (Optional) The timeout value, in seconds, for your inference container to pass health check by SageMaker Hosting. For more information about health check, see [How Your Container Should Respond to Health Check (Ping) Requests](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-inference-code.html#your-algorithms-inference-algo-ping-requests). 
Valid values between `60` and `3600`. * `core_dump_config` - (Optional) Specifies configuration for a core dump from the model container when the process crashes. Fields are documented below. * `enable_ssm_access` - (Optional) You can use this parameter to turn on native Amazon Web Services Systems Manager (SSM) access for a production variant behind an endpoint. By default, SSM access is disabled for all production variants behind an endpoints. +* `inference_ami_version` - (Optional) Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads. * `initial_instance_count` - (Optional) Initial number of instances used for auto-scaling. * `instance_type` - (Optional) The type of instance to start. * `initial_variant_weight` - (Optional) Determines initial traffic distribution among all of the models that you specify in the endpoint configuration. If unspecified, it defaults to `1.0`. @@ -86,7 +87,7 @@ This resource supports the following arguments: #### capture_options -* `capture_mode` - (Required) Specifies the data to be captured. Should be one of `Input` or `Output`. +* `capture_mode` - (Required) Specifies the data to be captured. Should be one of `Input`, `Output` or `InputAndOutput`. #### capture_content_type_header diff --git a/website/docs/r/sagemaker_space.html.markdown b/website/docs/r/sagemaker_space.html.markdown index 8d003d6fa87..01e24f03a8b 100644 --- a/website/docs/r/sagemaker_space.html.markdown +++ b/website/docs/r/sagemaker_space.html.markdown @@ -26,66 +26,62 @@ resource "aws_sagemaker_space" "example" { This resource supports the following arguments: * `domain_id` - (Required) The ID of the associated Domain. -* `ownership_settings` - (Optional) A collection of ownership settings. See [Ownership Settings](#ownership-settings) below. 
+* `ownership_settings` - (Optional) A collection of ownership settings. See [`ownership_settings` Block](#ownership_settings-block) below. * `space_display_name` - (Optional) The name of the space that appears in the SageMaker Studio UI. * `space_name` - (Required) The name of the space. -* `space_settings` - (Required) A collection of space settings. See [Space Settings](#space-settings) below. -* `space_sharing_settings` - (Optional) A collection of space sharing settings. See [Space Sharing Settings](#space-sharing-settings) below. +* `space_settings` - (Required) A collection of space settings. See [`space_settings` Block](#space_settings-block) below. +* `space_sharing_settings` - (Optional) A collection of space sharing settings. See [`space_sharing_settings` Block](#space_sharing_settings-block) below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### Ownership Settings +### `ownership_settings` Block * `owner_user_profile_name` - (Required) The user profile who is the owner of the private space. -### Space Sharing Settings +### `space_settings` Block -* `sharing_type` - (Required) Specifies the sharing type of the space. Valid values are `Private` and `Shared`. +* `app_type` - (Optional) The type of app created within the space. +* `code_editor_app_settings` - (Optional) The Code Editor application settings. See [`code_editor_app_settings` Block](#code_editor_app_settings-block) below. +* `custom_file_system` - (Optional) A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. See [`custom_file_system` Block](#custom_file_system-block) below. +* `jupyter_lab_app_settings` - (Optional) The settings for the JupyterLab application. 
See [`jupyter_lab_app_settings` Block](#jupyter_lab_app_settings-block) below. +* `jupyter_server_app_settings` - (Optional) The Jupyter server's app settings. See [`jupyter_server_app_settings` Block](#jupyter_server_app_settings-block) below. +* `kernel_gateway_app_settings` - (Optional) The kernel gateway app settings. See [`kernel_gateway_app_settings` Block](#kernel_gateway_app_settings-block) below. -### Space Settings +### `space_sharing_settings` Block -* `app_type` - (Optional) The type of app created within the space. -* `code_editor_app_settings` - (Optional) The Code Editor application settings. See [Code Editor App Settings](#code-editor-app-settings) below. -* `custom_file_system` - (Optional) A file system, created by you, that you assign to a space for an Amazon SageMaker Domain. See [Custom File System](#custom-file-system) below. -* `jupyter_lab_app_settings` - (Optional) The settings for the JupyterLab application. See [Jupyter Lab App Settings](#jupyter-lab-app-settings) below. -* `jupyter_server_app_settings` - (Optional) The Jupyter server's app settings. See [Jupyter Server App Settings](#jupyter-server-app-settings) below. -* `kernel_gateway_app_settings` - (Optional) The kernel gateway app settings. See [Kernel Gateway App Settings](#kernel-gateway-app-settings) below. +* `sharing_type` - (Required) Specifies the sharing type of the space. Valid values are `Private` and `Shared`. -#### Code Editor App Settings +### `code_editor_app_settings` Block -* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. See [`default_resource_spec` Block](#default_resource_spec-block) below. 
* `lifecycle_config_arns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. -#### Custom File System +### `custom_file_system` Block -* `efs_file_system` - (Optional) A custom file system in Amazon EFS. see [EFS File System](#efs-file-system) below. +* `efs_file_system` - (Optional) A custom file system in Amazon EFS. See [`efs_file_system` Block](#efs_file_system-block) below. -#### Jupyter Lab App Settings +### `jupyter_lab_app_settings` Block -* `code_repository` - (Optional) A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see [Code Repository](#code-repository) below. -* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. +* `code_repository` - (Optional) A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. See [`code_repository` Block](#code_repository-block) below. +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. See [`default_resource_spec` Block](#default_resource_spec-block) below. * `lifecycle_config_arns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. -#### Kernel Gateway App Settings +### `jupyter_server_app_settings` Block -* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. -* `custom_image` - (Optional) A list of custom SageMaker images that are configured to run as a KernelGateway app. see [Custom Image](#custom-image) below. 
+* `code_repository` - (Optional) A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. See [`code_repository` Block](#code_repository-block) below. +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. See [`default_resource_spec` Block](#default_resource_spec-block) below. * `lifecycle_config_arns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. -#### Jupyter Server App Settings +### `kernel_gateway_app_settings` Block -* `code_repository` - (Optional) A list of Git repositories that SageMaker automatically displays to users for cloning in the JupyterServer application. see [Code Repository](#code-repository) below. -* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default-resource-spec) below. +* `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. See [`default_resource_spec` Block](#default_resource_spec-block) below. +* `custom_image` - (Optional) A list of custom SageMaker images that are configured to run as a KernelGateway app. See [`custom_image` Block](#custom_image-block) below. * `lifecycle_config_arns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. -#### EFS File System - -* `file_system_id` - (Optional) The ID of your Amazon EFS file system. - -##### Code Repository +### `code_repository` Block * `repository_url` - (Optional) The URL of the Git repository. -##### Default Resource Spec +### `default_resource_spec` Block * `instance_type` - (Optional) The instance type. * `lifecycle_config_arn` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource. 
@@ -93,7 +89,11 @@ This resource supports the following arguments: * `sagemaker_image_version_alias` - (Optional) The SageMaker Image Version Alias. * `sagemaker_image_version_arn` - (Optional) The ARN of the image version created on the instance. -##### Custom Image +### `efs_file_system` Block + +* `file_system_id` - (Optional) The ID of your Amazon EFS file system. + +### `custom_image` Block * `app_image_config_name` - (Required) The name of the App Image Config. * `image_name` - (Required) The name of the Custom Image. diff --git a/website/docs/r/sagemaker_user_profile.html.markdown b/website/docs/r/sagemaker_user_profile.html.markdown index d06dfecbec1..c062db7b8ca 100644 --- a/website/docs/r/sagemaker_user_profile.html.markdown +++ b/website/docs/r/sagemaker_user_profile.html.markdown @@ -92,6 +92,7 @@ This resource supports the following arguments: * `default_resource_spec` - (Optional) The default instance type and the Amazon Resource Name (ARN) of the SageMaker image created on the instance. see [Default Resource Spec](#default_resource_spec) below. * `lifecycle_config_arns` - (Optional) The Amazon Resource Name (ARN) of the Lifecycle Configurations. +* `custom_image` - (Optional) A list of custom SageMaker images that are configured to run as a CodeEditor app. see [Custom Image](#custom_image) below. #### r_session_app_settings diff --git a/website/docs/r/sagemaker_workforce.html.markdown b/website/docs/r/sagemaker_workforce.html.markdown index aa809c8e765..a08e6c2f118 100644 --- a/website/docs/r/sagemaker_workforce.html.markdown +++ b/website/docs/r/sagemaker_workforce.html.markdown @@ -76,12 +76,14 @@ This resource supports the following arguments: ### Oidc Config +* `authentication_request_extra_params` - (Optional) A string to string map of identifiers specific to the custom identity provider (IdP) being used. * `authorization_endpoint` - (Required) The OIDC IdP authorization endpoint used to configure your private workforce. 
* `client_id` - (Required) The OIDC IdP client ID used to configure your private workforce. * `client_secret` - (Required) The OIDC IdP client secret used to configure your private workforce. * `issuer` - (Required) The OIDC IdP issuer used to configure your private workforce. * `jwks_uri` - (Required) The OIDC IdP JSON Web Key Set (Jwks) URI used to configure your private workforce. * `logout_endpoint` - (Required) The OIDC IdP logout endpoint used to configure your private workforce. +* `scope` - (Optional) An array of string identifiers used to refer to the specific pieces of user data or claims that the client application wants to access. * `token_endpoint` - (Required) The OIDC IdP token endpoint used to configure your private workforce. * `user_info_endpoint` - (Required) The OIDC IdP user information endpoint used to configure your private workforce. diff --git a/website/docs/r/sagemaker_workteam.html.markdown b/website/docs/r/sagemaker_workteam.html.markdown index ee9d9fb4747..dfb63860c11 100644 --- a/website/docs/r/sagemaker_workteam.html.markdown +++ b/website/docs/r/sagemaker_workteam.html.markdown @@ -55,6 +55,7 @@ This resource supports the following arguments: * `workteam_name` - (Required) The name of the workforce. * `member_definition` - (Required) A list of Member Definitions that contains objects that identify the workers that make up the work team. Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For private workforces created using Amazon Cognito use `cognito_member_definition`. For workforces created using your own OIDC identity provider (IdP) use `oidc_member_definition`. Do not provide input for both of these parameters in a single request. see [Member Definition](#member-definition) details below. * `notification_configuration` - (Optional) Configures notification of workers regarding available or expiring work items. see [Notification Configuration](#notification-configuration) details below. 
+* `worker_access_configuration` - (Optional) Use this optional parameter to constrain access to an Amazon S3 resource based on the IP address using supported IAM global condition keys. The Amazon S3 resource is accessed in the worker portal using an Amazon S3 presigned URL. see [Worker Access Configuration](#worker-access-configuration) details below. * `tags` - (Optional) A map of tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### Member Definition @@ -76,6 +77,19 @@ This resource supports the following arguments: * `notification_topic_arn` - (Required) The ARN for the SNS topic to which notifications should be published. +### Worker Access Configuration + +* `s3_presign` - (Required) Defines any Amazon S3 resource constraints. see [S3 Presign](#s3-presign) details below. + +#### S3 Presign + +* `iam_policy_constraints` - (Required) Use this parameter to specify the allowed request source. Possible sources are either SourceIp or VpcSourceIp. see [IAM Policy Constraints](#iam-policy-constraints) details below. + +##### IAM Policy Constraints + +* `source_ip` - (Optional) When SourceIp is Enabled the worker's IP address when a task is rendered in the worker portal is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL. This IP address is checked by Amazon S3 and must match in order for the Amazon S3 resource to be rendered in the worker portal. Valid values are `Enabled` or `Disabled` +* `vpc_source_ip` - (Optional) When VpcSourceIp is Enabled the worker's IP address when a task is rendered in private worker portal inside the VPC is added to the IAM policy as a Condition used to generate the Amazon S3 presigned URL.
To render the task successfully Amazon S3 checks that the presigned URL is being accessed over an Amazon S3 VPC Endpoint, and that the worker's IP address matches the IP address in the IAM policy. To learn more about configuring private worker portal, see [Use Amazon VPC mode from a private worker portal](https://docs.aws.amazon.com/sagemaker/latest/dg/samurai-vpc-worker-portal.html). Valid values are `Enabled` or `Disabled` + ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/service_discovery_service.html.markdown b/website/docs/r/service_discovery_service.html.markdown index b486856acdb..8938e74b017 100644 --- a/website/docs/r/service_discovery_service.html.markdown +++ b/website/docs/r/service_discovery_service.html.markdown @@ -75,44 +75,44 @@ resource "aws_service_discovery_service" "example" { This resource supports the following arguments: -* `name` - (Required, ForceNew) The name of the service. +* `name` - (Required, Forces new resource) The name of the service. * `description` - (Optional) The description of the service. -* `dns_config` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. -* `health_check_config` - (Optional) A complex type that contains settings for an optional health check. Only for Public DNS namespaces. -* `force_destroy` - (Optional, Default:false ) A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. -* `health_check_custom_config` - (Optional, ForceNew) A complex type that contains settings for ECS managed health checks. +* `dns_config` - (Optional) A complex type that contains information about the resource record sets that you want Amazon Route 53 to create when you register an instance. 
See [`dns_config` Block](#dns_config-block) for details. +* `health_check_config` - (Optional) A complex type that contains settings for an optional health check. Only for Public DNS namespaces. See [`health_check_config` Block](#health_check_config-block) for details. +* `force_destroy` - (Optional) A boolean that indicates all instances should be deleted from the service so that the service can be destroyed without error. These instances are not recoverable. Defaults to `false`. +* `health_check_custom_config` - (Optional, Forces new resource) A complex type that contains settings for ECS managed health checks. See [`health_check_custom_config` Block](#health_check_custom_config-block) for details. * `namespace_id` - (Optional) The ID of the namespace that you want to use to create the service. * `type` - (Optional) If present, specifies that the service instances are only discoverable using the `DiscoverInstances` API operation. No DNS records is registered for the service instances. The only valid value is `HTTP`. * `tags` - (Optional) A map of tags to assign to the service. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### dns_config +### `dns_config` Block -This argument supports the following arguments: +The `dns_config` configuration block supports the following arguments: -* `namespace_id` - (Required, ForceNew) The ID of the namespace to use for DNS configuration. -* `dns_records` - (Required) An array that contains one DnsRecord object for each resource record set. +* `namespace_id` - (Required, Forces new resource) The ID of the namespace to use for DNS configuration. +* `dns_records` - (Required) An array that contains one DnsRecord object for each resource record set. See [`dns_records` Block](#dns_records-block) for details. 
* `routing_policy` - (Optional) The routing policy that you want to apply to all records that Route 53 creates when you register an instance and specify the service. Valid Values: MULTIVALUE, WEIGHTED -#### dns_records +#### `dns_records` Block -This argument supports the following arguments: +The `dns_records` configuration block supports the following arguments: * `ttl` - (Required) The amount of time, in seconds, that you want DNS resolvers to cache the settings for this resource record set. -* `type` - (Required, ForceNew) The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME +* `type` - (Required, Forces new resource) The type of the resource, which indicates the value that Amazon Route 53 returns in response to DNS queries. Valid Values: A, AAAA, SRV, CNAME -### health_check_config +### `health_check_config` Block -This argument supports the following arguments: +The `health_check_config` configuration block supports the following arguments: * `failure_threshold` - (Optional) The number of consecutive health checks. Maximum value of 10. * `resource_path` - (Optional) The path that you want Route 53 to request when performing health checks. Route 53 automatically adds the DNS name for the service. If you don't specify a value, the default value is /. -* `type` - (Optional, ForceNew) The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. Valid Values: HTTP, HTTPS, TCP +* `type` - (Optional, Forces new resource) The type of health check that you want to create, which indicates how Route 53 determines whether an endpoint is healthy. 
Valid Values: HTTP, HTTPS, TCP -### health_check_custom_config +### `health_check_custom_config` Block -This argument supports the following arguments: +The `health_check_custom_config` configuration block supports the following arguments: -* `failure_threshold` - (Optional, ForceNew) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. +* `failure_threshold` - (Optional, Forces new resource) The number of 30-second intervals that you want service discovery to wait before it changes the health status of a service instance. Maximum value of 10. ## Attribute Reference diff --git a/website/docs/r/servicecatalog_product.html.markdown b/website/docs/r/servicecatalog_product.html.markdown index 29d92156f33..5e39a1e9375 100644 --- a/website/docs/r/servicecatalog_product.html.markdown +++ b/website/docs/r/servicecatalog_product.html.markdown @@ -40,7 +40,7 @@ The following arguments are required: * `name` - (Required) Name of the product. * `owner` - (Required) Owner of the product. -* `provisioning_artifact_parameters` - (Required) Configuration block for provisioning artifact (i.e., version) parameters. Detailed below. +* `provisioning_artifact_parameters` - (Required) Configuration block for provisioning artifact (i.e., version) parameters. See [`provisioning_artifact_parameters` Block](#provisioning_artifact_parameters-block) for details. * `type` - (Required) Type of product. See [AWS Docs](https://docs.aws.amazon.com/servicecatalog/latest/dg/API_CreateProduct.html#API_CreateProduct_RequestSyntax) for valid list of values. The following arguments are optional: @@ -53,9 +53,9 @@ The following arguments are optional: * `support_url` - (Optional) Contact URL for product support. * `tags` - (Optional) Tags to apply to the product. 
If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### provisioning_artifact_parameters +### `provisioning_artifact_parameters` Block -This argument supports the following arguments: +The `provisioning_artifact_parameters` configuration block supports the following arguments: * `description` - (Optional) Description of the provisioning artifact (i.e., version), including how it differs from the previous provisioning artifact. * `disable_template_validation` - (Optional) Whether AWS Service Catalog stops validating the specified provisioning artifact template even if it is invalid. diff --git a/website/docs/r/servicecatalog_provisioned_product.html.markdown b/website/docs/r/servicecatalog_provisioned_product.html.markdown index 495f955957e..bf921520468 100644 --- a/website/docs/r/servicecatalog_provisioned_product.html.markdown +++ b/website/docs/r/servicecatalog_provisioned_product.html.markdown @@ -56,24 +56,24 @@ The following arguments are optional: * `product_name` - (Optional) Name of the product. You must provide `product_id` or `product_name`, but not both. * `provisioning_artifact_id` - (Optional) Identifier of the provisioning artifact. For example, `pa-4abcdjnxjj6ne`. You must provide the `provisioning_artifact_id` or `provisioning_artifact_name`, but not both. * `provisioning_artifact_name` - (Optional) Name of the provisioning artifact. You must provide the `provisioning_artifact_id` or `provisioning_artifact_name`, but not both. -* `provisioning_parameters` - (Optional) Configuration block with parameters specified by the administrator that are required for provisioning the product. See details below. 
+* `provisioning_parameters` - (Optional) Configuration block with parameters specified by the administrator that are required for provisioning the product. See [`provisioning_parameters` Block](#provisioning_parameters-block) for details. * `retain_physical_resources` - (Optional) _Only applies to deleting._ Whether to delete the Service Catalog provisioned product but leave the CloudFormation stack, stack set, or the underlying resources of the deleted provisioned product. The default value is `false`. -* `stack_set_provisioning_preferences` - (Optional) Configuration block with information about the provisioning preferences for a stack set. See details below. +* `stack_set_provisioning_preferences` - (Optional) Configuration block with information about the provisioning preferences for a stack set. See [`stack_set_provisioning_preferences` Block](#stack_set_provisioning_preferences-block) for details. * `tags` - (Optional) Tags to apply to the provisioned product. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### provisioning_parameters +### `provisioning_parameters` Block -This argument supports the following arguments: +The `provisioning_parameters` configuration block supports the following arguments: * `key` - (Required) Parameter key. * `use_previous_value` - (Optional) Whether to ignore `value` and keep the previous parameter value. Ignored when initially provisioning a product. * `value` - (Optional) Parameter value. -### stack_set_provisioning_preferences +### `stack_set_provisioning_preferences` Block All of the `stack_set_provisioning_preferences` are only applicable to a `CFN_STACKSET` provisioned product type. 
-This argument supports the following arguments: +The `stack_set_provisioning_preferences` configuration block supports the following arguments: * `accounts` - (Optional) One or more AWS accounts that will have access to the provisioned product. The AWS accounts specified should be within the list of accounts in the STACKSET constraint. To get the list of accounts in the STACKSET constraint, use the `aws_servicecatalog_provisioning_parameters` data source. If no values are specified, the default value is all accounts from the STACKSET constraint. * `failure_tolerance_count` - (Optional) Number of accounts, per region, for which this operation can fail before AWS Service Catalog stops the operation in that region. If the operation is stopped in a region, AWS Service Catalog doesn't attempt the operation in any subsequent regions. You must specify either `failure_tolerance_count` or `failure_tolerance_percentage`, but not both. The default value is 0 if no value is specified. diff --git a/website/docs/r/sesv2_configuration_set.html.markdown b/website/docs/r/sesv2_configuration_set.html.markdown index b796a3631ef..801c76f8123 100644 --- a/website/docs/r/sesv2_configuration_set.html.markdown +++ b/website/docs/r/sesv2_configuration_set.html.markdown @@ -45,51 +45,61 @@ resource "aws_sesv2_configuration_set" "example" { This resource supports the following arguments: * `configuration_set_name` - (Required) The name of the configuration set. -* `delivery_options` - (Optional) An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. -* `reputation_options` - (Optional) An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. -* `sending_options` - (Optional) An object that defines whether or not Amazon SES can send email that you send using the configuration set. 
-* `suppression_options` - (Optional) An object that contains information about the suppression list preferences for your account. +* `delivery_options` - (Optional) An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set. See [`delivery_options` Block](#delivery_options-block) for details. +* `reputation_options` - (Optional) An object that defines whether or not Amazon SES collects reputation metrics for the emails that you send that use the configuration set. See [`reputation_options` Block](#reputation_options-block) for details. +* `sending_options` - (Optional) An object that defines whether or not Amazon SES can send email that you send using the configuration set. See [`sending_options` Block](#sending_options-block) for details. +* `suppression_options` - (Optional) An object that contains information about the suppression list preferences for your account. See [`suppression_options` Block](#suppression_options-block) for details. * `tags` - (Optional) A map of tags to assign to the service. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -* `tracking_options` - (Optional) An object that defines the open and click tracking options for emails that you send using the configuration set. -* `vdm_options` - (Optional) An object that defines the VDM settings that apply to emails that you send using the configuration set. +* `tracking_options` - (Optional) An object that defines the open and click tracking options for emails that you send using the configuration set. See [`tracking_options` Block](#tracking_options-block) for details. +* `vdm_options` - (Optional) An object that defines the VDM settings that apply to emails that you send using the configuration set. 
See [`vdm_options` Block](#vdm_options-block) for details. -### delivery_options +### `delivery_options` Block -This argument supports the following arguments: +The `delivery_options` configuration block supports the following arguments: * `sending_pool_name` - (Optional) The name of the dedicated IP pool to associate with the configuration set. * `tls_policy` - (Optional) Specifies whether messages that use the configuration set are required to use Transport Layer Security (TLS). Valid values: `REQUIRE`, `OPTIONAL`. -### reputation_options +### `reputation_options` Block -This argument supports the following arguments: +The `reputation_options` configuration block supports the following arguments: * `reputation_metrics_enabled` - (Optional) If `true`, tracking of reputation metrics is enabled for the configuration set. If `false`, tracking of reputation metrics is disabled for the configuration set. -### sending_options +### `sending_options` Block -This argument supports the following arguments: +The `sending_options` configuration block supports the following arguments: * `sending_enabled` - (Optional) If `true`, email sending is enabled for the configuration set. If `false`, email sending is disabled for the configuration set. -### suppression_options +### `suppression_options` Block + +The `suppression_options` configuration block supports the following arguments: * `suppressed_reasons` - (Optional) A list that contains the reasons that email addresses are automatically added to the suppression list for your account. Valid values: `BOUNCE`, `COMPLAINT`. -### tracking_options +### `tracking_options` Block + +The `tracking_options` configuration block supports the following arguments: * `custom_redirect_domain` - (Required) The domain to use for tracking open and click events. 
-### vdm_options +### `vdm_options` Block + +The `vdm_options` configuration block supports the following arguments: -* `dashboard_options` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Dashboard. -* `guardian_options` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Guardian. +* `dashboard_options` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Dashboard. See [`dashboard_options` Block](#dashboard_options-block) for details. +* `guardian_options` - (Optional) Specifies additional settings for your VDM configuration as applicable to the Guardian. See [`guardian_options` Block](#guardian_options-block) for details. -### dashboard_options +### `dashboard_options` Block + +The `dashboard_options` configuration block supports the following arguments: * `engagement_metrics` - (Optional) Specifies the status of your VDM engagement metrics collection. Valid values: `ENABLED`, `DISABLED`. -### guardian_options +### `guardian_options` Block + +The `guardian_options` configuration block supports the following arguments: * `optimized_shared_delivery` - (Optional) Specifies the status of your VDM optimized shared delivery. Valid values: `ENABLED`, `DISABLED`. diff --git a/website/docs/r/ssm_association.html.markdown b/website/docs/r/ssm_association.html.markdown index 670d42a13e6..f93a48c3609 100644 --- a/website/docs/r/ssm_association.html.markdown +++ b/website/docs/r/ssm_association.html.markdown @@ -96,6 +96,7 @@ This resource supports the following arguments: * `parameters` - (Optional) A block of arbitrary string parameters to pass to the SSM document. * `schedule_expression` - (Optional) A [cron or rate expression](https://docs.aws.amazon.com/systems-manager/latest/userguide/reference-cron-and-rate-expressions.html) that specifies when the association runs. * `sync_compliance` - (Optional) The mode for generating association compliance. 
You can specify `AUTO` or `MANUAL`. +* `tags` - (Optional) A map of tags to assign to the object. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `targets` - (Optional) A block containing the targets of the SSM association. Targets are documented below. AWS currently supports a maximum of 5 targets. * `wait_for_success_timeout_seconds` - (Optional) The number of seconds to wait for the association status to be `Success`. If `Success` status is not reached within the given time, create operation will fail. @@ -119,6 +120,7 @@ This resource exports the following attributes in addition to the arguments abov * `instance_id` - The instance id that the SSM document was applied to. * `name` - The name of the SSM document to apply. * `parameters` - Additional parameters passed to the SSM document. +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block). ## Import diff --git a/website/docs/r/storagegateway_gateway.html.markdown b/website/docs/r/storagegateway_gateway.html.markdown index 245719996cf..0464dacf2a1 100644 --- a/website/docs/r/storagegateway_gateway.html.markdown +++ b/website/docs/r/storagegateway_gateway.html.markdown @@ -100,7 +100,7 @@ resource "aws_storagegateway_gateway" "example" { ~> **NOTE:** One of `activation_key` or `gateway_ip_address` must be provided for resource creation (gateway activation). Neither is required for resource import. If using `gateway_ip_address`, Terraform must be able to make an HTTP (port 80) GET request to the specified IP address from where it is running. 
-This argument supports the following arguments: +This resource supports the following arguments: * `gateway_name` - (Required) Name of the gateway. * `gateway_timezone` - (Required) Time zone for the gateway. The time zone is of the format "GMT", "GMT-hr:mm", or "GMT+hr:mm". For example, `GMT-4:00` indicates the time is 4 hours behind GMT. The time zone is used, for example, for scheduling snapshots and your gateway's maintenance schedule. diff --git a/website/docs/r/verifiedpermissions_identity_source.html.markdown b/website/docs/r/verifiedpermissions_identity_source.html.markdown new file mode 100644 index 00000000000..6066536adcc --- /dev/null +++ b/website/docs/r/verifiedpermissions_identity_source.html.markdown @@ -0,0 +1,145 @@ +--- +subcategory: "Verified Permissions" +layout: "aws" +page_title: "AWS: aws_verifiedpermissions_identity_source" +description: |- + Terraform resource for managing an AWS Verified Permissions Identity Source. +--- + +# Resource: aws_verifiedpermissions_identity_source + +Terraform resource for managing an AWS Verified Permissions Identity Source. 
+ +## Example Usage + +### Cognito User Pool Configuration Usage + +```terraform +resource "aws_verifiedpermissions_policy_store" "example" { + validation_settings { + mode = "STRICT" + } +} + +resource "aws_cognito_user_pool" "example" { + name = "example" +} + +resource "aws_cognito_user_pool_client" "example" { + name = "example" + user_pool_id = aws_cognito_user_pool.example.id + explicit_auth_flows = ["ADMIN_NO_SRP_AUTH"] +} + +resource "aws_verifiedpermissions_identity_source" "example" { + policy_store_id = aws_verifiedpermissions_policy_store.example.id + configuration { + cognito_user_pool_configuration { + user_pool_arn = aws_cognito_user_pool.example.arn + client_ids = [aws_cognito_user_pool_client.example.id] + } + } +} +``` + +### OpenID Connect Configuration Usage + +```terraform +resource "aws_verifiedpermissions_policy_store" "example" { + validation_settings { + mode = "STRICT" + } +} + +resource "aws_verifiedpermissions_identity_source" "example" { + policy_store_id = aws_verifiedpermissions_policy_store.example.id + configuration { + open_id_connect_configuration { + issuer = "https://auth.example.com" + token_selection { + access_token_only { + audiences = ["https://myapp.example.com"] + principal_id_claim = "sub" + } + } + entity_id_prefix = "MyOIDCProvider" + group_configuration { + group_claim = "groups" + group_entity_type = "MyCorp::UserGroup" + } + } + } + principal_entity_type = "MyCorp::User" +} +``` + +## Argument Reference + +* `policy_store_id` - (Required) Specifies the ID of the policy store in which you want to store this identity source. +* `configuration` - (Required) Specifies the details required to communicate with the identity provider (IdP) associated with this identity source. See [Configuration](#configuration) below. +* `principal_entity_type` - (Optional) Specifies the namespace and data type of the principals generated for identities authenticated by the new identity source. 
+ +### Configuration + +* `cognito_user_pool_configuration` - (Required) Specifies the configuration details of an Amazon Cognito user pool that Verified Permissions can use as a source of authenticated identities as entities. See [Cognito User Pool Configuration](#cognito-user-pool-configuration) below. +* `open_id_connect_configuration` - (Required) Specifies the configuration details of an OpenID Connect (OIDC) identity provider, or identity source, that Verified Permissions can use to generate entities from authenticated identities. See [Open ID Connect Configuration](#open-id-connect-configuration) below. + +#### Cognito User Pool Configuration + +* `user_pool_arn` - (Required) The Amazon Resource Name (ARN) of the Amazon Cognito user pool that contains the identities to be authorized. +* `client_ids` - (Optional) The unique application client IDs that are associated with the specified Amazon Cognito user pool. +* `group_configuration` - (Optional) The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source. See [Group Configuration](#group-configuration) below. + +#### Group Configuration + +* `group_entity_type` - (Required) The name of the schema entity type that's mapped to the user pool group. Defaults to `AWS::CognitoGroup`. + +#### Open ID Connect Configuration + +* `issuer` - (Required) The issuer URL of an OIDC identity provider. This URL must have an OIDC discovery endpoint at the path `.well-known/openid-configuration`. +* `token_selection` - (Required) The token type that you want to process from your OIDC identity provider. Your policy store can process either identity (ID) or access tokens from a given OIDC identity source. See [Token Selection](#token-selection) below. +* `entity_id_prefix` - (Optional) A descriptive string that you want to prefix to user entities from your OIDC identity provider. 
+* `group_configuration` - (Optional) The type of entity that a policy store maps to groups from an Amazon Cognito user pool identity source. See [Group Configuration](#open-id-group-configuration) below. + +#### Token Selection + +* `access_token_only` - (Optional) The OIDC configuration for processing access tokens. See [Access Token Only](#access-token-only) below. +* `identity_token_only` - (Optional) The OIDC configuration for processing identity (ID) tokens. See [Identity Token Only](#identity-token-only) below. + +#### Access Token Only + +* `audiences` - (Optional) The access token aud claim values that you want to accept in your policy store. +* `principal_id_claim` - (Optional) The claim that determines the principal in OIDC access tokens. + +#### Identity Token Only + +* `client_ids` - (Optional) The ID token audience, or client ID, claim values that you want to accept in your policy store from an OIDC identity provider. +* `principal_id_claim` - (Optional) The claim that determines the principal in OIDC identity (ID) tokens. + +#### Open ID Group Configuration + +* `group_claim` - (Required) The token claim that you want Verified Permissions to interpret as group membership. For example, `groups`. +* `group_entity_type` - (Required) The policy store entity type that you want to map your users' group claim to. For example, `MyCorp::UserGroup`. A group entity type is an entity that can have a user entity type as a member. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `policy_id` - The Policy ID of the policy. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Verified Permissions Identity Source using the `policy_store_id:identity_source_id`. 
For example: + +```terraform +import { + to = aws_verifiedpermissions_identity_source.example + id = "policy-store-id-12345678:identity-source-id-12345678" +} +``` + +Using `terraform import`, import Verified Permissions Identity Source using the `policy_store_id:identity_source_id`. For example: + +```console +% terraform import aws_verifiedpermissions_identity_source.example policy-store-id-12345678:identity-source-id-12345678 +``` diff --git a/website/docs/r/vpc_peering_connection.html.markdown b/website/docs/r/vpc_peering_connection.html.markdown index f67b29d2891..f842d832ea7 100644 --- a/website/docs/r/vpc_peering_connection.html.markdown +++ b/website/docs/r/vpc_peering_connection.html.markdown @@ -103,7 +103,7 @@ can be done using the [`auto_accept`](vpc_peering_connection.html#auto_accept) a Connection has to be made active manually using other means. See [notes](vpc_peering_connection.html#notes) below for more information. -This argument supports the following arguments: +This resource supports the following arguments: * `peer_owner_id` - (Optional) The AWS account ID of the target peer VPC. Defaults to the account ID the [AWS provider][1] is currently connected to, so must be managed if connecting cross-account. diff --git a/website/docs/r/vpc_security_group_egress_rule.html.markdown b/website/docs/r/vpc_security_group_egress_rule.html.markdown index 206cfecd4b4..40ea7dee2db 100644 --- a/website/docs/r/vpc_security_group_egress_rule.html.markdown +++ b/website/docs/r/vpc_security_group_egress_rule.html.markdown @@ -34,7 +34,7 @@ resource "aws_vpc_security_group_egress_rule" "example" { ~> **Note** Although `cidr_ipv4`, `cidr_ipv6`, `prefix_list_id`, and `referenced_security_group_id` are all marked as optional, you *must* provide one of them in order to configure the destination of the traffic. The `from_port` and `to_port` arguments are required unless `ip_protocol` is set to `-1` or `icmpv6`. 
-This argument supports the following arguments: +This resource supports the following arguments: * `cidr_ipv4` - (Optional) The destination IPv4 CIDR range. * `cidr_ipv6` - (Optional) The destination IPv6 CIDR range. diff --git a/website/docs/r/vpclattice_listener.html.markdown b/website/docs/r/vpclattice_listener.html.markdown index c17668892f0..32736cf9ec6 100644 --- a/website/docs/r/vpclattice_listener.html.markdown +++ b/website/docs/r/vpclattice_listener.html.markdown @@ -118,7 +118,7 @@ This resource supports the following arguments: * `default_action` - (Required) Default action block for the default listener rule. Default action blocks are defined below. * `name` - (Required, Forces new resource) Name of the listener. A listener name must be unique within a service. Valid characters are a-z, 0-9, and hyphens (-). You can't use a hyphen as the first or last character, or immediately after another hyphen. * `port` - (Optional, Forces new resource) Listener port. You can specify a value from 1 to 65535. If `port` is not specified and `protocol` is HTTP, the value will default to 80. If `port` is not specified and `protocol` is HTTPS, the value will default to 443. -* `protocol` - (Required, Forces new resource) Protocol for the listener. Supported values are `HTTP` or `HTTPS` +* `protocol` - (Required, Forces new resource) Protocol for the listener. Supported values are `HTTP`, `HTTPS` or `TLS_PASSTHROUGH` * `service_arn` - (Optional) Amazon Resource Name (ARN) of the VPC Lattice service. You must include either the `service_arn` or `service_identifier` arguments. * `service_identifier` - (Optional) ID of the VPC Lattice service. You must include either the `service_arn` or `service_identifier` arguments. -> **NOTE:** You must specify one of the following arguments: `service_arn` or `service_identifier`. 
diff --git a/website/docs/r/wafv2_web_acl.html.markdown b/website/docs/r/wafv2_web_acl.html.markdown index 8b55c375afc..89ceef7b870 100644 --- a/website/docs/r/wafv2_web_acl.html.markdown +++ b/website/docs/r/wafv2_web_acl.html.markdown @@ -789,6 +789,7 @@ The `managed_rule_group_configs` block support the following arguments: ### `aws_managed_rules_bot_control_rule_set` Block +* `enable_machine_learning` - (Optional) Applies only to the targeted inspection level. Determines whether to use machine learning (ML) to analyze your web traffic for bot-related activity. Defaults to `true`. * `inspection_level` - (Optional) The inspection level to use for the Bot Control rule group. ### `aws_managed_rules_acfp_rule_set` Block @@ -946,7 +947,7 @@ Inspect a single header. Provide the name of the header to inspect, for example, The `single_header` block supports the following arguments: -* `name` - (Optional) Name of the query header to inspect. This setting must be provided as lower case characters. +* `name` - (Required) Name of the query header to inspect. This setting must be provided as lower case characters. ### `single_query_argument` Block @@ -954,7 +955,7 @@ Inspect a single query argument. Provide the name of the query argument to inspe The `single_query_argument` block supports the following arguments: -* `name` - (Optional) Name of the query header to inspect. This setting must be provided as lower case characters. +* `name` - (Required) Name of the query header to inspect. This setting must be provided as lower case characters. 
### `body` Block diff --git a/website/docs/r/wafv2_web_acl_association.html.markdown b/website/docs/r/wafv2_web_acl_association.html.markdown index 89608fb5db4..2913ceade65 100644 --- a/website/docs/r/wafv2_web_acl_association.html.markdown +++ b/website/docs/r/wafv2_web_acl_association.html.markdown @@ -28,7 +28,7 @@ resource "aws_api_gateway_rest_api" "example" { paths = { "/path1" = { get = { - x-amazon-apigateway-integration = { + "x-amazon-apigateway-integration" = { httpMethod = "GET" payloadFormatVersion = "1.0" type = "HTTP_PROXY" diff --git a/website/docs/r/wafv2_web_acl_logging_configuration.html.markdown b/website/docs/r/wafv2_web_acl_logging_configuration.html.markdown index 4e9ee0fd938..5557f1bdcda 100644 --- a/website/docs/r/wafv2_web_acl_logging_configuration.html.markdown +++ b/website/docs/r/wafv2_web_acl_logging_configuration.html.markdown @@ -178,7 +178,7 @@ To redact a single header, provide the name of the header to be redacted. For ex The `single_header` block supports the following arguments: -* `name` - (Optional) Name of the query header to redact. This setting must be provided in lowercase characters. +* `name` - (Required) Name of the query header to redact. This setting must be provided in lowercase characters. ## Attribute Reference