From de338210dc1b0bb2eecee1dc16e073163b2d1df7 Mon Sep 17 00:00:00 2001 From: Mark Chappell Date: Mon, 8 May 2023 19:21:22 +0200 Subject: [PATCH] Bulk migration to Python 3.6 f-strings (#1810) Bulk migration to Python 3.6 f-strings SUMMARY We've dropped support for Python <3.6, bulk migrate to fstrings and perform some general string cleanup A combination of black --preview flynt some manual cleanup ISSUE TYPE Feature Pull Request COMPONENT NAME plugins/ tests/ ADDITIONAL INFORMATION Reviewed-by: Alina Buzachis --- changelogs/fragments/fstring-1.yml | 2 + plugins/module_utils/base.py | 10 +-- plugins/module_utils/etag.py | 4 +- plugins/module_utils/networkfirewall.py | 15 ++-- plugins/module_utils/opensearch.py | 12 +-- plugins/module_utils/sns.py | 6 +- plugins/modules/acm_certificate.py | 12 +-- plugins/modules/acm_certificate_info.py | 2 +- plugins/modules/api_gateway.py | 8 +- .../modules/autoscaling_instance_refresh.py | 2 +- plugins/modules/autoscaling_policy.py | 13 ++-- plugins/modules/batch_compute_environment.py | 7 +- plugins/modules/cloudformation_stack_set.py | 55 +++++++------ plugins/modules/cloudfront_distribution.py | 43 ++++------ plugins/modules/codebuild_project.py | 3 +- plugins/modules/codepipeline.py | 6 +- plugins/modules/config_delivery_channel.py | 3 +- plugins/modules/data_pipeline.py | 18 +++-- .../directconnect_confirm_connection.py | 4 +- plugins/modules/directconnect_connection.py | 4 +- .../directconnect_link_aggregation_group.py | 32 ++++---- .../directconnect_virtual_interface.py | 14 ++-- plugins/modules/dynamodb_table.py | 16 ++-- plugins/modules/ec2_ami_copy.py | 4 +- plugins/modules/ec2_launch_template.py | 45 +++++------ plugins/modules/ec2_placement_group.py | 10 +-- plugins/modules/ec2_placement_group_info.py | 2 +- plugins/modules/ec2_transit_gateway.py | 6 +- plugins/modules/ec2_vpc_egress_igw.py | 12 +-- plugins/modules/ec2_vpc_nacl_info.py | 2 +- plugins/modules/ec2_vpc_vgw.py | 2 +- plugins/modules/ec2_vpc_vpn.py | 37 
+++++---- plugins/modules/ec2_win_password.py | 4 +- plugins/modules/ecs_attribute.py | 10 +-- plugins/modules/ecs_cluster.py | 2 +- plugins/modules/ecs_ecr.py | 11 ++- plugins/modules/ecs_service.py | 8 +- plugins/modules/ecs_tag.py | 10 +-- plugins/modules/ecs_taskdefinition.py | 16 ++-- plugins/modules/efs.py | 4 +- plugins/modules/efs_info.py | 6 +- plugins/modules/efs_tag.py | 8 +- plugins/modules/eks_cluster.py | 12 +-- plugins/modules/eks_fargate_profile.py | 10 +-- plugins/modules/eks_nodegroup.py | 20 ++--- plugins/modules/elasticache.py | 38 +++++---- plugins/modules/elasticache_info.py | 4 +- .../modules/elasticache_parameter_group.py | 22 +++--- plugins/modules/elasticache_snapshot.py | 6 +- plugins/modules/elb_instance.py | 2 +- plugins/modules/elb_target.py | 12 ++- plugins/modules/elb_target_group.py | 2 +- plugins/modules/elb_target_info.py | 6 +- plugins/modules/glue_connection.py | 8 +- plugins/modules/glue_crawler.py | 8 +- plugins/modules/glue_job.py | 8 +- plugins/modules/iam_access_key.py | 12 +-- plugins/modules/iam_access_key_info.py | 2 +- plugins/modules/iam_group.py | 24 +++--- plugins/modules/iam_managed_policy.py | 16 ++-- plugins/modules/iam_password_policy.py | 2 +- plugins/modules/iam_role.py | 42 +++++----- plugins/modules/iam_role_info.py | 8 +- plugins/modules/iam_saml_federation.py | 12 +-- plugins/modules/iam_server_certificate.py | 14 ++-- plugins/modules/kinesis_stream.py | 72 +++++++++-------- plugins/modules/lightsail.py | 3 +- plugins/modules/msk_cluster.py | 22 +++--- plugins/modules/msk_config.py | 4 +- plugins/modules/opensearch.py | 78 +++++++------------ plugins/modules/redshift.py | 8 +- .../redshift_cross_region_snapshots.py | 3 +- plugins/modules/s3_bucket_notification.py | 6 +- plugins/modules/s3_cors.py | 4 +- plugins/modules/s3_lifecycle.py | 2 +- plugins/modules/s3_logging.py | 4 +- plugins/modules/s3_metrics_configuration.py | 4 +- plugins/modules/s3_sync.py | 4 +- plugins/modules/secretsmanager_secret.py | 2 
+- plugins/modules/ses_identity.py | 33 +++----- plugins/modules/ses_identity_policy.py | 6 +- plugins/modules/ses_rule_set.py | 13 ++-- plugins/modules/sns.py | 2 +- plugins/modules/sns_topic.py | 10 +-- .../modules/stepfunctions_state_machine.py | 4 +- plugins/modules/waf_condition.py | 4 +- plugins/modules/waf_info.py | 2 +- plugins/modules/waf_rule.py | 6 +- plugins/modules/waf_web_acl.py | 2 +- tests/unit/mock/loader.py | 2 +- .../plugins/modules/test_acm_certificate.py | 14 ++-- .../unit/plugins/modules/test_ec2_vpc_vpn.py | 4 +- 92 files changed, 515 insertions(+), 583 deletions(-) create mode 100644 changelogs/fragments/fstring-1.yml diff --git a/changelogs/fragments/fstring-1.yml b/changelogs/fragments/fstring-1.yml new file mode 100644 index 00000000000..94809541a76 --- /dev/null +++ b/changelogs/fragments/fstring-1.yml @@ -0,0 +1,2 @@ +minor_changes: +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/community.aws/pull/1810). diff --git a/plugins/module_utils/base.py b/plugins/module_utils/base.py index 6a549e47330..86b846c63be 100644 --- a/plugins/module_utils/base.py +++ b/plugins/module_utils/base.py @@ -136,7 +136,7 @@ def _inject_ratelimit_retries(self, model): def get_waiter(self, waiter_name): waiters = self._model.waiter_names if waiter_name not in waiters: - self.module.fail_json("Unable to find waiter {0}. Available_waiters: {1}".format(waiter_name, waiters)) + self.module.fail_json(f"Unable to find waiter {waiter_name}. 
Available_waiters: {waiters}") return botocore.waiter.create_waiter_with_client( waiter_name, self._model, @@ -183,11 +183,9 @@ def handler(_self, *args, **kwargs): try: return func(_self, *args, **kwargs) except botocore.exceptions.WaiterError as e: - _self.module.fail_json_aws( - e, msg="Failed waiting for {DESC}".format(DESC=description), **extra_ouput - ) + _self.module.fail_json_aws(e, msg=f"Failed waiting for {description}", **extra_ouput) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - _self.module.fail_json_aws(e, msg="Failed to {DESC}".format(DESC=description), **extra_ouput) + _self.module.fail_json_aws(e, msg=f"Failed to {description}", **extra_ouput) return handler @@ -356,7 +354,7 @@ def _set_resource_value(self, key, value, description=None, immutable=False): if immutable and self.original_resource: if description is None: description = key - self.module.fail_json(msg="{0} can not be updated after creation".format(description)) + self.module.fail_json(msg=f"{description} can not be updated after creation") self._resource_updates[key] = value self.changed = True return True diff --git a/plugins/module_utils/etag.py b/plugins/module_utils/etag.py index 978111ba63a..95c5ac94f81 100644 --- a/plugins/module_utils/etag.py +++ b/plugins/module_utils/etag.py @@ -54,11 +54,11 @@ def calculate_multipart_etag(source_path, chunk_size=DEFAULT_CHUNK_SIZE): md5s.append(md5) if len(md5s) == 1: - new_etag = '"{0}"'.format(md5s[0].hexdigest()) + new_etag = f'"{md5s[0].hexdigest()}"' else: # > 1 digests = b"".join(m.digest() for m in md5s) new_md5 = hashlib.md5(digests) - new_etag = '"{0}-{1}"'.format(new_md5.hexdigest(), len(md5s)) + new_etag = f'"{new_md5.hexdigest()}-{len(md5s)}"' return new_etag diff --git a/plugins/module_utils/networkfirewall.py b/plugins/module_utils/networkfirewall.py index cd4872907c6..3ebc84b81ae 100644 --- a/plugins/module_utils/networkfirewall.py +++ b/plugins/module_utils/networkfirewall.py @@ -546,7 
+546,7 @@ def _set_metadata_value(self, key, value, description=None, immutable=False): if immutable and self.original_resource: if description is None: description = key - self.module.fail_json(msg="{0} can not be updated after creation".format(description)) + self.module.fail_json(msg=f"{description} can not be updated after creation") self._metadata_updates[key] = value self.changed = True return True @@ -785,7 +785,7 @@ def _set_rule_option(self, option_name, description, value, immutable=False, def if value == rule_options.get(option_name, default_value): return False if immutable and self.original_resource: - self.module.fail_json(msg="{0} can not be updated after creation".format(description)) + self.module.fail_json(msg=f"{description} can not be updated after creation") rule_options[option_name] = value @@ -834,7 +834,7 @@ def _set_rule_source(self, rule_type, rules): conflicting_rule_type = conflicting_types.intersection(current_keys) if conflicting_rule_type: self.module.fail_json( - "Unable to add {0} rules, {1} rules already set".format(rule_type, " and ".join(conflicting_rule_type)) + f"Unable to add {rule_type} rules, {' and '.join(conflicting_rule_type)} rules already set" ) original_rules = rules_source.get(rule_type, None) @@ -892,7 +892,7 @@ def set_domain_list(self, options): def _format_rule_options(self, options, sid): formatted_options = [] - opt = dict(Keyword="sid:{0}".format(sid)) + opt = dict(Keyword=f"sid:{sid}") formatted_options.append(opt) if options: for option in sorted(options.keys()): @@ -954,8 +954,7 @@ def _flush_create(self): rule_type = self.RULE_TYPES.intersection(set(rules_source.keys())) if len(rule_type) != 1: self.module.fail_json( - "Exactly one of rule strings, domain list or rule list" - " must be provided when creating a new rule group", + "Exactly one of rule strings, domain list or rule list must be provided when creating a new rule group", rule_type=rule_type, keys=self._resource_updates.keys(), 
types=self.RULE_TYPES, @@ -1168,7 +1167,7 @@ def _set_engine_option(self, option_name, description, value, immutable=False, d if value == engine_options.get(option_name, default_value): return False if immutable and self.original_resource: - self.module.fail_json(msg="{0} can not be updated after creation".format(description)) + self.module.fail_json(msg=f"{description} can not be updated after creation") engine_options[option_name] = value return self._set_resource_value("StatefulEngineOptions", engine_options) @@ -1207,7 +1206,7 @@ def set_default_actions(self, key, actions, valid_actions=None): invalid_actions = list(set(actions) - set(valid_actions or [])) if valid_actions and invalid_actions: self.module.fail_json( - msg="{0} contains invalid actions".format(key), + msg=f"{key} contains invalid actions", valid_actions=valid_actions, invalid_actions=invalid_actions, actions=actions, diff --git a/plugins/module_utils/opensearch.py b/plugins/module_utils/opensearch.py index b461669e3e2..2152a939827 100644 --- a/plugins/module_utils/opensearch.py +++ b/plugins/module_utils/opensearch.py @@ -34,7 +34,7 @@ def get_domain_status(client, module, domain_name): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get domain {0}".format(domain_name)) + module.fail_json_aws(e, msg=f"Couldn't get domain {domain_name}") return response["DomainStatus"] @@ -56,7 +56,7 @@ def get_domain_config(client, module, domain_name): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get domain {0}".format(domain_name)) + module.fail_json_aws(e, msg=f"Couldn't get domain {domain_name}") domain_config = {} arn = None if response is not None: @@ -96,7 +96,7 @@ def normalize_opensearch(client, module, domain): try: domain["Tags"] = 
boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain["ARN"], aws_retry=True)["TagList"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain["domain_name"]) + module.fail_json_aws(e, f"Couldn't get tags for domain {domain['domain_name']}") except KeyError: module.fail_json(msg=str(domain)) @@ -203,7 +203,7 @@ def get_target_increment_version(client, module, domain_name, target_version): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws( e, - msg="Couldn't get compatible versions for domain {0}".format(domain_name), + msg=f"Couldn't get compatible versions for domain {domain_name}", ) compat = api_compatible_versions.get("CompatibleVersions") if compat is None: @@ -246,7 +246,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws(e, "Couldn't add tags to domain {0}".format(resource_arn)) + module.fail_json_aws(e, f"Couldn't add tags to domain {resource_arn}") if tags_to_remove: if module.check_mode: module.exit_json(changed=True, msg="Would have removed tags if not in check mode") @@ -256,5 +256,5 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws(e, "Couldn't remove tags from domain {0}".format(resource_arn)) + module.fail_json_aws(e, f"Couldn't remove tags from domain {resource_arn}") return changed diff --git a/plugins/module_utils/sns.py b/plugins/module_utils/sns.py index 8088b1b9ece..f4a32636c4f 100644 --- a/plugins/module_utils/sns.py +++ b/plugins/module_utils/sns.py @@ -46,12 +46,12 @@ def list_topic_subscriptions(client, module, topic_arn): # potentially AuthorizationError when listing subscriptions for third party topic return [sub for sub in 
_list_subscriptions_with_backoff(client) if sub["TopicArn"] == topic_arn] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn) + module.fail_json_aws(e, msg=f"Couldn't get subscriptions list for topic {topic_arn}") except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get subscriptions list for topic %s" % topic_arn) + module.fail_json_aws(e, msg=f"Couldn't get subscriptions list for topic {topic_arn}") def list_topics(client, module): @@ -65,7 +65,7 @@ def list_topics(client, module): def topic_arn_lookup(client, module, name): # topic names cannot have colons, so this captures the full topic name all_topics = list_topics(client, module) - lookup_topic = ":%s" % name + lookup_topic = f":{name}" for topic in all_topics: if topic.endswith(lookup_topic): return topic diff --git a/plugins/modules/acm_certificate.py b/plugins/modules/acm_certificate.py index 197124fb59e..4bf07f0321a 100644 --- a/plugins/modules/acm_certificate.py +++ b/plugins/modules/acm_certificate.py @@ -276,7 +276,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws(e, "Couldn't add tags to certificate {0}".format(resource_arn)) + module.fail_json_aws(e, f"Couldn't add tags to certificate {resource_arn}") if tags_to_remove and not module.check_mode: # remove_tags_from_certificate wants a list of key, value pairs, not a list of keys. 
tags_list = [{"Key": key, "Value": existing_tags.get(key)} for key in tags_to_remove] @@ -289,7 +289,7 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: - module.fail_json_aws(e, "Couldn't remove tags from certificate {0}".format(resource_arn)) + module.fail_json_aws(e, f"Couldn't remove tags from certificate {resource_arn}") new_tags = deepcopy(existing_tags) for key, value in tags_to_add.items(): new_tags[key] = value @@ -441,7 +441,7 @@ def ensure_certificates_present(client, module, acm, certificates, desired_tags, cert_arn = None changed = False if len(certificates) > 1: - msg = "More than one certificate with Name=%s exists in ACM in this region" % module.params["name_tag"] + msg = f"More than one certificate with Name={module.params['name_tag']} exists in ACM in this region" module.fail_json(msg=msg, certificates=certificates) elif len(certificates) == 1: # Update existing certificate that was previously imported to ACM. 
@@ -496,7 +496,7 @@ def main(): absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) < 1: for a in absent_args: - module.debug("%s is %s" % (a, module.params[a])) + module.debug(f"{a} is {module.params[a]}") module.fail_json( msg="If 'state' is specified as 'present' then at least one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" ) @@ -505,7 +505,7 @@ def main(): absent_args = ["certificate_arn", "domain_name", "name_tag"] if sum([(module.params[a] is not None) for a in absent_args]) != 1: for a in absent_args: - module.debug("%s is %s" % (a, module.params[a])) + module.debug(f"{a} is {module.params[a]}") module.fail_json( msg="If 'state' is specified as 'absent' then exactly one of 'name_tag', 'certificate_arn' or 'domain_name' must be specified" ) @@ -543,7 +543,7 @@ def main(): only_tags=filter_tags, ) - module.debug("Found %d corresponding certificates in ACM" % len(certificates)) + module.debug(f"Found {len(certificates)} corresponding certificates in ACM") if module.params["state"] == "present": ensure_certificates_present(client, module, acm, certificates, desired_tags, filter_tags) diff --git a/plugins/modules/acm_certificate_info.py b/plugins/modules/acm_certificate_info.py index 287e7006aef..420cd0e0f92 100644 --- a/plugins/modules/acm_certificate_info.py +++ b/plugins/modules/acm_certificate_info.py @@ -296,7 +296,7 @@ def main(): ) if module.params["certificate_arn"] and len(certificates) != 1: - module.fail_json(msg="No certificate exists in this region with ARN %s" % module.params["certificate_arn"]) + module.fail_json(msg=f"No certificate exists in this region with ARN {module.params['certificate_arn']}") module.exit_json(certificates=certificates) diff --git a/plugins/modules/api_gateway.py b/plugins/modules/api_gateway.py index 176404f644d..c63ad5f1582 100644 --- a/plugins/modules/api_gateway.py +++ b/plugins/modules/api_gateway.py @@ -248,7 +248,7 @@ def 
get_api_definitions(module, swagger_file=None, swagger_dict=None, swagger_te with open(swagger_file) as f: apidata = f.read() except OSError as e: - msg = "Failed trying to read swagger file {0}: {1}".format(str(swagger_file), str(e)) + msg = f"Failed trying to read swagger file {str(swagger_file)}: {str(e)}" module.fail_json(msg=msg, exception=traceback.format_exc()) if swagger_dict is not None: apidata = json.dumps(swagger_dict) @@ -281,7 +281,7 @@ def delete_rest_api(module, client, api_id): try: delete_response = delete_api(client, api_id) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg="deleting API {0}".format(api_id)) + module.fail_json_aws(e, msg=f"deleting API {api_id}") return delete_response @@ -299,7 +299,7 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): try: configure_response = configure_api(client, api_id, api_data=api_data) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - module.fail_json_aws(e, msg="configuring API {0}".format(api_id)) + module.fail_json_aws(e, msg=f"configuring API {api_id}") deploy_response = None @@ -308,7 +308,7 @@ def ensure_api_in_correct_state(module, client, api_id, api_data): try: deploy_response = create_deployment(client, api_id, **module.params) except (botocore.exceptions.ClientError, botocore.exceptions.EndpointConnectionError) as e: - msg = "deploying api {0} to stage {1}".format(api_id, stage) + msg = f"deploying api {api_id} to stage {stage}" module.fail_json_aws(e, msg) return configure_response, deploy_response diff --git a/plugins/modules/autoscaling_instance_refresh.py b/plugins/modules/autoscaling_instance_refresh.py index 5b9855d135d..86546fac21e 100644 --- a/plugins/modules/autoscaling_instance_refresh.py +++ b/plugins/modules/autoscaling_instance_refresh.py @@ -229,7 +229,7 @@ def start_or_cancel_instance_refresh(conn, module): result = 
dict(instance_refreshes=camel_dict_to_snake_dict(instance_refreshes["InstanceRefreshes"][0])) return module.exit_json(**result) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to {0} InstanceRefresh".format(asg_state.replace("ed", ""))) + module.fail_json_aws(e, msg=f"Failed to {asg_state.replace('ed', '')} InstanceRefresh") def main(): diff --git a/plugins/modules/autoscaling_policy.py b/plugins/modules/autoscaling_policy.py index f76ce74ceb1..67f7ccbd54b 100644 --- a/plugins/modules/autoscaling_policy.py +++ b/plugins/modules/autoscaling_policy.py @@ -447,7 +447,7 @@ def create_scaling_policy(connection, module): # it's only required if policy is SimpleScaling and state is present if not module.params["scaling_adjustment"]: module.fail_json( - msg="scaling_adjustment is required when policy_type is SimpleScaling " "and state is present" + msg="scaling_adjustment is required when policy_type is SimpleScaling and state is present" ) params["ScalingAdjustment"] = module.params["scaling_adjustment"] if module.params["cooldown"]: @@ -455,7 +455,7 @@ def create_scaling_policy(connection, module): elif policy_type == "StepScaling": if not module.params["step_adjustments"]: - module.fail_json(msg="step_adjustments is required when policy_type is StepScaling" "and state is present") + module.fail_json(msg="step_adjustments is required when policy_type is StepScaling and state is present") params["StepAdjustments"] = [] for step_adjustment in module.params["step_adjustments"]: step_adjust_params = dict(ScalingAdjustment=step_adjustment["scaling_adjustment"]) @@ -472,8 +472,7 @@ def create_scaling_policy(connection, module): elif policy_type == "TargetTrackingScaling": if not module.params["target_tracking_config"]: module.fail_json( - msg="target_tracking_config is required when policy_type is " - "TargetTrackingScaling and state is present" + msg="target_tracking_config is required when policy_type is TargetTrackingScaling and state is 
present" ) else: params["TargetTrackingConfiguration"] = build_target_specification( @@ -488,7 +487,7 @@ def create_scaling_policy(connection, module): aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") before = after = {} if not policies: @@ -512,7 +511,7 @@ def create_scaling_policy(connection, module): aws_retry=True, AutoScalingGroupName=asg_name, PolicyNames=[policy_name] )["ScalingPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") policy = camel_dict_to_snake_dict(policies[0]) # Backward compatible return values @@ -532,7 +531,7 @@ def delete_scaling_policy(connection, module): try: policy = connection.describe_policies(aws_retry=True, PolicyNames=[policy_name]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to obtain autoscaling policy %s" % policy_name) + module.fail_json_aws(e, msg=f"Failed to obtain autoscaling policy {policy_name}") if policy["ScalingPolicies"]: try: diff --git a/plugins/modules/batch_compute_environment.py b/plugins/modules/batch_compute_environment.py index ffc1f19b003..6bb2541e161 100644 --- a/plugins/modules/batch_compute_environment.py +++ b/plugins/modules/batch_compute_environment.py @@ -268,14 +268,11 @@ def validate_params(module): # validate compute environment name if not re.search(r"^[\w\_:]+$", compute_environment_name): module.fail_json( - msg="Function compute_environment_name {0} is invalid. 
Names must contain only alphanumeric characters " - "and underscores.".format(compute_environment_name) + msg=f"Function compute_environment_name {compute_environment_name} is invalid. Names must contain only alphanumeric characters and underscores." ) if not compute_environment_name.startswith("arn:aws:batch:"): if len(compute_environment_name) > 128: - module.fail_json( - msg='compute_environment_name "{0}" exceeds 128 character limit'.format(compute_environment_name) - ) + module.fail_json(msg=f'compute_environment_name "{compute_environment_name}" exceeds 128 character limit') return diff --git a/plugins/modules/cloudformation_stack_set.py b/plugins/modules/cloudformation_stack_set.py index f825f2a63bf..17e888b4f1b 100644 --- a/plugins/modules/cloudformation_stack_set.py +++ b/plugins/modules/cloudformation_stack_set.py @@ -346,7 +346,7 @@ def create_stack_set(module, stack_params, cfn): cfn.create_stack_set(aws_retry=True, **stack_params) return await_stack_set_exists(cfn, stack_params["StackSetName"]) except (ClientError, BotoCoreError) as err: - module.fail_json_aws(err, msg="Failed to create stack set {0}.".format(stack_params.get("StackSetName"))) + module.fail_json_aws(err, msg=f"Failed to create stack set {stack_params.get('StackSetName')}.") def update_stack_set(module, stack_params, cfn): @@ -360,14 +360,19 @@ def update_stack_set(module, stack_params, cfn): except is_boto3_error_code("StackInstanceNotFound") as err: # pylint: disable=duplicate-except module.fail_json_aws( err, - msg="One or more stack instances were not found for this stack set. Double check " - "the `accounts` and `regions` parameters.", + msg=( + "One or more stack instances were not found for this stack set. Double check " + "the `accounts` and `regions` parameters." 
+ ), ) except is_boto3_error_code("OperationInProgressException") as err: # pylint: disable=duplicate-except module.fail_json_aws( err, - msg="Another operation is already in progress on this stack set - please try again later. When making " - "multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op errors.", + msg=( + "Another operation is already in progress on this stack set - please try again later. When making" + " multiple cloudformation_stack_set calls, it's best to enable `wait: true` to avoid unfinished op" + " errors." + ), ) except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except module.fail_json_aws(err, msg="Could not update stack set.") @@ -436,9 +441,8 @@ def await_stack_set_operation(module, cfn, stack_set_name, operation_id, max_wai pass else: module.warn( - "Timed out waiting for operation {0} on stack set {1} after {2} seconds. Returning unfinished operation".format( - operation_id, stack_set_name, max_wait - ) + f"Timed out waiting for operation {operation_id} on stack set {stack_set_name} after {max_wait} seconds." + " Returning unfinished operation" ) @@ -456,9 +460,8 @@ def await_stack_instance_completion(module, cfn, stack_set_name, max_wait): time.sleep(15) module.warn( - "Timed out waiting for stack set {0} instances {1} to complete after {2} seconds. Returning unfinished operation".format( - stack_set_name, ", ".join(s["StackId"] for s in to_await), max_wait - ) + f"Timed out waiting for stack set {stack_set_name} instances {', '.join(s['StackId'] for s in to_await)} to" + f" complete after {max_wait} seconds. Returning unfinished operation" ) @@ -583,8 +586,10 @@ def main(): state = module.params["state"] if state == "present" and not module.params["accounts"]: module.fail_json( - msg="Can't create a stack set without choosing at least one account. " - "To get the ID of the current account, use the aws_caller_info module." 
+ msg=( + "Can't create a stack set without choosing at least one account. " + "To get the ID of the current account, use the aws_caller_info module." + ) ) module.params["accounts"] = [to_native(a) for a in module.params["accounts"]] @@ -609,8 +614,10 @@ def main(): stack_params["UsePreviousTemplate"] = True else: module.fail_json( - msg="The Stack Set {0} does not exist, and no template was provided. Provide one of `template`, " - "`template_body`, or `template_url`".format(module.params["name"]) + msg=( + f"The Stack Set {module.params['name']} does not exist, and no template was provided. Provide one" + " of `template`, `template_body`, or `template_url`" + ) ) stack_params["Parameters"] = [] @@ -668,11 +675,11 @@ def main(): if state == "present": if not existing_stack_set: # on create this parameter has a different name, and cannot be referenced later in the job log - stack_params["ClientRequestToken"] = "Ansible-StackSet-Create-{0}".format(operation_uuid) + stack_params["ClientRequestToken"] = f"Ansible-StackSet-Create-{operation_uuid}" changed = True create_stack_set(module, stack_params, cfn) else: - stack_params["OperationId"] = "Ansible-StackSet-Update-{0}".format(operation_uuid) + stack_params["OperationId"] = f"Ansible-StackSet-Update-{operation_uuid}" operation_ids.append(stack_params["OperationId"]) if module.params.get("regions"): stack_params["OperationPreferences"] = get_operation_preferences(module) @@ -694,7 +701,7 @@ def main(): module.params["regions"], ) if new_stack_instances: - operation_ids.append("Ansible-StackInstance-Create-{0}".format(operation_uuid)) + operation_ids.append(f"Ansible-StackInstance-Create-{operation_uuid}") changed = True cfn.create_stack_instances( StackSetName=module.params["name"], @@ -704,7 +711,7 @@ def main(): OperationId=operation_ids[-1], ) else: - operation_ids.append("Ansible-StackInstance-Update-{0}".format(operation_uuid)) + operation_ids.append(f"Ansible-StackInstance-Update-{operation_uuid}") 
cfn.update_stack_instances( StackSetName=module.params["name"], Accounts=list(set(acct for acct, region in existing_stack_instances)), @@ -723,20 +730,20 @@ def main(): elif state == "absent": if not existing_stack_set: - module.exit_json(msg="Stack set {0} does not exist".format(module.params["name"])) + module.exit_json(msg=f"Stack set {module.params['name']} does not exist") if module.params.get("purge_stack_instances") is False: pass try: cfn.delete_stack_set( StackSetName=module.params["name"], ) - module.exit_json(msg="Stack set {0} deleted".format(module.params["name"])) + module.exit_json(msg=f"Stack set {module.params['name']} deleted") except is_boto3_error_code("OperationInProgressException") as e: # pylint: disable=duplicate-except module.fail_json_aws( - e, msg="Cannot delete stack {0} while there is an operation in progress".format(module.params["name"]) + e, msg=f"Cannot delete stack {module.params['name']} while there is an operation in progress" ) except is_boto3_error_code("StackSetNotEmptyException"): # pylint: disable=duplicate-except - delete_instances_op = "Ansible-StackInstance-Delete-{0}".format(operation_uuid) + delete_instances_op = f"Ansible-StackInstance-Delete-{operation_uuid}" cfn.delete_stack_instances( StackSetName=module.params["name"], Accounts=module.params["accounts"], @@ -768,7 +775,7 @@ def main(): msg="Could not purge all stacks, or not all accounts/regions were chosen for deletion: " + stack_states, ) - module.exit_json(changed=True, msg="Stack set {0} deleted".format(module.params["name"])) + module.exit_json(changed=True, msg=f"Stack set {module.params['name']} deleted") result.update(**describe_stack_tree(module, stack_params["StackSetName"], operation_ids=operation_ids)) if any(o["status"] == "FAILED" for o in result["operations"]): diff --git a/plugins/modules/cloudfront_distribution.py b/plugins/modules/cloudfront_distribution.py index 7b841c7f925..40bc15dac35 100644 --- a/plugins/modules/cloudfront_distribution.py +++ 
b/plugins/modules/cloudfront_distribution.py @@ -1463,7 +1463,7 @@ def ansible_list_to_cloudfront_list(list_items=None, include_quantity=True): if list_items is None: list_items = [] if not isinstance(list_items, list): - raise ValueError("Expected a list, got a {0} with value {1}".format(type(list_items).__name__, str(list_items))) + raise ValueError(f"Expected a list, got a {type(list_items).__name__} with value {str(list_items)}") result = {} if include_quantity: result["quantity"] = len(list_items) @@ -1491,7 +1491,7 @@ def delete_distribution(client, module, distribution): aws_retry=True, Id=distribution["Distribution"]["Id"], IfMatch=distribution["ETag"] ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error deleting distribution %s" % to_native(distribution["Distribution"])) + module.fail_json_aws(e, msg=f"Error deleting distribution {to_native(distribution['Distribution'])}") def update_distribution(client, module, config, distribution_id, e_tag): @@ -1500,7 +1500,7 @@ def update_distribution(client, module, config, distribution_id, e_tag): "Distribution" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Error updating distribution to %s" % to_native(config)) + module.fail_json_aws(e, msg=f"Error updating distribution to {to_native(config)}") def tag_resource(client, module, arn, tags): @@ -1721,13 +1721,11 @@ def validate_logging(self, logging): def validate_is_list(self, list_to_validate, list_name): if not isinstance(list_to_validate, list): - self.module.fail_json( - msg="%s is of type %s. Must be a list." % (list_name, type(list_to_validate).__name__) - ) + self.module.fail_json(msg=f"{list_name} is of type {type(list_to_validate).__name__}. Must be a list.") def validate_required_key(self, key_name, full_key_name, dict_object): if key_name not in dict_object: - self.module.fail_json(msg="%s must be specified." 
% full_key_name) + self.module.fail_json(msg=f"{full_key_name} must be specified.") def validate_origins( self, @@ -1781,8 +1779,8 @@ def validate_s3_origin_configuration(self, client, existing_config, origin): return existing_config["s3_origin_config"]["origin_access_identity"] try: - comment = "access-identity-by-ansible-%s-%s" % (origin.get("domain_name"), self.__default_datetime_string) - caller_reference = "%s-%s" % (origin.get("domain_name"), self.__default_datetime_string) + comment = f"access-identity-by-ansible-{origin.get('domain_name')}-{self.__default_datetime_string}" + caller_reference = f"{origin.get('domain_name')}-{self.__default_datetime_string}" cfoai_config = dict( CloudFrontOriginAccessIdentityConfig=dict(CallerReference=caller_reference, Comment=comment) ) @@ -1790,8 +1788,8 @@ def validate_s3_origin_configuration(self, client, existing_config, origin): "Id" ] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Couldn't create Origin Access Identity for id %s" % origin["id"]) - return "origin-access-identity/cloudfront/%s" % oai + self.module.fail_json_aws(e, msg=f"Couldn't create Origin Access Identity for id {origin['id']}") + return f"origin-access-identity/cloudfront/{oai}" def validate_origin(self, client, existing_config, origin, default_origin_path): try: @@ -1948,9 +1946,9 @@ def validate_cache_behavior_first_level_keys(self, config, cache_behavior, valid if is_default_cache: cache_behavior_name = "Default cache behavior" else: - cache_behavior_name = "Cache behavior for path %s" % cache_behavior["path_pattern"] + cache_behavior_name = f"Cache behavior for path {cache_behavior['path_pattern']}" self.module.fail_json( - msg="%s has target_origin_id pointing to an origin that does not exist." % cache_behavior_name + msg=f"{cache_behavior_name} has target_origin_id pointing to an origin that does not exist." 
) cache_behavior["target_origin_id"] = target_origin_id cache_behavior = self.add_key_else_validate( @@ -2262,21 +2260,15 @@ def validate_attribute_list_with_allowed_list(self, attribute_list, attribute_li or isinstance(allowed_list, set) and not set(allowed_list).issuperset(attribute_list) ): - self.module.fail_json( - msg="The attribute list {0} must be one of [{1}]".format( - attribute_list_name, " ".join(str(a) for a in allowed_list) - ) - ) + attribute_list = " ".join(str(a) for a in allowed_list) + self.module.fail_json(msg=f"The attribute list {attribute_list_name} must be one of [{attribute_list}]") except Exception as e: self.module.fail_json_aws(e, msg="Error validating attribute list with allowed value list") def validate_attribute_with_allowed_values(self, attribute, attribute_name, allowed_list): if attribute is not None and attribute not in allowed_list: - self.module.fail_json( - msg="The attribute {0} must be one of [{1}]".format( - attribute_name, " ".join(str(a) for a in allowed_list) - ) - ) + attribute_list = " ".join(str(a) for a in allowed_list) + self.module.fail_json(msg=f"The attribute {attribute_name} must be one of [{attribute_list}]") def validate_distribution_from_caller_reference(self, caller_reference): try: @@ -2333,12 +2325,11 @@ def wait_until_processed(self, client, wait_timeout, distribution_id, caller_ref except botocore.exceptions.WaiterError as e: self.module.fail_json_aws( e, - msg="Timeout waiting for CloudFront action." - " Waited for {0} seconds before timeout.".format(to_text(wait_timeout)), + msg=f"Timeout waiting for CloudFront action. 
Waited for {to_text(wait_timeout)} seconds before timeout.", ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Error getting distribution {0}".format(distribution_id)) + self.module.fail_json_aws(e, msg=f"Error getting distribution {distribution_id}") def main(): diff --git a/plugins/modules/codebuild_project.py b/plugins/modules/codebuild_project.py index 6a910799d88..69fd2e463b5 100644 --- a/plugins/modules/codebuild_project.py +++ b/plugins/modules/codebuild_project.py @@ -310,8 +310,7 @@ class CodeBuildAnsibleAWSError(AnsibleAWSError): def do_create_project(client, params, formatted_params): if params["source"] is None or params["artifacts"] is None: raise CodeBuildAnsibleAWSError( - message="The source and artifacts parameters must be provided " - "when creating a new project. No existing project was found." + message="The source and artifacts parameters must be provided when creating a new project. No existing project was found." 
) if params["tags"] is not None: diff --git a/plugins/modules/codepipeline.py b/plugins/modules/codepipeline.py index 7e0baf3fd65..9fb42643df4 100644 --- a/plugins/modules/codepipeline.py +++ b/plugins/modules/codepipeline.py @@ -216,7 +216,7 @@ def create_pipeline(client, name, role_arn, artifact_store, stages, version, mod resp = client.create_pipeline(pipeline=pipeline_dict) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable create pipeline {0}".format(pipeline_dict["name"])) + module.fail_json_aws(e, msg=f"Unable create pipeline {pipeline_dict['name']}") def update_pipeline(client, pipeline_dict, module): @@ -224,7 +224,7 @@ def update_pipeline(client, pipeline_dict, module): resp = client.update_pipeline(pipeline=pipeline_dict) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable update pipeline {0}".format(pipeline_dict["name"])) + module.fail_json_aws(e, msg=f"Unable update pipeline {pipeline_dict['name']}") def delete_pipeline(client, name, module): @@ -232,7 +232,7 @@ def delete_pipeline(client, name, module): resp = client.delete_pipeline(name=name) return resp except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable delete pipeline {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable delete pipeline {name}") def describe_pipeline(client, name, version, module): diff --git a/plugins/modules/config_delivery_channel.py b/plugins/modules/config_delivery_channel.py index dc03a95f719..c54fb36c05c 100644 --- a/plugins/modules/config_delivery_channel.py +++ b/plugins/modules/config_delivery_channel.py @@ -164,8 +164,7 @@ def update_resource(client, module, params, result): except is_boto3_error_code("InsufficientDeliveryPolicyException") as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="The `s3_prefix` or 
`s3_bucket` parameter is invalid. " - "Make sure the bucket exists and is available", + msg="The `s3_prefix` or `s3_bucket` parameter is invalid. Make sure the bucket exists and is available", ) except ( botocore.exceptions.ClientError, diff --git a/plugins/modules/data_pipeline.py b/plugins/modules/data_pipeline.py index d30be5c847d..4b602708163 100644 --- a/plugins/modules/data_pipeline.py +++ b/plugins/modules/data_pipeline.py @@ -271,7 +271,7 @@ def pipeline_field(client, dp_id, field): for field_key in dp_description["pipelineDescriptionList"][0]["fields"]: if field_key["key"] == field: return field_key["stringValue"] - raise KeyError("Field key {0} not found!".format(field)) + raise KeyError(f"Field key {field} not found!") def run_with_timeout(timeout, func, *func_args, **func_kwargs): @@ -350,7 +350,7 @@ def activate_pipeline(client, module): try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg="Data Pipeline {0} not found".format(dp_name)) + module.fail_json(msg=f"Data Pipeline {dp_name} not found") if pipeline_field(client, dp_id, field="@pipelineState") in DP_ACTIVE_STATES: changed = False @@ -388,7 +388,7 @@ def deactivate_pipeline(client, module): try: dp_id = pipeline_id(client, dp_name) except DataPipelineNotFound: - module.fail_json(msg="Data Pipeline {0} not found".format(dp_name)) + module.fail_json(msg=f"Data Pipeline {dp_name} not found") if pipeline_field(client, dp_id, field="@pipelineState") in DP_INACTIVE_STATES: changed = False @@ -527,7 +527,7 @@ def define_pipeline(client, module, objects, dp_id): dp_name = module.params.get("name") if pipeline_field(client, dp_id, field="@pipelineState") == "FINISHED": - msg = "Data Pipeline {0} is unable to be updated while in state FINISHED.".format(dp_name) + msg = f"Data Pipeline {dp_name} is unable to be updated while in state FINISHED." 
changed = False elif objects: @@ -538,14 +538,16 @@ def define_pipeline(client, module, objects, dp_id): client.put_pipeline_definition( pipelineId=dp_id, pipelineObjects=objects, parameterObjects=parameters, parameterValues=values ) - msg = "Data Pipeline {0} has been updated.".format(dp_name) + msg = f"Data Pipeline {dp_name} has been updated." changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( e, - msg=f"Failed to put the definition for pipeline {dp_name}. Check that string/reference fields" - "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" - "objects", + msg=( + f"Failed to put the definition for pipeline {dp_name}. Check that string/reference fields" + "are not empty and that the number of objects in the pipeline does not exceed maximum allowed" + "objects" + ), ) else: changed = False diff --git a/plugins/modules/directconnect_confirm_connection.py b/plugins/modules/directconnect_confirm_connection.py index e8e0f2c6b08..870e459327d 100644 --- a/plugins/modules/directconnect_confirm_connection.py +++ b/plugins/modules/directconnect_confirm_connection.py @@ -87,7 +87,7 @@ def find_connection_id(client, connection_id=None, connection_name=None): response = describe_connections(client, params) except (BotoCoreError, ClientError) as e: if connection_id: - msg = "Failed to describe DirectConnect ID {0}".format(connection_id) + msg = f"Failed to describe DirectConnect ID {connection_id}" else: msg = "Failed to describe DirectConnect connections" raise DirectConnectError( @@ -117,7 +117,7 @@ def get_connection_state(client, connection_id): return response["connections"][0]["connectionState"] except (BotoCoreError, ClientError, IndexError) as e: raise DirectConnectError( - msg="Failed to describe DirectConnect connection {0} state".format(connection_id), + msg=f"Failed to describe DirectConnect connection {connection_id} state", 
last_traceback=traceback.format_exc(), exception=e, ) diff --git a/plugins/modules/directconnect_connection.py b/plugins/modules/directconnect_connection.py index 176d83392d4..fd55a3b5291 100644 --- a/plugins/modules/directconnect_connection.py +++ b/plugins/modules/directconnect_connection.py @@ -187,7 +187,7 @@ def connection_exists(client, connection_id=None, connection_name=None, verify=T response = AWSRetry.jittered_backoff(**retry_params)(client.describe_connections)(**params) except (BotoCoreError, ClientError) as e: if connection_id: - msg = "Failed to describe DirectConnect ID {0}".format(connection_id) + msg = f"Failed to describe DirectConnect ID {connection_id}" else: msg = "Failed to describe DirectConnect connections" raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) @@ -233,7 +233,7 @@ def create_connection(client, location, bandwidth, name, lag_id): connection = AWSRetry.jittered_backoff(**retry_params)(client.create_connection)(**params) except (BotoCoreError, ClientError) as e: raise DirectConnectError( - msg="Failed to create DirectConnect connection {0}".format(name), + msg=f"Failed to create DirectConnect connection {name}", last_traceback=traceback.format_exc(), exception=e, ) diff --git a/plugins/modules/directconnect_link_aggregation_group.py b/plugins/modules/directconnect_link_aggregation_group.py index 9a532c63298..57907c93bb9 100644 --- a/plugins/modules/directconnect_link_aggregation_group.py +++ b/plugins/modules/directconnect_link_aggregation_group.py @@ -250,7 +250,7 @@ def create_lag(client, num_connections, location, bandwidth, name, connection_id lag = client.create_lag(**parameters) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to create DirectConnect link aggregation group {0}".format(name), + msg=f"Failed to create DirectConnect link aggregation group {name}", last_traceback=traceback.format_exc(), exception=e, ) @@ -263,7 +263,7 @@ def 
delete_lag(client, lag_id): client.delete_lag(lagId=lag_id) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to delete Direct Connect link aggregation group {0}.".format(lag_id), + msg=f"Failed to delete Direct Connect link aggregation group {lag_id}.", last_traceback=traceback.format_exc(), exception=e, ) @@ -285,8 +285,7 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ if min_links and min_links > num_connections: raise DirectConnectError( - msg="The number of connections {0} must be greater than the minimum number of links " - "{1} to update the LAG {2}".format(num_connections, min_links, lag_id), + msg=f"The number of connections {num_connections} must be greater than the minimum number of links {min_links} to update the LAG {lag_id}", last_traceback=None, exception=None, ) @@ -297,13 +296,9 @@ def update_lag(client, lag_id, lag_name, min_links, num_connections, wait, wait_ except botocore.exceptions.ClientError as e: if wait and time.time() - start <= wait_timeout: continue - msg = "Failed to update Direct Connect link aggregation group {0}.".format(lag_id) + msg = f"Failed to update Direct Connect link aggregation group {lag_id}." 
if "MinimumLinks cannot be set higher than the number of connections" in e.response["Error"]["Message"]: - msg += ( - "Unable to set the min number of links to {0} while the LAG connections are being requested".format( - min_links - ) - ) + msg += f"Unable to set the min number of links to {min_links} while the LAG connections are being requested" raise DirectConnectError(msg=msg, last_traceback=traceback.format_exc(), exception=e) else: break @@ -320,7 +315,7 @@ def ensure_present( exists = lag_exists(client, lag_id, lag_name) if not exists and lag_id: raise DirectConnectError( - msg="The Direct Connect link aggregation group {0} does not exist.".format(lag_id), + msg=f"The Direct Connect link aggregation group {lag_id} does not exist.", last_traceback=None, exception="", ) @@ -346,7 +341,7 @@ def describe_virtual_interfaces(client, lag_id): response = client.describe_virtual_interfaces(connectionId=lag_id) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Failed to describe any virtual interfaces associated with LAG: {0}".format(lag_id), + msg=f"Failed to describe any virtual interfaces associated with LAG: {lag_id}", last_traceback=traceback.format_exc(), exception=e, ) @@ -366,7 +361,7 @@ def disassociate_vis(client, lag_id, virtual_interfaces): response = client.delete_virtual_interface(virtualInterfaceId=vi["virtualInterfaceId"]) except botocore.exceptions.ClientError as e: raise DirectConnectError( - msg="Could not delete virtual interface {0} to delete link aggregation group {1}.".format(vi, lag_id), + msg=f"Could not delete virtual interface {vi} to delete link aggregation group {lag_id}.", last_traceback=traceback.format_exc(), exception=e, ) @@ -385,10 +380,13 @@ def ensure_absent(client, lag_id, lag_name, force_delete, delete_with_disassocia # If min_links is not 0, there are associated connections, or if there are virtual interfaces, ask for force_delete if any((latest_status["minimumLinks"], virtual_interfaces, 
connections)) and not force_delete: raise DirectConnectError( - msg="There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG {0}. " - "To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces they will be deleted). " - "Optionally, to ensure hosted connections are deleted after disassociation use delete_with_disassociation: True " - "and wait: True (as Virtual Interfaces may take a few moments to delete)".format(lag_id), + msg=( + "There are a minimum number of links, hosted connections, or associated virtual interfaces for LAG" + f" {lag_id}. To force deletion of the LAG use delete_force: True (if the LAG has virtual interfaces" + " they will be deleted). Optionally, to ensure hosted connections are deleted after disassociation use" + " delete_with_disassociation: True and wait: True (as Virtual Interfaces may take a few moments to" + " delete)" + ), last_traceback=None, exception=None, ) diff --git a/plugins/modules/directconnect_virtual_interface.py b/plugins/modules/directconnect_virtual_interface.py index ab6ee9d4ea4..ec0c87099a4 100644 --- a/plugins/modules/directconnect_virtual_interface.py +++ b/plugins/modules/directconnect_virtual_interface.py @@ -361,7 +361,7 @@ def vi_state(client, virtual_interface_id): """ Returns the state of the virtual interface. 
""" - err_msg = "Failed to describe virtual interface: {0}".format(virtual_interface_id) + err_msg = f"Failed to describe virtual interface: {virtual_interface_id}" vi = try_except_ClientError(failure_msg=err_msg)(client.describe_virtual_interfaces)( virtualInterfaceId=virtual_interface_id ) @@ -435,7 +435,7 @@ def modify_vi(client, virtual_interface_id, connection_id): """ Associate a new connection ID """ - err_msg = "Unable to associate {0} with virtual interface {1}".format(connection_id, virtual_interface_id) + err_msg = f"Unable to associate {connection_id} with virtual interface {virtual_interface_id}" try_except_ClientError(failure_msg=err_msg)(client.associate_virtual_interface)( virtualInterfaceId=virtual_interface_id, connectionId=connection_id ) @@ -460,15 +460,15 @@ def ensure_state(connection, module): if virtual_interface_id is False: module.fail_json( - msg="Multiple virtual interfaces were found. Use the virtual_interface_id, name, " - "and connection_id options if applicable to find a unique match." + msg=( + "Multiple virtual interfaces were found. Use the virtual_interface_id, name, " + "and connection_id options if applicable to find a unique match." 
+ ) ) if state == "present": if not virtual_interface_id and module.params["virtual_interface_id"]: - module.fail_json( - msg="The virtual interface {0} does not exist.".format(module.params["virtual_interface_id"]) - ) + module.fail_json(msg=f"The virtual interface {module.params['virtual_interface_id']} does not exist.") elif not virtual_interface_id: assembled_params = assemble_params_for_creating_vi(module.params) diff --git a/plugins/modules/dynamodb_table.py b/plugins/modules/dynamodb_table.py index a9503735557..5be7a4b9c43 100644 --- a/plugins/modules/dynamodb_table.py +++ b/plugins/modules/dynamodb_table.py @@ -660,7 +660,7 @@ def _generate_global_indexes(billing_mode): continue name = index.get("name") if name in index_exists: - module.fail_json(msg="Duplicate key {0} in list of global indexes".format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of global indexes") # Convert the type name to upper case and remove the global_ index["type"] = index["type"].upper()[7:] index = _generate_index(index, include_throughput) @@ -680,7 +680,7 @@ def _generate_local_indexes(): continue name = index.get("name") if name in index_exists: - module.fail_json(msg="Duplicate key {0} in list of local indexes".format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of local indexes") index["type"] = index["type"].upper() index = _generate_index(index, False) index_exists[name] = True @@ -697,7 +697,7 @@ def _generate_global_index_map(current_table): continue name = index.get("name") if name in global_index_map: - module.fail_json(msg="Duplicate key {0} in list of global indexes".format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of global indexes") idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case and remove the global_ idx["type"] = idx["type"].upper()[7:] @@ -713,7 +713,7 @@ def _generate_local_index_map(current_table): continue name = index.get("name") if name in 
local_index_map: - module.fail_json(msg="Duplicate key {0} in list of local indexes".format(name)) + module.fail_json(msg=f"Duplicate key {name} in list of local indexes") idx = _merge_index_params(index, existing_indexes.get(name, {})) # Convert the type name to upper case idx["type"] = idx["type"].upper() @@ -734,8 +734,8 @@ def _generate_index(index, include_throughput=True): else: if non_key_attributes: module.fail_json( - "DynamoDB does not support specifying non-key-attributes ('includes') for " - "indexes of type 'all'. Index name: {0}".format(index["name"]) + "DynamoDB does not support specifying non-key-attributes ('includes') for indexes of type 'all'. Index" + f" name: {index['name']}" ) idx = dict( @@ -919,9 +919,7 @@ def update_table(current_table): primary_index_changes = _primary_index_changes(current_table) if primary_index_changes: module.fail_json( - "DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {0}".format( - primary_index_changes - ) + f"DynamoDB does not support updating the Primary keys on a table. Changed paramters are: {primary_index_changes}" ) changed = False diff --git a/plugins/modules/ec2_ami_copy.py b/plugins/modules/ec2_ami_copy.py index 5d7e49bde90..170a564e15d 100644 --- a/plugins/modules/ec2_ami_copy.py +++ b/plugins/modules/ec2_ami_copy.py @@ -171,7 +171,7 @@ def copy_image(module, ec2): try: if module.params.get("tag_equality"): - filters = [{"Name": "tag:%s" % k, "Values": [v]} for (k, v) in module.params.get("tags").items()] + filters = [{"Name": f"tag:{k}", "Values": [v]} for (k, v) in module.params.get("tags").items()] filters.append(dict(Name="state", Values=["available", "pending"])) images = ec2.describe_images(Filters=filters) if len(images["Images"]) > 0: @@ -197,7 +197,7 @@ def copy_image(module, ec2): except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Could not copy AMI") except Exception as e: - module.fail_json(msg="Unhandled exception. 
(%s)" % to_native(e)) + module.fail_json(msg=f"Unhandled exception. ({to_native(e)})") def main(): diff --git a/plugins/modules/ec2_launch_template.py b/plugins/modules/ec2_launch_template.py index 8e1240d285f..01d36ccc57c 100644 --- a/plugins/modules/ec2_launch_template.py +++ b/plugins/modules/ec2_launch_template.py @@ -453,13 +453,11 @@ def determine_iam_role(module, name_or_arn): role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) return {"arn": role["InstanceProfile"]["Arn"]} except is_boto3_error_code("NoSuchEntity") as e: - module.fail_json_aws(e, msg="Could not find instance_role {0}".format(name_or_arn)) + module.fail_json_aws(e, msg=f"Could not find instance_role {name_or_arn}") except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="An error occurred while searching for instance_role {0}. Please try supplying the full ARN.".format( - name_or_arn - ), + msg=f"An error occurred while searching for instance_role {name_or_arn}. Please try supplying the full ARN.", ) @@ -481,15 +479,18 @@ def existing_templates(module): except is_boto3_error_code("InvalidLaunchTemplateId.Malformed") as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="Launch template with ID {0} is not a valid ID. It should start with `lt-....`".format( - module.params.get("launch_template_id") + msg=( + f"Launch template with ID {module.params.get('launch_template_id')} is not a valid ID. 
It should start" + " with `lt-....`" ), ) except is_boto3_error_code("InvalidLaunchTemplateId.NotFoundException") as e: # pylint: disable=duplicate-except module.fail_json_aws( e, - msg="Launch template with ID {0} could not be found, please supply a name " - "instead so that a new template can be created".format(module.params.get("launch_template_id")), + msg=( + f"Launch template with ID {module.params.get('launch_template_id')} could not be found, please supply a" + " name instead so that a new template can be created" + ), ) except (ClientError, BotoCoreError, WaiterError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Could not check existing launch templates. This may be an IAM permission problem.") @@ -510,9 +511,7 @@ def existing_templates(module): except (ClientError, BotoCoreError, WaiterError) as e: module.fail_json_aws( e, - msg="Could not find launch template versions for {0} (ID: {1}).".format( - template["LaunchTemplateName"], template_id - ), + msg=f"Could not find launch template versions for {template['LaunchTemplateName']} (ID: {template_id}).", ) @@ -547,10 +546,8 @@ def delete_template(module): ) if v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"]: module.warn( - "Failed to delete template versions {0} on launch template {1}".format( - v_resp["UnsuccessfullyDeletedLaunchTemplateVersions"], - template["LaunchTemplateId"], - ) + f"Failed to delete template versions {v_resp['UnsuccessfullyDeletedLaunchTemplateVersions']} on" + f" launch template {template['LaunchTemplateId']}" ) deleted_versions = [ camel_dict_to_snake_dict(v) for v in v_resp["SuccessfullyDeletedLaunchTemplateVersions"] @@ -558,9 +555,7 @@ def delete_template(module): except (ClientError, BotoCoreError) as e: module.fail_json_aws( e, - msg="Could not delete existing versions of the launch template {0}".format( - template["LaunchTemplateId"] - ), + msg=f"Could not delete existing versions of the launch template {template['LaunchTemplateId']}", ) try: 
resp = ec2.delete_launch_template( @@ -568,7 +563,7 @@ def delete_template(module): aws_retry=True, ) except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not delete launch template {0}".format(template["LaunchTemplateId"])) + module.fail_json_aws(e, msg=f"Could not delete launch template {template['LaunchTemplateId']}") return { "deleted_versions": deleted_versions, "deleted_template": camel_dict_to_snake_dict(resp["LaunchTemplate"]), @@ -647,9 +642,7 @@ def create_or_update(module, template_options): int(module.params.get("source_version")) except ValueError: module.fail_json( - msg='source_version param was not a valid integer, got "{0}"'.format( - module.params.get("source_version") - ) + msg=f"source_version param was not a valid integer, got \"{module.params.get('source_version')}\"" ) # get source template version source_version = next( @@ -658,7 +651,7 @@ def create_or_update(module, template_options): ) if source_version is None: module.fail_json( - msg='source_version does not exist, got "{0}"'.format(module.params.get("source_version")) + msg=f"source_version does not exist, got \"{module.params.get('source_version')}\"" ) resp = ec2.create_launch_template_version( LaunchTemplateId=template["LaunchTemplateId"], @@ -684,9 +677,7 @@ def create_or_update(module, template_options): int(module.params.get("default_version")) except ValueError: module.fail_json( - msg='default_version param was not a valid integer, got "{0}"'.format( - module.params.get("default_version") - ) + msg=f"default_version param was not a valid integer, got \"{module.params.get('default_version')}\"" ) set_default = ec2.modify_launch_template( LaunchTemplateId=template["LaunchTemplateId"], @@ -863,7 +854,7 @@ def main(): elif module.params.get("state") == "absent": out = delete_template(module) else: - module.fail_json(msg='Unsupported value "{0}" for `state` parameter'.format(module.params.get("state"))) + module.fail_json(msg=f"Unsupported value 
\"{module.params.get('state')}\" for `state` parameter") module.exit_json(**out) diff --git a/plugins/modules/ec2_placement_group.py b/plugins/modules/ec2_placement_group.py index 4e1967c846d..ccdd7d54785 100644 --- a/plugins/modules/ec2_placement_group.py +++ b/plugins/modules/ec2_placement_group.py @@ -120,7 +120,7 @@ def search_placement_group(connection, module): try: response = connection.describe_placement_groups(Filters=[{"Name": "group-name", "Values": [name]}]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find placement group named [%s]" % name) + module.fail_json_aws(e, msg=f"Couldn't find placement group named [{name}]") if len(response["PlacementGroups"]) != 1: return None @@ -178,7 +178,7 @@ def create_placement_group(connection, module): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't create placement group [%s]" % name) + module.fail_json_aws(e, msg=f"Couldn't create placement group [{name}]") module.exit_json(changed=True, placement_group=get_placement_group_information(connection, name)) @@ -190,7 +190,7 @@ def delete_placement_group(connection, module): try: connection.delete_placement_group(GroupName=name, DryRun=module.check_mode) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete placement group [%s]" % name) + module.fail_json_aws(e, msg=f"Couldn't delete placement group [{name}]") module.exit_json(changed=True) @@ -220,9 +220,7 @@ def main(): else: name = module.params.get("name") module.fail_json( - msg=("Placement group '{}' exists, can't change strategy" + " from '{}' to '{}'").format( - name, placement_group["strategy"], strategy - ) + msg=f"Placement group '{name}' exists, can't change strategy from '{placement_group['strategy']}' to '{strategy}'" ) elif state == "absent": diff --git 
a/plugins/modules/ec2_placement_group_info.py b/plugins/modules/ec2_placement_group_info.py index 970cd302636..75cbc72585c 100644 --- a/plugins/modules/ec2_placement_group_info.py +++ b/plugins/modules/ec2_placement_group_info.py @@ -95,7 +95,7 @@ def get_placement_groups_details(connection, module): else: response = connection.describe_placement_groups() except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't find placement groups named [%s]" % names) + module.fail_json_aws(e, msg=f"Couldn't find placement groups named [{names}]") results = [] for placement_group in response["PlacementGroups"]: diff --git a/plugins/modules/ec2_transit_gateway.py b/plugins/modules/ec2_transit_gateway.py index 8c6282d0b0f..9b50cb21b9c 100644 --- a/plugins/modules/ec2_transit_gateway.py +++ b/plugins/modules/ec2_transit_gateway.py @@ -327,7 +327,7 @@ def get_matching_tgw(self, tgw_id, description=None, skip_deleted=True): if len(tgws) > 1: self._module.fail_json( - msg="EC2 returned more than one transit Gateway for description {0}, aborting".format(description) + msg=f"EC2 returned more than one transit Gateway for description {description}, aborting" ) elif tgws: tgw = camel_dict_to_snake_dict(tgws[0], ignore_list=["Tags"]) @@ -375,7 +375,7 @@ def create_tgw(self, description): else: result = self.get_matching_tgw(tgw_id=tgw_id) - self._results["msg"] = " Transit gateway {0} created".format(result["transit_gateway_id"]) + self._results["msg"] = f" Transit gateway {result['transit_gateway_id']} created" return result @@ -401,7 +401,7 @@ def delete_tgw(self, tgw_id): else: result = self.get_matching_tgw(tgw_id=tgw_id, skip_deleted=False) - self._results["msg"] = " Transit gateway {0} deleted".format(tgw_id) + self._results["msg"] = f" Transit gateway {tgw_id} deleted" return result diff --git a/plugins/modules/ec2_vpc_egress_igw.py b/plugins/modules/ec2_vpc_egress_igw.py index b15bec20f06..0a309b4863c 100644 --- a/plugins/modules/ec2_vpc_egress_igw.py +++ 
b/plugins/modules/ec2_vpc_egress_igw.py @@ -89,9 +89,7 @@ def delete_eigw(module, connection, eigw_id): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws( - e, msg="Could not delete Egress-Only Internet Gateway {0} from VPC {1}".format(eigw_id, module.vpc_id) - ) + module.fail_json_aws(e, msg=f"Could not delete Egress-Only Internet Gateway {eigw_id} from VPC {module.vpc_id}") if not module.check_mode: changed = response.get("ReturnCode", False) @@ -119,12 +117,12 @@ def create_eigw(module, connection, vpc_id): # We need to catch the error and return something valid changed = True except is_boto3_error_code("InvalidVpcID.NotFound") as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="invalid vpc ID '{0}' provided".format(vpc_id)) + module.fail_json_aws(e, msg=f"invalid vpc ID '{vpc_id}' provided") except ( botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not create Egress-Only Internet Gateway for vpc ID {0}".format(vpc_id)) + module.fail_json_aws(e, msg=f"Could not create Egress-Only Internet Gateway for vpc ID {vpc_id}") if not module.check_mode: gateway = response.get("EgressOnlyInternetGateway", {}) @@ -136,9 +134,7 @@ def create_eigw(module, connection, vpc_id): else: # EIGW gave back a bad attachment state or an invalid response so we error out module.fail_json( - msg="Unable to create and attach Egress Only Internet Gateway to VPCId: {0}. Bad or no state in response".format( - vpc_id - ), + msg=f"Unable to create and attach Egress Only Internet Gateway to VPCId: {vpc_id}. 
Bad or no state in response", **camel_dict_to_snake_dict(response), ) diff --git a/plugins/modules/ec2_vpc_nacl_info.py b/plugins/modules/ec2_vpc_nacl_info.py index ecf530a9d74..40e0398b974 100644 --- a/plugins/modules/ec2_vpc_nacl_info.py +++ b/plugins/modules/ec2_vpc_nacl_info.py @@ -137,7 +137,7 @@ def list_ec2_vpc_nacls(connection, module): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids)) + module.fail_json_aws(e, msg=f"Unable to describe network ACLs {nacl_ids}") # Turn the boto3 result in to ansible_friendly_snaked_names snaked_nacls = [] diff --git a/plugins/modules/ec2_vpc_vgw.py b/plugins/modules/ec2_vpc_vgw.py index e59fe25839c..3ca4d8013e3 100644 --- a/plugins/modules/ec2_vpc_vgw.py +++ b/plugins/modules/ec2_vpc_vgw.py @@ -274,7 +274,7 @@ def create_vgw(client, module): get_waiter(client, "vpn_gateway_exists").wait(VpnGatewayIds=[response["VpnGateway"]["VpnGatewayId"]]) except botocore.exceptions.WaiterError as e: module.fail_json_aws( - e, msg="Failed to wait for Vpn Gateway {0} to be available".format(response["VpnGateway"]["VpnGatewayId"]) + e, msg=f"Failed to wait for Vpn Gateway {response['VpnGateway']['VpnGatewayId']} to be available" ) except is_boto3_error_code("VpnGatewayLimitExceeded") as e: module.fail_json_aws(e, msg="Too many VPN gateways exist in this account.") diff --git a/plugins/modules/ec2_vpc_vpn.py b/plugins/modules/ec2_vpc_vpn.py index 8d8dc1467e1..0efce4a7470 100644 --- a/plugins/modules/ec2_vpc_vpn.py +++ b/plugins/modules/ec2_vpc_vpn.py @@ -384,7 +384,7 @@ def add_routes(connection, vpn_connection_id, routes_to_add): ) except (BotoCoreError, ClientError) as e: raise VPNConnectionException( - msg="Failed while adding route {0} to the VPN connection {1}.".format(route, vpn_connection_id), + msg=f"Failed while adding route {route} to the VPN connection {vpn_connection_id}.", exception=e, 
) @@ -397,7 +397,7 @@ def remove_routes(connection, vpn_connection_id, routes_to_remove): ) except (BotoCoreError, ClientError) as e: raise VPNConnectionException( - msg="Failed to remove route {0} from the VPN connection {1}.".format(route, vpn_connection_id), + msg=f"Failed to remove route {route} from the VPN connection {vpn_connection_id}.", exception=e, ) @@ -435,7 +435,7 @@ def create_filter(module_params, provided_filters): elif raw_param in list(boto3ify_filter.items()): param = raw_param else: - raise VPNConnectionException(msg="{0} is not a valid filter.".format(raw_param)) + raise VPNConnectionException(msg=f"{raw_param} is not a valid filter.") # reformat filters with special formats if param == "tag": @@ -487,8 +487,10 @@ def find_connection_response(connections=None): return None else: raise VPNConnectionException( - msg="More than one matching VPN connection was found. " - "To modify or delete a VPN please specify vpn_connection_id or add filters." + msg=( + "More than one matching VPN connection was found. " + "To modify or delete a VPN please specify vpn_connection_id or add filters." + ) ) # Found unique match @@ -524,8 +526,10 @@ def create_connection( if not (customer_gateway_id and vpn_gateway_id): raise VPNConnectionException( - msg="No matching connection was found. To create a new connection you must provide " - "both vpn_gateway_id and customer_gateway_id." + msg=( + "No matching connection was found. To create a new connection you must provide " + "both vpn_gateway_id and customer_gateway_id." 
+ ) ) try: vpn = connection.create_vpn_connection( @@ -537,7 +541,7 @@ def create_connection( ) except WaiterError as e: raise VPNConnectionException( - msg="Failed to wait for VPN connection {0} to be available".format(vpn["VpnConnection"]["VpnConnectionId"]), + msg=f"Failed to wait for VPN connection {vpn['VpnConnection']['VpnConnectionId']} to be available", exception=e, ) except (BotoCoreError, ClientError) as e: @@ -555,19 +559,17 @@ def delete_connection(connection, vpn_connection_id, delay, max_attempts): ) except WaiterError as e: raise VPNConnectionException( - msg="Failed to wait for VPN connection {0} to be removed".format(vpn_connection_id), exception=e + msg=f"Failed to wait for VPN connection {vpn_connection_id} to be removed", exception=e ) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException( - msg="Failed to delete the VPN connection: {0}".format(vpn_connection_id), exception=e - ) + raise VPNConnectionException(msg=f"Failed to delete the VPN connection: {vpn_connection_id}", exception=e) def add_tags(connection, vpn_connection_id, add): try: connection.create_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=add) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to add the tags: {0}.".format(add), exception=e) + raise VPNConnectionException(msg=f"Failed to add the tags: {add}.", exception=e) def remove_tags(connection, vpn_connection_id, remove): @@ -576,7 +578,7 @@ def remove_tags(connection, vpn_connection_id, remove): try: connection.delete_tags(aws_retry=True, Resources=[vpn_connection_id], Tags=key_dict_list) except (BotoCoreError, ClientError) as e: - raise VPNConnectionException(msg="Failed to remove the tags: {0}.".format(remove), exception=e) + raise VPNConnectionException(msg=f"Failed to remove the tags: {remove}.", exception=e) def check_for_update(connection, module_params, vpn_connection_id): @@ -624,9 +626,10 @@ def check_for_update(connection, module_params, 
vpn_connection_id): if will_be is not None and to_text(will_be) != to_text(is_now): raise VPNConnectionException( - msg="You cannot modify {0}, the current value of which is {1}. Modifiable VPN " - "connection attributes are tags and routes. The value you tried to change it to " - "is {2}.".format(attribute, is_now, will_be) + msg=( + f"You cannot modify {attribute}, the current value of which is {is_now}. Modifiable VPN connection" + f" attributes are tags and routes. The value you tried to change it to is {will_be}." + ) ) return changes diff --git a/plugins/modules/ec2_win_password.py b/plugins/modules/ec2_win_password.py index d1553c91aae..a9ca8e94ca1 100644 --- a/plugins/modules/ec2_win_password.py +++ b/plugins/modules/ec2_win_password.py @@ -174,7 +174,7 @@ def ec2_win_password(module): decoded = b64decode(data) if wait and datetime.datetime.now() >= end: - module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout) + module.fail_json(msg=f"wait for password timeout after {int(wait_timeout)} seconds") if key_file is not None and b_key_data is None: try: @@ -182,7 +182,7 @@ def ec2_win_password(module): key = load_pem_private_key(f.read(), b_key_passphrase, default_backend()) except IOError as e: # Handle bad files - module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror)) + module.fail_json(msg=f"I/O error ({int(e.errno)}) opening key file: {e.strerror}") except (ValueError, TypeError) as e: # Handle issues loading key module.fail_json(msg="unable to parse key file") diff --git a/plugins/modules/ecs_attribute.py b/plugins/modules/ecs_attribute.py index 085761b19c3..682014675a1 100644 --- a/plugins/modules/ecs_attribute.py +++ b/plugins/modules/ecs_attribute.py @@ -142,13 +142,13 @@ def _parse_attrs(self, attrs): for attr in attrs: if isinstance(attr, dict): if len(attr) != 1: - self.module.fail_json(msg="Incorrect attribute format - %s" % str(attr)) + self.module.fail_json(msg=f"Incorrect attribute format - 
{str(attr)}") name, value = list(attr.items())[0] attrs_parsed.append({"name": name, "value": value}) elif isinstance(attr, str): attrs_parsed.append({"name": attr, "value": None}) else: - self.module.fail_json(msg="Incorrect attributes format - %s" % str(attrs)) + self.module.fail_json(msg=f"Incorrect attributes format - {str(attrs)}") return attrs_parsed @@ -197,14 +197,14 @@ def _get_ecs_arn(self): cluster=self.cluster, containerInstances=ecs_instances_arns )["containerInstances"] except (ClientError, EndpointConnectionError) as e: - self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) + self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}") try: ecs_arn = next(inst for inst in ec2_instances if inst["ec2InstanceId"] == self.ec2_id)[ "containerInstanceArn" ] except StopIteration: - self.module.fail_json(msg="EC2 instance Id not found in ECS cluster - %s" % str(self.cluster)) + self.module.fail_json(msg=f"EC2 instance Id not found in ECS cluster - {str(self.cluster)}") return ecs_arn @@ -238,7 +238,7 @@ def attrs_get_by_name(self, attrs): for attr_found in self.ecs.list_attributes(cluster=self.cluster, **attr_obj)["attributes"] ] except ClientError as e: - self.module.fail_json(msg="Can't connect to the cluster - %s" % str(e)) + self.module.fail_json(msg=f"Can't connect to the cluster - {str(e)}") matched_objs = [target for target in matched_ecs_targets if target["targetId"] == self.ecs_arn] diff --git a/plugins/modules/ecs_cluster.py b/plugins/modules/ecs_cluster.py index e627cd98f1b..fca35331f69 100644 --- a/plugins/modules/ecs_cluster.py +++ b/plugins/modules/ecs_cluster.py @@ -203,7 +203,7 @@ def describe_cluster(self, cluster_name): c = self.find_in_array(response["clusters"], cluster_name) if c: return c - raise Exception("Unknown problem describing cluster %s." 
% cluster_name) + raise Exception(f"Unknown problem describing cluster {cluster_name}.") def create_cluster(self, cluster_name, capacity_providers, capacity_provider_strategy): params = dict(clusterName=cluster_name) diff --git a/plugins/modules/ecs_ecr.py b/plugins/modules/ecs_ecr.py index 1e6efd7b331..fb812ca0a45 100644 --- a/plugins/modules/ecs_ecr.py +++ b/plugins/modules/ecs_ecr.py @@ -272,8 +272,7 @@ def create_repository(self, registry_id, name, image_tag_mutability, encryption_ default_registry_id = self.sts.get_caller_identity().get("Account") if registry_id != default_registry_id: raise Exception( - "Cannot create repository in registry {0}." - "Would be created in {1} instead.".format(registry_id, default_registry_id) + f"Cannot create repository in registry {registry_id}. Would be created in {default_registry_id} instead." ) if encryption_configuration is None: @@ -303,8 +302,8 @@ def set_repository_policy(self, registry_id, name, policy_text, force): if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = "{0}:{1}".format(registry_id, name) - raise Exception("could not find repository {0}".format(printable)) + printable = f"{registry_id}:{name}" + raise Exception(f"could not find repository {printable}") return def delete_repository(self, registry_id, name, force): @@ -367,8 +366,8 @@ def put_lifecycle_policy(self, registry_id, name, policy_text): if self.get_repository(registry_id, name) is None: printable = name if registry_id: - printable = "{0}:{1}".format(registry_id, name) - raise Exception("could not find repository {0}".format(printable)) + printable = f"{registry_id}:{name}" + raise Exception(f"could not find repository {printable}") return def purge_lifecycle_policy(self, registry_id, name): diff --git a/plugins/modules/ecs_service.py b/plugins/modules/ecs_service.py index af5ad567dc8..8115b3b34fd 100644 --- a/plugins/modules/ecs_service.py +++ b/plugins/modules/ecs_service.py @@ -761,7 +761,7 @@ def 
describe_service(self, cluster_name, service_name): c = self.find_in_array(response["services"], service_name) if c: return c - raise Exception("Unknown problem describing service %s." % service_name) + raise Exception(f"Unknown problem describing service {service_name}.") def is_matching_service(self, expected, existing): # aws returns the arn of the task definition @@ -1065,9 +1065,7 @@ def main(): except Exception as e: module.fail_json_aws( e, - msg="Exception describing service '{0}' in cluster '{1}'".format( - module.params["name"], module.params["cluster"] - ), + msg=f"Exception describing service '{module.params['name']}' in cluster '{module.params['cluster']}'", ) results = dict(changed=False) @@ -1265,7 +1263,7 @@ def main(): break time.sleep(delay) if i is repeat - 1: - module.fail_json(msg="Service still not deleted after {0} tries of {1} seconds each.".format(repeat, delay)) + module.fail_json(msg=f"Service still not deleted after {repeat} tries of {delay} seconds each.") return module.exit_json(**results) diff --git a/plugins/modules/ecs_tag.py b/plugins/modules/ecs_tag.py index f11fc1f33ac..109b974eea6 100644 --- a/plugins/modules/ecs_tag.py +++ b/plugins/modules/ecs_tag.py @@ -123,7 +123,7 @@ def get_tags(ecs, module, resource): try: return boto3_tag_list_to_ansible_dict(ecs.list_tags_for_resource(resourceArn=resource)["tags"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to fetch tags for resource {0}".format(resource)) + module.fail_json_aws(e, msg=f"Failed to fetch tags for resource {resource}") def get_arn(ecs, module, cluster_name, resource_type, resource): @@ -144,9 +144,9 @@ def get_arn(ecs, module, cluster_name, resource_type, resource): description = ecs.describe_container_instances(clusters=[resource]) resource_arn = description["containerInstances"][0]["containerInstanceArn"] except (IndexError, KeyError): - module.fail_json(msg="Failed to find {0} {1}".format(resource_type, resource)) + 
module.fail_json(msg=f"Failed to find {resource_type} {resource}") except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to find {0} {1}".format(resource_type, resource)) + module.fail_json_aws(e, msg=f"Failed to find {resource_type} {resource}") return resource_arn @@ -200,7 +200,7 @@ def main(): try: ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource)) + module.fail_json_aws(e, msg=f"Failed to remove tags {remove_tags} from resource {resource}") if state == "present" and add_tags: result["changed"] = True @@ -211,7 +211,7 @@ def main(): tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name="key", tag_value_key_name="value") ecs.tag_resource(resourceArn=resource_arn, tags=tags) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to set tags {0} on resource {1}".format(add_tags, resource)) + module.fail_json_aws(e, msg=f"Failed to set tags {add_tags} on resource {resource}") result["tags"] = get_tags(ecs, module, resource_arn) module.exit_json(**result) diff --git a/plugins/modules/ecs_taskdefinition.py b/plugins/modules/ecs_taskdefinition.py index 0a8e413dbcd..f150255fb89 100644 --- a/plugins/modules/ecs_taskdefinition.py +++ b/plugins/modules/ecs_taskdefinition.py @@ -830,8 +830,10 @@ def register_task( if network_mode == "awsvpc" and "hostPort" in port_mapping: if port_mapping["hostPort"] != port_mapping.get("containerPort"): self.module.fail_json( - msg="In awsvpc network mode, host port must be set to the same as " - "container port or not be set" + msg=( + "In awsvpc network mode, host port must be set to the same as " + "container port or not be set" + ) ) if "linuxParameters" in container: @@ -1017,17 +1019,19 @@ def main(): if existing and existing["status"] != "ACTIVE": # We cannot reactivate an inactive revision 
module.fail_json( - msg="A task in family '%s' already exists for revision %d, but it is inactive" % (family, revision) + msg=f"A task in family '{family}' already exists for revision {int(revision)}, but it is inactive" ) elif not existing: if not existing_definitions_in_family and revision != 1: module.fail_json( - msg="You have specified a revision of %d but a created revision would be 1" % revision + msg=f"You have specified a revision of {int(revision)} but a created revision would be 1" ) elif existing_definitions_in_family and existing_definitions_in_family[-1]["revision"] + 1 != revision: module.fail_json( - msg="You have specified a revision of %d but a created revision would be %d" - % (revision, existing_definitions_in_family[-1]["revision"] + 1) + msg=( + f"You have specified a revision of {int(revision)} but a created revision would be" + f" {int(existing_definitions_in_family[-1]['revision'] + 1)}" + ) ) else: existing = None diff --git a/plugins/modules/efs.py b/plugins/modules/efs.py index c1d9f247b34..df79babc92c 100644 --- a/plugins/modules/efs.py +++ b/plugins/modules/efs.py @@ -302,8 +302,8 @@ def get_file_systems(self, **kwargs): AWS documentation is available here: https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html """ - item["MountPoint"] = ".%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) - item["FilesystemAddress"] = "%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) + item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" + item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" if "Timestamp" in item["SizeInBytes"]: item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) if item["LifeCycleState"] == self.STATE_AVAILABLE: diff --git a/plugins/modules/efs_info.py b/plugins/modules/efs_info.py index 533af10d84d..76952337b97 100644 --- a/plugins/modules/efs_info.py +++ b/plugins/modules/efs_info.py @@ 
-195,7 +195,7 @@ def __init__(self, module): self.connection = module.client("efs") self.module = module except Exception as e: - module.fail_json(msg="Failed to connect to AWS: %s" % to_native(e)) + module.fail_json(msg=f"Failed to connect to AWS: {to_native(e)}") self.region = module.region @@ -280,8 +280,8 @@ def get_file_systems(self, file_system_id=None, creation_token=None): AWS documentation is available here: U(https://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html) """ - item["MountPoint"] = ".%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) - item["FilesystemAddress"] = "%s.efs.%s.amazonaws.com:/" % (item["FileSystemId"], self.region) + item["MountPoint"] = f".{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" + item["FilesystemAddress"] = f"{item['FileSystemId']}.efs.{self.region}.amazonaws.com:/" if "Timestamp" in item["SizeInBytes"]: item["SizeInBytes"]["Timestamp"] = str(item["SizeInBytes"]["Timestamp"]) diff --git a/plugins/modules/efs_tag.py b/plugins/modules/efs_tag.py index 80eb5cc7b9c..3a4c5c8ced6 100644 --- a/plugins/modules/efs_tag.py +++ b/plugins/modules/efs_tag.py @@ -118,7 +118,7 @@ def get_tags(efs, module, resource): try: return boto3_tag_list_to_ansible_dict(efs.list_tags_for_resource(aws_retry=True, ResourceId=resource)["Tags"]) except (BotoCoreError, ClientError) as get_tags_error: - module.fail_json_aws(get_tags_error, msg="Failed to fetch tags for resource {0}".format(resource)) + module.fail_json_aws(get_tags_error, msg=f"Failed to fetch tags for resource {resource}") def main(): @@ -164,7 +164,7 @@ def main(): efs.untag_resource(aws_retry=True, ResourceId=resource, TagKeys=list(remove_tags.keys())) except (BotoCoreError, ClientError) as remove_tag_error: module.fail_json_aws( - remove_tag_error, msg="Failed to remove tags {0} from resource {1}".format(remove_tags, resource) + remove_tag_error, msg=f"Failed to remove tags {remove_tags} from resource {resource}" ) if state == 
"present" and add_tags: @@ -176,9 +176,7 @@ def main(): tags = ansible_dict_to_boto3_tag_list(add_tags) efs.tag_resource(aws_retry=True, ResourceId=resource, Tags=tags) except (BotoCoreError, ClientError) as set_tag_error: - module.fail_json_aws( - set_tag_error, msg="Failed to set tags {0} on resource {1}".format(add_tags, resource) - ) + module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {add_tags} on resource {resource}") result["tags"] = get_tags(efs, module, resource) module.exit_json(**result) diff --git a/plugins/modules/eks_cluster.py b/plugins/modules/eks_cluster.py index 13ea5997d4d..a445def55c3 100644 --- a/plugins/modules/eks_cluster.py +++ b/plugins/modules/eks_cluster.py @@ -219,9 +219,9 @@ def ensure_present(client, module): params["tags"] = module.params["tags"] cluster = client.create_cluster(**params)["cluster"] except botocore.exceptions.EndpointConnectionError as e: - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create cluster %s" % name) + module.fail_json_aws(e, msg=f"Couldn't create cluster {name}") if wait: wait_until(client, module, "cluster_active") @@ -242,9 +242,9 @@ def ensure_absent(client, module): try: client.delete_cluster(name=module.params["name"]) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete cluster %s" % name) + module.fail_json_aws(e, msg=f"Couldn't delete cluster {name}") if wait: wait_until(client, module, "cluster_deleted") @@ -259,12 +259,12 @@ def get_cluster(client, 
module): except is_boto3_error_code("ResourceNotFoundException"): return None except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except - module.fail_json(msg="Region %s is not supported by EKS" % client.meta.region_name) + module.fail_json(msg=f"Region {client.meta.region_name} is not supported by EKS") except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get cluster %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get cluster {name}") def wait_until(client, module, waiter_name="cluster_active"): diff --git a/plugins/modules/eks_fargate_profile.py b/plugins/modules/eks_fargate_profile.py index 71a632a2223..131f0651bd3 100644 --- a/plugins/modules/eks_fargate_profile.py +++ b/plugins/modules/eks_fargate_profile.py @@ -188,7 +188,7 @@ def validate_tags(client, module, fargate_profile): existing_tags = client.list_tags_for_resource(resourceArn=fargate_profile["fargateProfileArn"])["tags"] tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list or compare tags for Fargate Profile %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to list or compare tags for Fargate Profile {module.params.get('name')}") if tags_to_remove: changed = True @@ -196,7 +196,7 @@ def validate_tags(client, module, fargate_profile): try: client.untag_resource(resourceArn=fargate_profile["fargateProfileArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Fargate Profile %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}") if tags_to_add: changed = True @@ -204,7 
+204,7 @@ def validate_tags(client, module, fargate_profile): try: client.tag_resource(resourceArn=fargate_profile["fargateProfileArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Fargate Profile %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Fargate Profile {module.params.get('name')}") return changed @@ -252,7 +252,7 @@ def create_or_update_fargate_profile(client, module): ) fargate_profile = client.create_fargate_profile(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create fargate profile %s" % name) + module.fail_json_aws(e, msg=f"Couldn't create fargate profile {name}") if wait: wait_until(client, module, "fargate_profile_active", name, cluster_name) @@ -274,7 +274,7 @@ def delete_fargate_profile(client, module): try: client.delete_fargate_profile(clusterName=cluster_name, fargateProfileName=name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete fargate profile %s" % name) + module.fail_json_aws(e, msg=f"Couldn't delete fargate profile {name}") if wait: wait_until(client, module, "fargate_profile_deleted", name, cluster_name) diff --git a/plugins/modules/eks_nodegroup.py b/plugins/modules/eks_nodegroup.py index 6704af1af09..f146328f098 100644 --- a/plugins/modules/eks_nodegroup.py +++ b/plugins/modules/eks_nodegroup.py @@ -370,21 +370,21 @@ def validate_tags(client, module, nodegroup): existing_tags = client.list_tags_for_resource(resourceArn=nodegroup["nodegroupArn"])["tags"] tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, desired_tags, module.params.get("purge_tags")) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list or compare tags for Nodegroup %s." 
% module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to list or compare tags for Nodegroup {module.params.get('name')}.") if tags_to_remove: if not module.check_mode: changed = True try: client.untag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tagKeys=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Nodegroup %s." % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.") if tags_to_add: if not module.check_mode: changed = True try: client.tag_resource(aws_retry=True, ResourceArn=nodegroup["nodegroupArn"], tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Nodegroup %s." % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Nodegroup {module.params.get('name')}.") return changed @@ -422,7 +422,7 @@ def validate_taints(client, module, nodegroup, param_taints): try: client.update_nodegroup_config(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set taints for Nodegroup %s." % params["nodegroupName"]) + module.fail_json_aws(e, msg=f"Unable to set taints for Nodegroup {params['nodegroupName']}.") return changed @@ -458,7 +458,7 @@ def validate_labels(client, module, nodegroup, param_labels): try: client.update_nodegroup_config(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set labels for Nodegroup %s." 
% params["nodegroupName"]) + module.fail_json_aws(e, msg=f"Unable to set labels for Nodegroup {params['nodegroupName']}.") return changed @@ -467,7 +467,7 @@ def compare_params(module, params, nodegroup): for param in ["nodeRole", "subnets", "diskSize", "instanceTypes", "amiTypes", "remoteAccess", "capacityType"]: if (param in nodegroup) and (param in params): if nodegroup[param] != params[param]: - module.fail_json(msg="Cannot modify parameter %s." % param) + module.fail_json(msg=f"Cannot modify parameter {param}.") if ("launchTemplate" not in nodegroup) and ("launchTemplate" in params): module.fail_json(msg="Cannot add Launch Template in this Nodegroup.") if nodegroup["updateConfig"] != params["updateConfig"]: @@ -485,7 +485,7 @@ def compare_params_launch_template(module, params, nodegroup): if (key in params["launchTemplate"]) and ( params["launchTemplate"][key] != nodegroup["launchTemplate"][key] ): - module.fail_json(msg="Cannot modify Launch Template %s." % key) + module.fail_json(msg=f"Cannot modify Launch Template {key}.") if ("version" in params["launchTemplate"]) and ( params["launchTemplate"]["version"] != nodegroup["launchTemplate"]["version"] ): @@ -593,7 +593,7 @@ def create_or_update_nodegroups(client, module): try: nodegroup = client.create_nodegroup(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create Nodegroup %s." % params["nodegroupName"]) + module.fail_json_aws(e, msg=f"Couldn't create Nodegroup {params['nodegroupName']}.") if wait: wait_until(client, module, "nodegroup_active", params["nodegroupName"], params["clusterName"]) @@ -613,7 +613,7 @@ def delete_nodegroups(client, module): try: client.delete_nodegroup(clusterName=clusterName, nodegroupName=name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete Nodegroup %s." 
% name) + module.fail_json_aws(e, msg=f"Couldn't delete Nodegroup {name}.") if wait: wait_until(client, module, "nodegroup_deleted", name, clusterName) @@ -630,7 +630,7 @@ def get_nodegroup(client, module, nodegroup_name, cluster_name): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get Nodegroup %s." % nodegroup_name) + module.fail_json_aws(e, msg=f"Couldn't get Nodegroup {nodegroup_name}.") def wait_until(client, module, waiter_name, nodegroup_name, cluster_name): diff --git a/plugins/modules/elasticache.py b/plugins/modules/elasticache.py index ac6ea78b69f..e7a9b1808ff 100644 --- a/plugins/modules/elasticache.py +++ b/plugins/modules/elasticache.py @@ -218,8 +218,7 @@ def create(self): if self.wait: self._wait_for_status("gone") else: - msg = "'%s' is currently deleting. Cannot create." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json(msg=f"'{self.name}' is currently deleting. Cannot create.") kwargs = dict( CacheClusterId=self.name, @@ -262,8 +261,7 @@ def delete(self): if self.wait: self._wait_for_status("available") else: - msg = "'%s' is currently %s. Cannot delete." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot delete.") try: response = self.conn.delete_cache_cluster(CacheClusterId=self.name) @@ -280,8 +278,7 @@ def delete(self): def sync(self): """Sync settings to cluster if required""" if not self.exists(): - msg = "'%s' is %s. Cannot sync." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot sync.") if self.status in ["creating", "rebooting", "modifying"]: if self.wait: @@ -293,11 +290,13 @@ def sync(self): if self._requires_destroy_and_create(): if not self.hard_modify: - msg = "'%s' requires destructive modification. 'hard_modify' must be set to true to proceed." 
- self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires destructive modification. 'hard_modify' must be set to true to proceed." + ) if not self.wait: - msg = "'%s' requires destructive modification. 'wait' must be set to true." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires destructive modification. 'wait' must be set to true to proceed." + ) self.delete() self.create() return @@ -331,16 +330,14 @@ def modify(self): def reboot(self): """Reboot the cache cluster""" if not self.exists(): - msg = "'%s' is %s. Cannot reboot." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is {self.status}. Cannot reboot.") if self.status == "rebooting": return if self.status in ["creating", "modifying"]: if self.wait: self._wait_for_status("available") else: - msg = "'%s' is currently %s. Cannot reboot." - self.module.fail_json(msg=msg % (self.name, self.status)) + self.module.fail_json(msg=f"'{self.name}' is currently {self.status}. Cannot reboot.") # Collect ALL nodes for reboot cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] @@ -369,12 +366,12 @@ def _wait_for_status(self, awaited_status): # No need to wait, we're already done return if status_map[self.status] != awaited_status: - msg = "Invalid awaited status. '%s' cannot transition to '%s'" - self.module.fail_json(msg=msg % (self.status, awaited_status)) + self.module.fail_json( + msg=f"Invalid awaited status. '{self.status}' cannot transition to '{awaited_status}'" + ) if awaited_status not in set(status_map.values()): - msg = "'%s' is not a valid awaited status." - self.module.fail_json(msg=msg % awaited_status) + self.module.fail_json(msg=f"'{awaited_status}' is not a valid awaited status.") while True: sleep(1) @@ -470,8 +467,9 @@ def _get_nodes_to_remove(self): return [] if not self.hard_modify: - msg = "'%s' requires removal of cache nodes. 
'hard_modify' must be set to true to proceed." - self.module.fail_json(msg=msg % self.name) + self.module.fail_json( + msg=f"'{self.name}' requires removal of cache nodes. 'hard_modify' must be set to true to proceed." + ) cache_node_ids = [cn["CacheNodeId"] for cn in self.data["CacheNodes"]] return cache_node_ids[-num_nodes_to_remove:] diff --git a/plugins/modules/elasticache_info.py b/plugins/modules/elasticache_info.py index 28b31f76a7f..021d3a0270e 100644 --- a/plugins/modules/elasticache_info.py +++ b/plugins/modules/elasticache_info.py @@ -467,14 +467,14 @@ def get_elasticache_clusters(client, module): results = [] for cluster in clusters: cluster = camel_dict_to_snake_dict(cluster) - arn = "arn:aws:elasticache:%s:%s:cluster:%s" % (region, account_id, cluster["cache_cluster_id"]) + arn = f"arn:aws:elasticache:{region}:{account_id}:cluster:{cluster['cache_cluster_id']}" try: tags = get_elasticache_tags_with_backoff(client, arn) except is_boto3_error_code("CacheClusterNotFound"): # e.g: Cluster was listed but is in deleting state continue except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get tags for cluster %s") + module.fail_json_aws(e, msg=f"Couldn't get tags for cluster {cluster['cache_cluster_id']}") cluster["tags"] = boto3_tag_list_to_ansible_dict(tags) diff --git a/plugins/modules/elasticache_parameter_group.py b/plugins/modules/elasticache_parameter_group.py index 1e5a1c63b6f..20f5ed9838b 100644 --- a/plugins/modules/elasticache_parameter_group.py +++ b/plugins/modules/elasticache_parameter_group.py @@ -145,7 +145,7 @@ def make_current_modifiable_param_dict(module, conn, name): """Gets the current state of the cache parameter group and creates a dict with the format: {ParameterName: [Allowed_Values, DataType, ParameterValue]}""" current_info = get_info(conn, name) if current_info is False: - module.fail_json(msg="Could not connect to the cache parameter group %s." 
% name) + module.fail_json(msg=f"Could not connect to the cache parameter group {name}.") parameters = current_info["Parameters"] modifiable_params = {} @@ -168,8 +168,7 @@ def check_valid_modification(module, values, modifiable_params): # check valid modifiable parameters if parameter not in modifiable_params: module.fail_json( - msg="%s is not a modifiable parameter. Valid parameters to modify are: %s." - % (parameter, modifiable_params.keys()) + msg=f"{parameter} is not a modifiable parameter. Valid parameters to modify are: {modifiable_params.keys()}." ) # check allowed datatype for modified parameters @@ -186,13 +185,17 @@ def check_valid_modification(module, values, modifiable_params): values[parameter] = 1 if new_value else 0 else: module.fail_json( - msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." - % (new_value, type(new_value), parameter, modifiable_params[parameter][1]) + msg=( + f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter" + f" {parameter}. Expected a type {modifiable_params[parameter][1]}." + ) ) else: module.fail_json( - msg="%s (type %s) is not an allowed value for the parameter %s. Expected a type %s." - % (new_value, type(new_value), parameter, modifiable_params[parameter][1]) + msg=( + f"{new_value} (type {type(new_value)}) is not an allowed value for the parameter {parameter}." + f" Expected a type {modifiable_params[parameter][1]}." + ) ) # check allowed values for modifiable parameters @@ -200,8 +203,7 @@ def check_valid_modification(module, values, modifiable_params): if choices: if not (to_text(new_value) in choices or isinstance(new_value, int)): module.fail_json( - msg="%s is not an allowed value for the parameter %s. Valid parameters are: %s." - % (new_value, parameter, choices) + msg=f"{new_value} is not an allowed value for the parameter {parameter}. Valid parameters are: {choices}." 
) # check if a new value is different from current value @@ -327,7 +329,7 @@ def main(): module.fail_json(msg="Creating a group requires a family group.") elif state == "reset" and not exists: module.fail_json( - msg="No group %s to reset. Please create the group before using the state 'reset'." % parameter_group_name + msg=f"No group {parameter_group_name} to reset. Please create the group before using the state 'reset'." ) # Taking action diff --git a/plugins/modules/elasticache_snapshot.py b/plugins/modules/elasticache_snapshot.py index 66c9cb9da57..b6b6f55069c 100644 --- a/plugins/modules/elasticache_snapshot.py +++ b/plugins/modules/elasticache_snapshot.py @@ -156,8 +156,10 @@ def delete(module, connection, name): changed = False except is_boto3_error_code("InvalidSnapshotState"): # pylint: disable=duplicate-except module.fail_json( - msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion." - "You may need to wait a few minutes." + msg=( + "Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow" + " deletion.You may need to wait a few minutes." 
+ ) ) except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to delete the snapshot.") diff --git a/plugins/modules/elb_instance.py b/plugins/modules/elb_instance.py index 2d6ca291968..6489a86bcf9 100644 --- a/plugins/modules/elb_instance.py +++ b/plugins/modules/elb_instance.py @@ -386,7 +386,7 @@ def main(): if ec2_elbs is not None: for elb in ec2_elbs: if not elb_man.exists(elb): - module.fail_json(msg="ELB {0} does not exist".format(elb)) + module.fail_json(msg=f"ELB {elb} does not exist") if module.params["state"] == "present": elb_man.register(wait, enable_availability_zone, timeout) diff --git a/plugins/modules/elb_target.py b/plugins/modules/elb_target.py index cab7b10aef8..d7dfaf824cb 100644 --- a/plugins/modules/elb_target.py +++ b/plugins/modules/elb_target.py @@ -136,7 +136,7 @@ def convert_tg_name_to_arn(connection, module, tg_name): try: response = describe_target_groups_with_backoff(connection, tg_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe target group {0}".format(tg_name)) + module.fail_json_aws(e, msg=f"Unable to describe target group {tg_name}") tg_arn = response["TargetGroups"][0]["TargetGroupArn"] @@ -170,7 +170,7 @@ def describe_targets(connection, module, tg_arn, target=None): return {} return targets[0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe target health for target {0}".format(target)) + module.fail_json_aws(e, msg=f"Unable to describe target health for target {target}") @AWSRetry.jittered_backoff(retries=10, delay=10) @@ -216,7 +216,7 @@ def register_target(connection, module): connection, module, target_group_arn, target, target_status, target_status_timeout ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to deregister 
target {0}".format(target)) + module.fail_json_aws(e, msg=f"Unable to deregister target {target}") # Get all targets for the target group target_descriptions = describe_targets(connection, module, target_group_arn) @@ -274,7 +274,7 @@ def deregister_target(connection, module): deregister_target_with_backoff(connection, target_group_arn, target) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Unable to deregister target {0}".format(target)) + module.fail_json(msg=f"Unable to deregister target {target}") else: if current_target_reason != "Target.NotRegistered" and current_target_state != "draining": module.warn( @@ -306,9 +306,7 @@ def target_status_check(connection, module, target_group_arn, target, target_sta sleep(1) if not reached_state: module.fail_json( - msg="Status check timeout of {0} exceeded, last status was {1}: ".format( - target_status_timeout, health_state - ) + msg=f"Status check timeout of {target_status_timeout} exceeded, last status was {health_state}: " ) diff --git a/plugins/modules/elb_target_group.py b/plugins/modules/elb_target_group.py index 93a3f333df6..4eb38f4c2d4 100644 --- a/plugins/modules/elb_target_group.py +++ b/plugins/modules/elb_target_group.py @@ -667,7 +667,7 @@ def create_or_update_target_group(connection, module): if target_group: diffs = [param for param in ("Port", "Protocol", "VpcId") if target_group.get(param) != params.get(param)] if diffs: - module.fail_json(msg="Cannot modify %s parameter(s) for a target group" % ", ".join(diffs)) + module.fail_json(msg=f"Cannot modify {', '.join(diffs)} parameter(s) for a target group") # Target group exists so check health check parameters match what has been passed health_check_params = dict() diff --git a/plugins/modules/elb_target_info.py b/plugins/modules/elb_target_info.py index e318f6c5b65..add122416d9 100644 --- a/plugins/modules/elb_target_info.py +++ b/plugins/modules/elb_target_info.py @@ -279,11 +279,11 
@@ def _get_instance_ips(self): # typically this will happen if the instance doesn't exist self.module.fail_json_aws( e, - msg="Could not get instance info for instance '%s'" % (self.instance_id), + msg=f"Could not get instance info for instance '{self.instance_id}'", ) if len(reservations) < 1: - self.module.fail_json(msg="Instance ID %s could not be found" % self.instance_id) + self.module.fail_json(msg=f"Instance ID {self.instance_id} could not be found") instance = reservations[0]["Instances"][0] @@ -340,7 +340,7 @@ def _get_target_descriptions(self, target_groups): response = self.elbv2.describe_target_health(TargetGroupArn=tg.target_group_arn, aws_retry=True) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws( - e, msg="Could not describe target " + "health for target group %s" % tg.target_group_arn + e, msg="Could not describe target " + f"health for target group {tg.target_group_arn}" ) for t in response["TargetHealthDescriptions"]: diff --git a/plugins/modules/glue_connection.py b/plugins/modules/glue_connection.py index b1c935929f8..18039a8616d 100644 --- a/plugins/modules/glue_connection.py +++ b/plugins/modules/glue_connection.py @@ -269,7 +269,7 @@ def _await_glue_connection(connection, module): return glue_connection time.sleep(check_interval) - module.fail_json(msg="Timeout waiting for Glue connection %s" % module.params.get("name")) + module.fail_json(msg=f"Timeout waiting for Glue connection {module.params.get('name')}") def create_or_update_glue_connection(connection, connection_ec2, module, glue_connection): @@ -335,8 +335,10 @@ def create_or_update_glue_connection(connection, connection_ec2, module, glue_co if glue_connection: module.deprecate( - "The 'connection_properties' return key is deprecated and will be replaced" - " by 'raw_connection_properties'. Both values are returned for now.", + ( + "The 'connection_properties' return key is deprecated and will be replaced" + " by 'raw_connection_properties'. 
Both values are returned for now." + ), date="2024-06-01", collection_name="community.aws", ) diff --git a/plugins/modules/glue_crawler.py b/plugins/modules/glue_crawler.py index 0a8598b6c7a..5d92219df8b 100644 --- a/plugins/modules/glue_crawler.py +++ b/plugins/modules/glue_crawler.py @@ -305,7 +305,7 @@ def ensure_tags(connection, module, glue_crawler): return False account_id, partition = get_aws_account_info(module) - arn = "arn:{0}:glue:{1}:{2}:crawler/{3}".format(partition, module.region, account_id, module.params.get("name")) + arn = f"arn:{partition}:glue:{module.region}:{account_id}:crawler/{module.params.get('name')}" try: existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) @@ -313,7 +313,7 @@ def ensure_tags(connection, module, glue_crawler): if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg="Unable to get tags for Glue crawler %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to get tags for Glue crawler {module.params.get('name')}") tags_to_add, tags_to_remove = compare_aws_tags( existing_tags, module.params.get("tags"), module.params.get("purge_tags") @@ -325,7 +325,7 @@ def ensure_tags(connection, module, glue_crawler): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Glue crawler %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}") if tags_to_add: changed = True @@ -333,7 +333,7 @@ def ensure_tags(connection, module, glue_crawler): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Glue crawler %s" % module.params.get("name")) + 
module.fail_json_aws(e, msg=f"Unable to set tags for Glue crawler {module.params.get('name')}") return changed diff --git a/plugins/modules/glue_job.py b/plugins/modules/glue_job.py index 4740deed3c9..2567799757e 100644 --- a/plugins/modules/glue_job.py +++ b/plugins/modules/glue_job.py @@ -320,7 +320,7 @@ def ensure_tags(connection, module, glue_job): return False account_id, partition = get_aws_account_info(module) - arn = "arn:{0}:glue:{1}:{2}:job/{3}".format(partition, module.region, account_id, module.params.get("name")) + arn = f"arn:{partition}:glue:{module.region}:{account_id}:job/{module.params.get('name')}" try: existing_tags = connection.get_tags(aws_retry=True, ResourceArn=arn).get("Tags", {}) @@ -328,7 +328,7 @@ def ensure_tags(connection, module, glue_job): if module.check_mode: existing_tags = {} else: - module.fail_json_aws(e, msg="Unable to get tags for Glue job %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to get tags for Glue job {module.params.get('name')}") tags_to_add, tags_to_remove = compare_aws_tags( existing_tags, module.params.get("tags"), module.params.get("purge_tags") @@ -340,7 +340,7 @@ def ensure_tags(connection, module, glue_job): try: connection.untag_resource(aws_retry=True, ResourceArn=arn, TagsToRemove=tags_to_remove) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Glue job %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue job {module.params.get('name')}") if tags_to_add: changed = True @@ -348,7 +348,7 @@ def ensure_tags(connection, module, glue_job): try: connection.tag_resource(aws_retry=True, ResourceArn=arn, TagsToAdd=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for Glue job %s" % module.params.get("name")) + module.fail_json_aws(e, msg=f"Unable to set tags for Glue job 
{module.params.get('name')}") return changed diff --git a/plugins/modules/iam_access_key.py b/plugins/modules/iam_access_key.py index a8f03d7bced..ae3e9e7dd11 100644 --- a/plugins/modules/iam_access_key.py +++ b/plugins/modules/iam_access_key.py @@ -157,7 +157,7 @@ def delete_access_key(access_keys, user, access_key_id): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete access key "{0}" for user "{1}"'.format(access_key_id, user)) + module.fail_json_aws(e, msg=f'Failed to delete access key "{access_key_id}" for user "{user}"') return True @@ -165,7 +165,7 @@ def delete_access_key(access_keys, user, access_key_id): def update_access_key(access_keys, user, access_key_id, enabled): if access_key_id not in access_keys: module.fail_json( - msg='Access key "{0}" not found attached to User "{1}"'.format(access_key_id, user), + msg=f'Access key "{access_key_id}" not found attached to User "{user}"', ) changes = dict() @@ -188,7 +188,7 @@ def update_access_key(access_keys, user, access_key_id, enabled): module.fail_json_aws( e, changes=changes, - msg='Failed to update access key "{0}" for user "{1}"'.format(access_key_id, user), + msg=f'Failed to update access key "{access_key_id}" for user "{user}"', ) return True @@ -210,7 +210,7 @@ def create_access_key(access_keys, user, rotate_keys, enabled): try: results = client.create_access_key(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to create access key for user "{0}"'.format(user)) + module.fail_json_aws(e, msg=f'Failed to create access key for user "{user}"') results = camel_dict_to_snake_dict(results) access_key = results.get("access_key") access_key = normalize_boto3_result(access_key) @@ -232,7 +232,7 @@ def get_access_keys(user): try: results = client.list_access_keys(aws_retry=True, UserName=user) except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to get access keys for user "{0}"'.format(user)) + module.fail_json_aws(e, msg=f'Failed to get access keys for user "{user}"') if not results: return None @@ -259,7 +259,7 @@ def main(): ) required_if = [ - ["state", "absent", ("id")], + ["state", "absent", ("id",)], ] mutually_exclusive = [ ["rotate_keys", "id"], diff --git a/plugins/modules/iam_access_key_info.py b/plugins/modules/iam_access_key_info.py index 22bbd564cb0..0ea8b514122 100644 --- a/plugins/modules/iam_access_key_info.py +++ b/plugins/modules/iam_access_key_info.py @@ -85,7 +85,7 @@ def get_access_keys(user): try: results = client.list_access_keys(aws_retry=True, UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to get access keys for user "{0}"'.format(user)) + module.fail_json_aws(e, msg=f'Failed to get access keys for user "{user}"') if not results: return None diff --git a/plugins/modules/iam_group.py b/plugins/modules/iam_group.py index 357671dbdc6..c4f77fde772 100644 --- a/plugins/modules/iam_group.py +++ b/plugins/modules/iam_group.py @@ -263,7 +263,7 @@ def create_or_update_group(connection, module): try: connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach policy from group %s" % params["GroupName"]) + module.fail_json_aws(e, msg=f"Couldn't detach policy from group {params['GroupName']}") # If there are policies to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above if set(managed_policies) - set(current_attached_policies_arn_list): @@ -274,13 +274,13 @@ def create_or_update_group(connection, module): try: connection.attach_group_policy(GroupName=params["GroupName"], 
PolicyArn=policy_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't attach policy to group %s" % params["GroupName"]) + module.fail_json_aws(e, msg=f"Couldn't attach policy to group {params['GroupName']}") # Manage group memberships try: current_group_members = get_group(connection, module, params["GroupName"])["Users"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") current_group_members_list = [] for member in current_group_members: @@ -296,9 +296,7 @@ def create_or_update_group(connection, module): try: connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Couldn't remove user %s from group %s" % (user, params["GroupName"]) - ) + module.fail_json_aws(e, msg=f"Couldn't remove user {user} from group {params['GroupName']}") # If there are users to adjust that aren't in the current list, then things have changed # Otherwise the only changes were in purging above if set(users) - set(current_group_members_list): @@ -309,7 +307,7 @@ def create_or_update_group(connection, module): try: connection.add_user_to_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't add user %s to group %s" % (user, params["GroupName"])) + module.fail_json_aws(e, msg=f"Couldn't add user {user} to group {params['GroupName']}") if module.check_mode: module.exit_json(changed=changed) @@ -317,7 +315,7 @@ def create_or_update_group(connection, module): try: group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) 
as e: - module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") module.exit_json(changed=changed, iam_group=camel_dict_to_snake_dict(group)) @@ -329,7 +327,7 @@ def destroy_group(connection, module): try: group = get_group(connection, module, params["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") if group: # Check mode means we would remove this group if module.check_mode: @@ -340,26 +338,26 @@ def destroy_group(connection, module): for policy in get_attached_policy_list(connection, module, params["GroupName"]): connection.detach_group_policy(GroupName=params["GroupName"], PolicyArn=policy["PolicyArn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't remove policy from group %s" % params["GroupName"]) + module.fail_json_aws(e, msg=f"Couldn't remove policy from group {params['GroupName']}") # Remove any users in the group otherwise deletion fails current_group_members_list = [] try: current_group_members = get_group(connection, module, params["GroupName"])["Users"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't get group {params['GroupName']}") for member in current_group_members: current_group_members_list.append(member["UserName"]) for user in current_group_members_list: try: connection.remove_user_from_group(GroupName=params["GroupName"], UserName=user) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't remove user %s from group %s" % (user, params["GroupName"])) + module.fail_json_aws(e, f"Couldn't remove 
user {user} from group {params['GroupName']}") try: connection.delete_group(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't delete group %s" % params["GroupName"]) + module.fail_json_aws(e, f"Couldn't delete group {params['GroupName']}") else: module.exit_json(changed=False) diff --git a/plugins/modules/iam_managed_policy.py b/plugins/modules/iam_managed_policy.py index 0f6189ca454..cc7fd8450e5 100644 --- a/plugins/modules/iam_managed_policy.py +++ b/plugins/modules/iam_managed_policy.py @@ -184,7 +184,7 @@ def get_or_create_policy_version(policy, policy_document): "Document" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get policy version {0}".format(v["VersionId"])) + module.fail_json_aws(e, msg=f"Couldn't get policy version {v['VersionId']}") if module.check_mode and compare_policies(document, json.loads(to_native(policy_document))): return v, True @@ -249,23 +249,23 @@ def detach_all_entities(policy, **kwargs): try: entities = client.list_entities_for_policy(PolicyArn=policy["Arn"], **kwargs) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach list entities for policy {0}".format(policy["PolicyName"])) + module.fail_json_aws(e, msg=f"Couldn't detach list entities for policy {policy['PolicyName']}") for g in entities["PolicyGroups"]: try: client.detach_group_policy(PolicyArn=policy["Arn"], GroupName=g["GroupName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach group policy {0}".format(g["GroupName"])) + module.fail_json_aws(e, msg=f"Couldn't detach group policy {g['GroupName']}") for u in entities["PolicyUsers"]: try: client.detach_user_policy(PolicyArn=policy["Arn"], UserName=u["UserName"]) except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach user policy {0}".format(u["UserName"])) + module.fail_json_aws(e, msg=f"Couldn't detach user policy {u['UserName']}") for r in entities["PolicyRoles"]: try: client.detach_role_policy(PolicyArn=policy["Arn"], RoleName=r["RoleName"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't detach role policy {0}".format(r["RoleName"])) + module.fail_json_aws(e, msg=f"Couldn't detach role policy {r['RoleName']}") if entities["IsTruncated"]: detach_all_entities(policy, marker=entities["Marker"]) @@ -289,7 +289,7 @@ def create_or_update_policy(existing_policy): try: rvalue = client.create_policy(PolicyName=name, Path="/", PolicyDocument=policy, Description=description) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't create policy {0}".format(name)) + module.fail_json_aws(e, msg=f"Couldn't create policy {name}") module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue["Policy"])) else: @@ -327,12 +327,12 @@ def delete_policy(existing_policy): try: client.delete_policy_version(PolicyArn=existing_policy["Arn"], VersionId=v["VersionId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy version {0}".format(v["VersionId"])) + module.fail_json_aws(e, msg=f"Couldn't delete policy version {v['VersionId']}") # Delete policy try: client.delete_policy(PolicyArn=existing_policy["Arn"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't delete policy {0}".format(existing_policy["PolicyName"])) + module.fail_json_aws(e, msg=f"Couldn't delete policy {existing_policy['PolicyName']}") # This is the one case where we will return the old policy module.exit_json(changed=True, 
policy=camel_dict_to_snake_dict(existing_policy)) diff --git a/plugins/modules/iam_password_policy.py b/plugins/modules/iam_password_policy.py index 7c93da4139f..5c65f7ebaec 100644 --- a/plugins/modules/iam_password_policy.py +++ b/plugins/modules/iam_password_policy.py @@ -112,7 +112,7 @@ def __init__(self, module): self.connection = module.resource("iam") self.module = module except Exception as e: - module.fail_json(msg="Failed to connect to AWS: %s" % str(e)) + module.fail_json(msg=f"Failed to connect to AWS: {str(e)}") def policy_to_dict(self, policy): policy_attributes = [ diff --git a/plugins/modules/iam_role.py b/plugins/modules/iam_role.py index 3cafe85d2cb..be05707238a 100644 --- a/plugins/modules/iam_role.py +++ b/plugins/modules/iam_role.py @@ -290,7 +290,7 @@ def attach_policies(module, client, policies_to_attach, role_name): client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn, aws_retry=True) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach policy {0} to role {1}".format(policy_arn, role_name)) + module.fail_json_aws(e, msg=f"Unable to attach policy {policy_arn} to role {role_name}") return changed @@ -309,7 +309,7 @@ def remove_policies(module, client, policies_to_remove, role_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to detach policy {0} from {1}".format(policy, role_name)) + module.fail_json_aws(e, msg=f"Unable to detach policy {policy} from {role_name}") return changed @@ -324,7 +324,7 @@ def remove_inline_policies(module, client, role_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete policy {0} embedded in {1}".format(policy, role_name)) + module.fail_json_aws(e, msg=f"Unable to delete policy {policy} embedded 
in {role_name}") def generate_create_params(module): @@ -376,7 +376,7 @@ def update_role_assumed_policy(module, client, role_name, target_assumed_policy, try: client.update_assume_role_policy(RoleName=role_name, PolicyDocument=target_assumed_policy, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update assume role policy for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to update assume role policy for role {role_name}") return True @@ -391,7 +391,7 @@ def update_role_description(module, client, role_name, target_description, curre try: client.update_role(RoleName=role_name, Description=target_description, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update description for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to update description for role {role_name}") return True @@ -406,7 +406,7 @@ def update_role_max_session_duration(module, client, role_name, target_duration, try: client.update_role(RoleName=role_name, MaxSessionDuration=target_duration, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update maximum session duration for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to update maximum session duration for role {role_name}") return True @@ -424,14 +424,14 @@ def update_role_permissions_boundary( try: client.delete_role_permissions_boundary(RoleName=role_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to remove permission boundary for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to remove permission boundary for role {role_name}") else: try: client.put_role_permissions_boundary( RoleName=role_name, 
PermissionsBoundary=target_permissions_boundary, aws_retry=True ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update permission boundary for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to update permission boundary for role {role_name}") return True @@ -540,7 +540,7 @@ def create_instance_profiles(module, client, role_name, path): "InstanceProfiles" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to list instance profiles for role {role_name}") # Profile already exists if any(p["InstanceProfileName"] == role_name for p in instance_profiles): @@ -560,13 +560,13 @@ def create_instance_profiles(module, client, role_name, path): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to create instance profile for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to create instance profile for role {role_name}") # And attach the role to the profile try: client.add_role_to_instance_profile(InstanceProfileName=role_name, RoleName=role_name, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach role {0} to instance profile {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to attach role {role_name} to instance profile {role_name}") return True @@ -579,7 +579,7 @@ def remove_instance_profiles(module, client, role_name): "InstanceProfiles" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list instance profiles for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to list instance profiles for role 
{role_name}") # Remove the role from the instance profile(s) for profile in instance_profiles: @@ -599,11 +599,9 @@ def remove_instance_profiles(module, client, role_name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to remove instance profile {0}".format(profile_name)) + module.fail_json_aws(e, msg=f"Unable to remove instance profile {profile_name}") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, msg="Unable to remove role {0} from instance profile {1}".format(role_name, profile_name) - ) + module.fail_json_aws(e, msg=f"Unable to remove role {role_name} from instance profile {profile_name}") def destroy_role(module, client): @@ -640,7 +638,7 @@ def get_role_with_backoff(module, client, name): "Role" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to get role {name}") def get_role(module, client, name): @@ -652,21 +650,21 @@ def get_role(module, client, name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get role {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to get role {name}") def get_attached_policy_list(module, client, name): try: return client.list_attached_role_policies(RoleName=name, aws_retry=True)["AttachedPolicies"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to list attached policies for role {name}") def get_inline_policy_list(module, client, name): try: return client.list_role_policies(RoleName=name, aws_retry=True)["PolicyNames"] except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list attached policies for role {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to list attached policies for role {name}") def get_role_tags(module, client): @@ -674,7 +672,7 @@ def get_role_tags(module, client): try: return boto3_tag_list_to_ansible_dict(client.list_role_tags(RoleName=role_name, aws_retry=True)["Tags"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to list tags for role {0}".format(role_name)) + module.fail_json_aws(e, msg=f"Unable to list tags for role {role_name}") def update_role_tags(module, client, role_name, new_tags, purge_tags): @@ -698,7 +696,7 @@ def update_role_tags(module, client, role_name, new_tags, purge_tags): if tags_to_add: client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for role %s" % role_name) + module.fail_json_aws(e, msg=f"Unable to set tags for role {role_name}") changed = bool(tags_to_add) or bool(tags_to_remove) return changed diff --git a/plugins/modules/iam_role_info.py b/plugins/modules/iam_role_info.py index a7576a131ec..d23754d90a0 100644 --- a/plugins/modules/iam_role_info.py +++ b/plugins/modules/iam_role_info.py @@ -195,15 +195,15 @@ def describe_iam_role(module, client, role): try: role["InlinePolicies"] = list_iam_role_policies_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get inline policies for role %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get inline policies for role {name}") try: role["ManagedPolicies"] = list_iam_attached_role_policies_with_backoff(client, name) except (botocore.exceptions.ClientError, 
botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get managed policies for role %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get managed policies for role {name}") try: role["InstanceProfiles"] = list_iam_instance_profiles_for_role_with_backoff(client, name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't get instance profiles for role %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get instance profiles for role {name}") try: role["tags"] = boto3_tag_list_to_ansible_dict(role["Tags"]) del role["Tags"] @@ -224,7 +224,7 @@ def describe_iam_roles(module, client): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get IAM role %s" % name) + module.fail_json_aws(e, msg=f"Couldn't get IAM role {name}") else: params = dict() if path_prefix: diff --git a/plugins/modules/iam_saml_federation.py b/plugins/modules/iam_saml_federation.py index 238aa5d9a3f..acaaa38fc37 100644 --- a/plugins/modules/iam_saml_federation.py +++ b/plugins/modules/iam_saml_federation.py @@ -148,13 +148,13 @@ def create_or_update_saml_provider(self, name, metadata): try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not get the ARN of the identity provider '{name}'") if arn: # see if metadata needs updating try: resp = self._get_saml_provider(arn) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not retrieve the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not retrieve the identity provider '{name}'") if metadata.strip() != 
resp["SAMLMetadataDocument"].strip(): # provider needs updating @@ -164,7 +164,7 @@ def create_or_update_saml_provider(self, name, metadata): resp = self._update_saml_provider(arn, metadata) res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not update the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not update the identity provider '{name}'") else: res["saml_provider"] = self._build_res(arn) @@ -175,7 +175,7 @@ def create_or_update_saml_provider(self, name, metadata): resp = self._create_saml_provider(metadata, name) res["saml_provider"] = self._build_res(resp["SAMLProviderArn"]) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not create the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not create the identity provider '{name}'") self.module.exit_json(**res) @@ -184,7 +184,7 @@ def delete_saml_provider(self, name): try: arn = self._get_provider_arn(name) except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not get the ARN of the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not get the ARN of the identity provider '{name}'") if arn: # delete res["changed"] = True @@ -192,7 +192,7 @@ def delete_saml_provider(self, name): try: self._delete_saml_provider(arn) except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Could not delete the identity provider '{0}'".format(name)) + self.module.fail_json_aws(e, msg=f"Could not delete the identity provider '{name}'") self.module.exit_json(**res) diff --git a/plugins/modules/iam_server_certificate.py b/plugins/modules/iam_server_certificate.py index dd8427dc15b..6a7734acacb 100644 --- a/plugins/modules/iam_server_certificate.py +++ b/plugins/modules/iam_server_certificate.py @@ -124,7 
+124,7 @@ def check_duplicate_cert(new_cert): continue module.fail_json( changed=False, - msg="This certificate already exists under the name {0} and dup_ok=False".format(cert_name), + msg=f"This certificate already exists under the name {cert_name} and dup_ok=False", duplicate_cert=cert, ) @@ -195,7 +195,7 @@ def create_server_certificate(): try: client.upload_server_certificate(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update server certificate {0}".format(name)) + module.fail_json_aws(e, msg=f"Failed to update server certificate {name}") return True @@ -217,7 +217,7 @@ def rename_server_certificate(current_cert): cert_metadata = current_cert.get("server_certificate_metadata", {}) if not current_cert: - module.fail_json(msg="Unable to find certificate {0}".format(name)) + module.fail_json(msg=f"Unable to find certificate {name}") current_path = cert_metadata.get("path", None) if new_path and current_path != new_path: @@ -232,7 +232,7 @@ def rename_server_certificate(current_cert): try: client.update_server_certificate(aws_retry=True, ServerCertificateName=name, **changes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to update server certificate {0}".format(name), changes=changes) + module.fail_json_aws(e, msg=f"Failed to update server certificate {name}", changes=changes) return True @@ -257,7 +257,7 @@ def delete_server_certificate(current_cert): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to delete server certificate {0}".format(name)) + module.fail_json_aws(e, msg=f"Failed to delete server certificate {name}") return True @@ -276,7 +276,7 @@ def get_server_certificate(name): botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError, ) as e: # pylint: 
disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get server certificate {0}".format(name)) + module.fail_json_aws(e, msg=f"Failed to get server certificate {name}") cert = dict(camel_dict_to_snake_dict(result.get("ServerCertificate"))) return cert @@ -353,7 +353,7 @@ def main(): if changed: results["deleted_cert"] = name else: - msg = "Certificate with the name {0} already absent".format(name) + msg = f"Certificate with the name {name} already absent" results["msg"] = msg else: if new_name or new_path: diff --git a/plugins/modules/kinesis_stream.py b/plugins/modules/kinesis_stream.py index 8147f60f3db..d1ba65c86b2 100644 --- a/plugins/modules/kinesis_stream.py +++ b/plugins/modules/kinesis_stream.py @@ -317,7 +317,7 @@ def wait_for_status(client, stream_name, status, wait_timeout=300, check_mode=Fa if not status_achieved: err_msg = "Wait time out reached, while waiting for results" else: - err_msg = "Status {0} achieved successfully".format(status) + err_msg = f"Status {status} achieved successfully" return status_achieved, err_msg, stream @@ -361,14 +361,14 @@ def tags_action(client, stream_name, tags, action="create", check_mode=False): client.remove_tags_from_stream(**params) success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" else: if action == "create": success = True elif action == "delete": success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -461,14 +461,14 @@ def stream_action(client, stream_name, shard_count=1, action="create", timeout=3 client.delete_stream(**params) success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" else: if action == "create": success = True elif action == "delete": success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" except 
botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -519,14 +519,14 @@ def stream_encryption_action( client.stop_stream_encryption(**params) success = True else: - err_msg = "Invalid encryption action {0}".format(action) + err_msg = f"Invalid encryption action {action}" else: if action == "start_encryption": success = True elif action == "stop_encryption": success = True else: - err_msg = "Invalid encryption action {0}".format(action) + err_msg = f"Invalid encryption action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -567,21 +567,21 @@ def retention_action(client, stream_name, retention_period=24, action="increase" params["RetentionPeriodHours"] = retention_period client.increase_stream_retention_period(**params) success = True - err_msg = "Retention Period increased successfully to {0}".format(retention_period) + err_msg = f"Retention Period increased successfully to {retention_period}" elif action == "decrease": params["RetentionPeriodHours"] = retention_period client.decrease_stream_retention_period(**params) success = True - err_msg = "Retention Period decreased successfully to {0}".format(retention_period) + err_msg = f"Retention Period decreased successfully to {retention_period}" else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" else: if action == "increase": success = True elif action == "decrease": success = True else: - err_msg = "Invalid action {0}".format(action) + err_msg = f"Invalid action {action}" except botocore.exceptions.ClientError as e: err_msg = to_native(e) @@ -695,9 +695,7 @@ def update( ) elif retention_period == current_stream["RetentionPeriodHours"]: - retention_msg = "Retention {0} is the same as {1}".format( - retention_period, current_stream["RetentionPeriodHours"] - ) + retention_msg = f"Retention {retention_period} is the same as {current_stream['RetentionPeriodHours']}" success = True if retention_changed: @@ -715,13 +713,12 @@ def update( 
stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found: if current_stream["StreamStatus"] != "ACTIVE": - err_msg = "Retention Period for {0} is in the process of updating".format(stream_name) + err_msg = f"Retention Period for {stream_name} is in the process of updating" return success, changed, err_msg else: err_msg = ( - "StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}".format( - current_stream.get("StreamStatus", "UNKNOWN") - ) + "StreamStatus has to be ACTIVE in order to modify the retention period." + f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}" ) return success, changed, err_msg @@ -742,7 +739,7 @@ def update( else: stream_found, stream_msg, current_stream = find_stream(client, stream_name) if stream_found and current_stream["StreamStatus"] != "ACTIVE": - err_msg = "Number of shards for {0} is in the process of updating".format(stream_name) + err_msg = f"Number of shards for {stream_name} is in the process of updating" return success, changed, err_msg if tags: @@ -753,9 +750,9 @@ def update( client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) if success and changed: - err_msg = "Kinesis Stream {0} updated successfully.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} updated successfully." elif success and not changed: - err_msg = "Kinesis Stream {0} did not change.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} did not change." 
return success, changed, err_msg @@ -829,7 +826,7 @@ def create_stream( ) if not create_success: changed = True - err_msg = "Failed to create Kinesis stream: {0}".format(create_msg) + err_msg = f"Failed to create Kinesis stream: {create_msg}" return False, True, err_msg, {} else: changed = True @@ -837,11 +834,11 @@ def create_stream( wait_success, wait_msg, results = wait_for_status( client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - err_msg = "Kinesis Stream {0} is in the process of being created".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} is in the process of being created" if not wait_success: return wait_success, True, wait_msg, results else: - err_msg = "Kinesis Stream {0} created successfully".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} created successfully" if tags: changed, err_msg = tags_action(client, stream_name, tags, action="create", check_mode=check_mode) @@ -860,8 +857,9 @@ def create_stream( if not success: return success, changed, err_msg, results else: - err_msg = "StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}".format( - current_stream.get("StreamStatus", "UNKNOWN") + err_msg = ( + "StreamStatus has to be ACTIVE in order to modify the retention period." 
+ f" Current status is {current_stream.get('StreamStatus', 'UNKNOWN')}" ) success = create_success changed = True @@ -916,15 +914,15 @@ def delete_stream(client, stream_name, wait=False, wait_timeout=300, check_mode= success, err_msg, results = wait_for_status( client, stream_name, "DELETING", wait_timeout, check_mode=check_mode ) - err_msg = "Stream {0} deleted successfully".format(stream_name) + err_msg = f"Stream {stream_name} deleted successfully" if not success: return success, True, err_msg, results else: - err_msg = "Stream {0} is in the process of being deleted".format(stream_name) + err_msg = f"Stream {stream_name} is in the process of being deleted" else: success = True changed = False - err_msg = "Stream {0} does not exist".format(stream_name) + err_msg = f"Stream {stream_name} does not exist" return success, changed, err_msg, results @@ -968,7 +966,7 @@ def start_stream_encryption( if current_stream.get("EncryptionType") == encryption_type and current_stream.get("KeyId") == key_id: changed = False success = True - err_msg = "Kinesis Stream {0} encryption already configured.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption already configured." else: success, err_msg = stream_encryption_action( client, @@ -984,15 +982,15 @@ def start_stream_encryption( success, err_msg, results = wait_for_status( client, stream_name, "ACTIVE", wait_timeout, check_mode=check_mode ) - err_msg = "Kinesis Stream {0} encryption started successfully.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption started successfully." if not success: return success, True, err_msg, results else: - err_msg = "Kinesis Stream {0} is in the process of starting encryption.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} is in the process of starting encryption." 
else: success = True changed = False - err_msg = "Kinesis Stream {0} does not exist".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} does not exist" if success: stream_found, stream_msg, results = find_stream(client, stream_name) @@ -1056,16 +1054,16 @@ def stop_stream_encryption( ) if not success: return success, True, err_msg, results - err_msg = "Kinesis Stream {0} encryption stopped successfully.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption stopped successfully." else: - err_msg = "Stream {0} is in the process of stopping encryption.".format(stream_name) + err_msg = f"Stream {stream_name} is in the process of stopping encryption." elif current_stream.get("EncryptionType") == "NONE": success = True - err_msg = "Kinesis Stream {0} encryption already stopped.".format(stream_name) + err_msg = f"Kinesis Stream {stream_name} encryption already stopped." else: success = True changed = False - err_msg = "Stream {0} does not exist.".format(stream_name) + err_msg = f"Stream {stream_name} does not exist." 
if success: stream_found, stream_msg, results = find_stream(client, stream_name) diff --git a/plugins/modules/lightsail.py b/plugins/modules/lightsail.py index 6fb83b26b1f..16b4338e7dc 100644 --- a/plugins/modules/lightsail.py +++ b/plugins/modules/lightsail.py @@ -229,8 +229,7 @@ def wait_for_instance_state(module, client, instance_name, states): module.fail_json_aws(e) else: module.fail_json( - msg='Timed out waiting for instance "{0}" to get to one of the following states -' - " {1}".format(instance_name, states) + msg=f'Timed out waiting for instance "{instance_name}" to get to one of the following states - {states}' ) diff --git a/plugins/modules/msk_cluster.py b/plugins/modules/msk_cluster.py index 960ae115bcb..aa0383294b2 100644 --- a/plugins/modules/msk_cluster.py +++ b/plugins/modules/msk_cluster.py @@ -301,7 +301,7 @@ def find_cluster_by_name(client, module, cluster_name): module.fail_json_aws(e, "Failed to find kafka cluster by name") if cluster_list: if len(cluster_list) != 1: - module.fail_json(msg="Found more than one cluster with name '{0}'".format(cluster_name)) + module.fail_json(msg=f"Found more than one cluster with name '{cluster_name}'") return cluster_list[0] return {} @@ -340,9 +340,7 @@ def wait_for_cluster_state(client, module, arn, state="ACTIVE"): if current_state == state: return if time.time() - start > timeout: - module.fail_json( - msg="Timeout waiting for cluster {0} (desired state is '{1}')".format(current_state, state) - ) + module.fail_json(msg=f"Timeout waiting for cluster {current_state} (desired state is '{state}')") time.sleep(check_interval) @@ -559,7 +557,7 @@ def create_or_update_cluster(client, module): try: update_method = getattr(client, options.get("update_method", "update_" + method)) except AttributeError as e: - module.fail_json_aws(e, "There is no update method 'update_{0}'".format(method)) + module.fail_json_aws(e, f"There is no update method 'update_{method}'") if options["current_value"] != 
options["target_value"]: changed = True @@ -575,9 +573,7 @@ def create_or_update_cluster(client, module): wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") else: module.fail_json( - msg="Cluster can be updated only in active state, current state is '{0}'. check cluster state or use wait option".format( - state - ) + msg=f"Cluster can be updated only in active state, current state is '{state}'. check cluster state or use wait option" ) try: response["changes"][method] = update_method( @@ -587,7 +583,7 @@ def create_or_update_cluster(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws(e, "Failed to update cluster via 'update_{0}'".format(method)) + module.fail_json_aws(e, f"Failed to update cluster via 'update_{method}'") if module.params["wait"]: wait_for_cluster_state(client, module, arn=cluster["ClusterArn"], state="ACTIVE") @@ -606,7 +602,7 @@ def update_cluster_tags(client, module, arn): try: existing_tags = client.list_tags_for_resource(ResourceArn=arn, aws_retry=True)["Tags"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to retrieve tags for cluster '{0}'".format(arn)) + module.fail_json_aws(e, msg=f"Unable to retrieve tags for cluster '{arn}'") tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) @@ -617,7 +613,7 @@ def update_cluster_tags(client, module, arn): if tags_to_add: client.tag_resource(ResourceArn=arn, Tags=tags_to_add, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to set tags for cluster '{0}'".format(arn)) + module.fail_json_aws(e, msg=f"Unable to set tags for cluster '{arn}'") changed = bool(tags_to_add) or bool(tags_to_remove) return changed @@ -761,7 +757,7 @@ def main(): ) if len(module.params["name"]) > 64: module.fail_json( - 
module.fail_json(msg='Cluster name "{0}" exceeds 64 character limit'.format(module.params["name"])) + msg=f"Cluster name \"{module.params['name']}\" exceeds 64 character limit" ) changed, response = create_or_update_cluster(client, module) elif module.params["state"] == "absent": @@ -784,7 +780,7 @@ def main(): ) as e: module.fail_json_aws( e, - "Can not obtain information about cluster {0}".format(response["ClusterArn"]), + f"Can not obtain information about cluster {response['ClusterArn']}", ) module.exit_json( diff --git a/plugins/modules/msk_config.py b/plugins/modules/msk_config.py index 5b67cd9924f..864827eb610 100644 --- a/plugins/modules/msk_config.py +++ b/plugins/modules/msk_config.py @@ -107,7 +107,7 @@ def dict_to_prop(d): """convert dictionary to multi-line properties""" if len(d) == 0: return "" - return "\n".join("{0}={1}".format(k, v) for k, v in d.items()) + return "\n".join(f"{k}={v}" for k, v in d.items()) def prop_to_dict(p): @@ -149,7 +149,7 @@ def find_active_config(client, module): if len(active_configs) == 1: return active_configs[0] else: - module.fail_json_aws(msg="found more than one active config with name '{0}'".format(name)) + module.fail_json_aws(msg=f"found more than one active config with name '{name}'") return None diff --git a/plugins/modules/opensearch.py b/plugins/modules/opensearch.py index 88055d1a6dd..967f0c98d01 100644 --- a/plugins/modules/opensearch.py +++ b/plugins/modules/opensearch.py @@ -565,9 +565,7 @@ def upgrade_domain(client, module, source_version, target_engine_version): # Check the module parameters to determine if this is allowed or not. if not module.params.get("allow_intermediate_upgrades"): module.fail_json( - msg="Cannot upgrade from {0} to version {1}. The highest compatible version is {2}".format( - source_version, target_engine_version, next_version - ) + msg=f"Cannot upgrade from {source_version} to version {target_engine_version}. 
The highest compatible version is {next_version}" ) parameters = { @@ -591,15 +589,13 @@ def upgrade_domain(client, module, source_version, target_engine_version): # raised if it's not possible to upgrade to the target version. module.fail_json_aws( e, - msg="Couldn't upgrade domain {0} from {1} to {2}".format(domain_name, current_version, next_version), + msg=f"Couldn't upgrade domain {domain_name} from {current_version} to {next_version}", ) if module.check_mode: module.exit_json( changed=True, - msg="Would have upgraded domain from {0} to {1} if not in check mode".format( - current_version, next_version - ), + msg=f"Would have upgraded domain from {current_version} to {next_version} if not in check mode", ) current_version = next_version @@ -664,9 +660,7 @@ def set_cluster_config(module, current_domain_config, desired_domain_config, cha } if current_domain_config is not None and current_domain_config["ClusterConfig"] != cluster_config: - change_set.append( - "ClusterConfig changed from {0} to {1}".format(current_domain_config["ClusterConfig"], cluster_config) - ) + change_set.append(f"ClusterConfig changed from {current_domain_config['ClusterConfig']} to {cluster_config}") changed = True return changed @@ -693,7 +687,7 @@ def set_ebs_options(module, current_domain_config, desired_domain_config, change ebs_config["Iops"] = ebs_opts.get("iops") if current_domain_config is not None and current_domain_config["EBSOptions"] != ebs_config: - change_set.append("EBSOptions changed from {0} to {1}".format(current_domain_config["EBSOptions"], ebs_config)) + change_set.append(f"EBSOptions changed from {current_domain_config['EBSOptions']} to {ebs_config}") changed = True return changed @@ -719,10 +713,8 @@ def set_encryption_at_rest_options(module, current_domain_config, desired_domain and current_domain_config["EncryptionAtRestOptions"] != encryption_at_rest_config ): change_set.append( - "EncryptionAtRestOptions changed from {0} to {1}".format( - 
current_domain_config["EncryptionAtRestOptions"], - encryption_at_rest_config, - ) + f"EncryptionAtRestOptions changed from {current_domain_config['EncryptionAtRestOptions']} to" + f" {encryption_at_rest_config}" ) changed = True return changed @@ -742,10 +734,8 @@ def set_node_to_node_encryption_options(module, current_domain_config, desired_d and current_domain_config["NodeToNodeEncryptionOptions"] != node_to_node_encryption_config ): change_set.append( - "NodeToNodeEncryptionOptions changed from {0} to {1}".format( - current_domain_config["NodeToNodeEncryptionOptions"], - node_to_node_encryption_config, - ) + f"NodeToNodeEncryptionOptions changed from {current_domain_config['NodeToNodeEncryptionOptions']} to" + f" {node_to_node_encryption_config}" ) changed = True return changed @@ -805,18 +795,14 @@ def set_vpc_options(module, current_domain_config, desired_domain_config, change # Note the subnets may be the same but be listed in a different order. if set(current_domain_config["VPCOptions"]["SubnetIds"]) != set(vpc_config["SubnetIds"]): change_set.append( - "SubnetIds changed from {0} to {1}".format( - current_domain_config["VPCOptions"]["SubnetIds"], - vpc_config["SubnetIds"], - ) + f"SubnetIds changed from {current_domain_config['VPCOptions']['SubnetIds']} to" + f" {vpc_config['SubnetIds']}" ) changed = True if set(current_domain_config["VPCOptions"]["SecurityGroupIds"]) != set(vpc_config["SecurityGroupIds"]): change_set.append( - "SecurityGroup changed from {0} to {1}".format( - current_domain_config["VPCOptions"]["SecurityGroupIds"], - vpc_config["SecurityGroupIds"], - ) + f"SecurityGroup changed from {current_domain_config['VPCOptions']['SecurityGroupIds']} to" + f" {vpc_config['SecurityGroupIds']}" ) changed = True return changed @@ -857,9 +843,7 @@ def set_cognito_options(module, current_domain_config, desired_domain_config, ch cognito_config["RoleArn"] = cognito_opts.get("cognito_role_arn") if current_domain_config is not None and 
current_domain_config["CognitoOptions"] != cognito_config: - change_set.append( - "CognitoOptions changed from {0} to {1}".format(current_domain_config["CognitoOptions"], cognito_config) - ) + change_set.append(f"CognitoOptions changed from {current_domain_config['CognitoOptions']} to {cognito_config}") changed = True return changed @@ -922,10 +906,8 @@ def set_advanced_security_options(module, current_domain_config, desired_domain_ and current_domain_config["AdvancedSecurityOptions"] != advanced_security_config ): change_set.append( - "AdvancedSecurityOptions changed from {0} to {1}".format( - current_domain_config["AdvancedSecurityOptions"], - advanced_security_config, - ) + f"AdvancedSecurityOptions changed from {current_domain_config['AdvancedSecurityOptions']} to" + f" {advanced_security_config}" ) changed = True return changed @@ -953,9 +935,8 @@ def set_domain_endpoint_options(module, current_domain_config, desired_domain_co if current_domain_config is not None and current_domain_config["DomainEndpointOptions"] != domain_endpoint_config: change_set.append( - "DomainEndpointOptions changed from {0} to {1}".format( - current_domain_config["DomainEndpointOptions"], domain_endpoint_config - ) + f"DomainEndpointOptions changed from {current_domain_config['DomainEndpointOptions']} to" + f" {domain_endpoint_config}" ) changed = True return changed @@ -997,18 +978,15 @@ def set_auto_tune_options(module, current_domain_config, desired_domain_config, if current_domain_config is not None: if current_domain_config["AutoTuneOptions"]["DesiredState"] != auto_tune_config["DesiredState"]: change_set.append( - "AutoTuneOptions.DesiredState changed from {0} to {1}".format( - current_domain_config["AutoTuneOptions"]["DesiredState"], - auto_tune_config["DesiredState"], - ) + "AutoTuneOptions.DesiredState changed from" + f" {current_domain_config['AutoTuneOptions']['DesiredState']} to {auto_tune_config['DesiredState']}" ) changed = True if 
auto_tune_config["MaintenanceSchedules"] != current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"]: change_set.append( - "AutoTuneOptions.MaintenanceSchedules changed from {0} to {1}".format( - current_domain_config["AutoTuneOptions"]["MaintenanceSchedules"], - auto_tune_config["MaintenanceSchedules"], - ) + "AutoTuneOptions.MaintenanceSchedules changed from" + f" {current_domain_config['AutoTuneOptions']['MaintenanceSchedules']} to" + f" {auto_tune_config['MaintenanceSchedules']}" ) changed = True return changed @@ -1023,12 +1001,12 @@ def set_access_policy(module, current_domain_config, desired_domain_config, chan try: access_policy_config = json.dumps(access_policy_opt) except Exception as e: - module.fail_json(msg="Failed to convert the policy into valid JSON: %s" % str(e)) + module.fail_json(msg=f"Failed to convert the policy into valid JSON: {str(e)}") if current_domain_config is not None: # Updating existing domain current_access_policy = json.loads(current_domain_config["AccessPolicies"]) if not compare_policies(current_access_policy, access_policy_opt): - change_set.append("AccessPolicy changed from {0} to {1}".format(current_access_policy, access_policy_opt)) + change_set.append(f"AccessPolicy changed from {current_access_policy} to {access_policy_opt}") changed = True desired_domain_config["AccessPolicies"] = access_policy_config else: @@ -1134,7 +1112,7 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws(e, msg="Couldn't update domain {0}".format(domain_name)) + module.fail_json_aws(e, msg=f"Couldn't update domain {domain_name}") else: # Create new OpenSearch cluster @@ -1152,12 +1130,12 @@ def ensure_domain_present(client, module): botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, ) as e: - module.fail_json_aws(e, msg="Couldn't update domain {0}".format(domain_name)) + module.fail_json_aws(e, msg=f"Couldn't update domain 
{domain_name}") try: existing_tags = boto3_tag_list_to_ansible_dict(client.list_tags(ARN=domain_arn, aws_retry=True)["TagList"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for domain %s" % domain_name) + module.fail_json_aws(e, f"Couldn't get tags for domain {domain_name}") desired_tags = module.params["tags"] purge_tags = module.params["purge_tags"] diff --git a/plugins/modules/redshift.py b/plugins/modules/redshift.py index 61b9e3aeb4a..91993648de0 100644 --- a/plugins/modules/redshift.py +++ b/plugins/modules/redshift.py @@ -277,7 +277,7 @@ def _ensure_tags(redshift, identifier, existing_tags, module): account_id = get_aws_account_id(module) region = module.params.get("region") - resource_arn = "arn:aws:redshift:{0}:{1}:cluster:{2}".format(region, account_id, identifier) + resource_arn = f"arn:aws:redshift:{region}:{account_id}:cluster:{identifier}" tags = module.params.get("tags") purge_tags = module.params.get("purge_tags") @@ -565,7 +565,7 @@ def modify_cluster(module, redshift): redshift, ClusterIdentifier=identifier, EnhancedVpcRouting=module.params.get("enhanced_vpc_routing") ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") if wait: attempts = wait_timeout // 60 waiter = redshift.get_waiter("cluster_available") @@ -580,7 +580,7 @@ def modify_cluster(module, redshift): redshift, ClusterIdentifier=identifier, **snake_dict_to_camel_dict(params, capitalize_first=True) ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") if module.params.get("new_cluster_identifier"): identifier = 
module.params.get("new_cluster_identifier") @@ -595,7 +595,7 @@ def modify_cluster(module, redshift): try: resource = _describe_cluster(redshift, identifier) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Couldn't modify redshift cluster %s " % identifier) + module.fail_json_aws(e, msg=f"Couldn't modify redshift cluster {identifier} ") if _ensure_tags(redshift, identifier, resource["Tags"], module): resource = redshift.describe_clusters(ClusterIdentifier=identifier)["Clusters"][0] diff --git a/plugins/modules/redshift_cross_region_snapshots.py b/plugins/modules/redshift_cross_region_snapshots.py index f4d895cb1cb..d2894dfcba8 100644 --- a/plugins/modules/redshift_cross_region_snapshots.py +++ b/plugins/modules/redshift_cross_region_snapshots.py @@ -164,8 +164,7 @@ def run_module(): if module.params.get("state") == "present": if requesting_unsupported_modifications(current_config, module.params): message = ( - "Cannot modify destination_region or grant_name. " - "Please disable cross-region snapshots, and re-run." + "Cannot modify destination_region or grant_name. Please disable cross-region snapshots, and re-run." 
) module.fail_json(msg=message, **result) if needs_update(current_config, module.params): diff --git a/plugins/modules/s3_bucket_notification.py b/plugins/modules/s3_bucket_notification.py index 9ba6e5e6799..1045164dce3 100644 --- a/plugins/modules/s3_bucket_notification.py +++ b/plugins/modules/s3_bucket_notification.py @@ -184,7 +184,7 @@ def full_config(self): try: config_lookup = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg="{0}".format(e)) + self.module.fail_json(msg=f"{e}") # Handle different event targets if config_lookup.get("QueueConfigurations"): @@ -251,7 +251,7 @@ def _upload_bucket_config(self, configs): try: self.client.put_bucket_notification_configuration(**api_params) except (ClientError, BotoCoreError) as e: - self.module.fail_json(msg="{0}".format(e)) + self.module.fail_json(msg=f"{e}") class Config: @@ -299,7 +299,7 @@ def from_params(cls, **params): elif params["lambda_alias"]: qualifier = str(params["lambda_alias"]) if qualifier: - params["lambda_function_arn"] = "{0}:{1}".format(function_arn, qualifier) + params["lambda_function_arn"] = f"{function_arn}:{qualifier}" bucket_event_params["LambdaFunctionArn"] = params["lambda_function_arn"] diff --git a/plugins/modules/s3_cors.py b/plugins/modules/s3_cors.py index 0d92ba56eac..d153c7df823 100644 --- a/plugins/modules/s3_cors.py +++ b/plugins/modules/s3_cors.py @@ -127,7 +127,7 @@ def create_or_update_bucket_cors(connection, module): try: cors = connection.put_bucket_cors(Bucket=name, CORSConfiguration={"CORSRules": new_camel_rules}) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to update CORS for bucket {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to update CORS for bucket {name}") module.exit_json(changed=changed, name=name, rules=rules) @@ -140,7 +140,7 @@ def destroy_bucket_cors(connection, module): cors = connection.delete_bucket_cors(Bucket=name) 
changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete CORS for bucket {0}".format(name)) + module.fail_json_aws(e, msg=f"Unable to delete CORS for bucket {name}") module.exit_json(changed=changed) diff --git a/plugins/modules/s3_lifecycle.py b/plugins/modules/s3_lifecycle.py index 24517b1e372..27f1179688d 100644 --- a/plugins/modules/s3_lifecycle.py +++ b/plugins/modules/s3_lifecycle.py @@ -661,7 +661,7 @@ def main(): if module.params.get(param) is None: break else: - msg = "one of the following is required when 'state' is 'present': %s" % ", ".join(required_when_present) + msg = f"one of the following is required when 'state' is 'present': {', '.join(required_when_present)}" module.fail_json(msg=msg) # If dates have been set, make sure they're in a valid format diff --git a/plugins/modules/s3_logging.py b/plugins/modules/s3_logging.py index b2eda67d135..193455a4be2 100644 --- a/plugins/modules/s3_logging.py +++ b/plugins/modules/s3_logging.py @@ -91,7 +91,7 @@ def verify_acls(connection, module, target_bucket): current_acl = connection.get_bucket_acl(aws_retry=True, Bucket=target_bucket) current_grants = current_acl["Grants"] except is_boto3_error_code("NoSuchBucket"): - module.fail_json(msg="Target Bucket '{0}' not found".format(target_bucket)) + module.fail_json(msg=f"Target Bucket '{target_bucket}' not found") except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, @@ -132,7 +132,7 @@ def enable_bucket_logging(connection, module): try: bucket_logging = connection.get_bucket_logging(aws_retry=True, Bucket=bucket_name) except is_boto3_error_code("NoSuchBucket"): - module.fail_json(msg="Bucket '{0}' not found".format(bucket_name)) + module.fail_json(msg=f"Bucket '{bucket_name}' not found") except ( botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError, diff --git a/plugins/modules/s3_metrics_configuration.py b/plugins/modules/s3_metrics_configuration.py index 
90429ca64b4..d90e7d0e603 100644 --- a/plugins/modules/s3_metrics_configuration.py +++ b/plugins/modules/s3_metrics_configuration.py @@ -153,7 +153,7 @@ def create_or_update_metrics_configuration(client, module): aws_retry=True, Bucket=bucket_name, Id=mc_id, MetricsConfiguration=new_configuration ) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to put bucket metrics configuration '%s'" % mc_id) + module.fail_json_aws(e, msg=f"Failed to put bucket metrics configuration '{mc_id}'") module.exit_json(changed=True) @@ -177,7 +177,7 @@ def delete_metrics_configuration(client, module): except is_boto3_error_code("NoSuchConfiguration"): module.exit_json(changed=False) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to delete bucket metrics configuration '%s'" % mc_id) + module.fail_json_aws(e, msg=f"Failed to delete bucket metrics configuration '{mc_id}'") module.exit_json(changed=True) diff --git a/plugins/modules/s3_sync.py b/plugins/modules/s3_sync.py index efc07efb150..36809ed2f75 100644 --- a/plugins/modules/s3_sync.py +++ b/plugins/modules/s3_sync.py @@ -413,8 +413,8 @@ def filter_list(s3, bucket, s3filelist, strategy): remote_size = entry["s3_head"]["ContentLength"] - entry["whytime"] = "{0} / {1}".format(local_modified_epoch, remote_modified_epoch) - entry["whysize"] = "{0} / {1}".format(local_size, remote_size) + entry["whytime"] = f"{local_modified_epoch} / {remote_modified_epoch}" + entry["whysize"] = f"{local_size} / {remote_size}" if local_modified_epoch <= remote_modified_epoch and local_size == remote_size: entry["skip_flag"] = True diff --git a/plugins/modules/secretsmanager_secret.py b/plugins/modules/secretsmanager_secret.py index f611d600967..1a1340df723 100644 --- a/plugins/modules/secretsmanager_secret.py +++ b/plugins/modules/secretsmanager_secret.py @@ -366,7 +366,7 @@ def put_resource_policy(self, secret): try: 
json.loads(secret.secret_resource_policy_args.get("ResourcePolicy")) except (TypeError, ValueError) as e: - self.module.fail_json(msg="Failed to parse resource policy as JSON: %s" % (str(e)), exception=format_exc()) + self.module.fail_json(msg=f"Failed to parse resource policy as JSON: {str(e)}", exception=format_exc()) try: response = self.client.put_resource_policy(**secret.secret_resource_policy_args) diff --git a/plugins/modules/ses_identity.py b/plugins/modules/ses_identity.py index 7a966da4a48..e324a7e12f7 100644 --- a/plugins/modules/ses_identity.py +++ b/plugins/modules/ses_identity.py @@ -242,9 +242,7 @@ def get_verification_attributes(connection, module, identity, retries=0, retryDe try: response = connection.get_identity_verification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, msg="Failed to retrieve identity verification attributes for {identity}".format(identity=identity) - ) + module.fail_json_aws(e, msg=f"Failed to retrieve identity verification attributes for {identity}") identity_verification = response["VerificationAttributes"] if identity in identity_verification: break @@ -265,9 +263,7 @@ def get_identity_notifications(connection, module, identity, retries=0, retryDel try: response = connection.get_identity_notification_attributes(Identities=[identity], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, msg="Failed to retrieve identity notification attributes for {identity}".format(identity=identity) - ) + module.fail_json_aws(e, msg=f"Failed to retrieve identity notification attributes for {identity}") notification_attributes = response["NotificationAttributes"] # No clear AWS docs on when this happens, but it appears sometimes identities are not included in @@ -341,10 +337,7 @@ def update_notification_topic(connection, module, identity, identity_notificatio except (BotoCoreError, ClientError) as e: module.fail_json_aws( e, - 
msg="Failed to set identity notification topic for {identity} {notification_type}".format( - identity=identity, - notification_type=notification_type, - ), + msg=f"Failed to set identity notification topic for {identity} {notification_type}", ) return True return False @@ -378,11 +371,7 @@ def update_notification_topic_headers(connection, module, identity, identity_not ) except (BotoCoreError, ClientError) as e: module.fail_json_aws( - e, - msg="Failed to set identity headers in notification for {identity} {notification_type}".format( - identity=identity, - notification_type=notification_type, - ), + e, msg=f"Failed to set identity headers in notification for {identity} {notification_type}" ) return True return False @@ -411,9 +400,7 @@ def update_feedback_forwarding(connection, module, identity, identity_notificati Identity=identity, ForwardingEnabled=required, aws_retry=True ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, msg="Failed to set identity feedback forwarding for {identity}".format(identity=identity) - ) + module.fail_json_aws(e, msg=f"Failed to set identity feedback forwarding for {identity}") return True return False @@ -460,8 +447,10 @@ def validate_params_for_identity_present(module): if module.params.get("feedback_forwarding") is False: if not (desired_topic(module, "Bounce") and desired_topic(module, "Complaint")): module.fail_json( - msg="Invalid Parameter Value 'False' for 'feedback_forwarding'. AWS requires " - "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics" + msg=( + "Invalid Parameter Value 'False' for 'feedback_forwarding'. 
AWS requires " + "feedback forwarding to be enabled unless bounces and complaints are handled by SNS topics" + ) ) @@ -477,7 +466,7 @@ def create_or_update_identity(connection, module, region, account_id): else: connection.verify_domain_identity(Domain=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to verify identity {identity}".format(identity=identity)) + module.fail_json_aws(e, msg=f"Failed to verify identity {identity}") if module.check_mode: verification_attributes = { "VerificationStatus": "Pending", @@ -520,7 +509,7 @@ def destroy_identity(connection, module): if not module.check_mode: connection.delete_identity(Identity=identity, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete identity {identity}".format(identity=identity)) + module.fail_json_aws(e, msg=f"Failed to delete identity {identity}") changed = True module.exit_json( diff --git a/plugins/modules/ses_identity_policy.py b/plugins/modules/ses_identity_policy.py index a28d027549a..9b7a3d6b6fa 100644 --- a/plugins/modules/ses_identity_policy.py +++ b/plugins/modules/ses_identity_policy.py @@ -101,7 +101,7 @@ def get_identity_policy(connection, module, identity, policy_name): try: response = connection.get_identity_policies(Identity=identity, PolicyNames=[policy_name], aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to retrieve identity policy {policy}".format(policy=policy_name)) + module.fail_json_aws(e, msg=f"Failed to retrieve identity policy {policy_name}") policies = response["Policies"] if policy_name in policies: return policies[policy_name] @@ -125,7 +125,7 @@ def create_or_update_identity_policy(connection, module): Identity=identity, PolicyName=policy_name, Policy=required_policy, aws_retry=True ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to put identity policy 
{policy}".format(policy=policy_name)) + module.fail_json_aws(e, msg=f"Failed to put identity policy {policy_name}") # Load the list of applied policies to include in the response. # In principle we should be able to just return the response, but given @@ -162,7 +162,7 @@ def delete_identity_policy(connection, module): if not module.check_mode: connection.delete_identity_policy(Identity=identity, PolicyName=policy_name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete identity policy {policy}".format(policy=policy_name)) + module.fail_json_aws(e, msg=f"Failed to delete identity policy {policy_name}") changed = True policies_present = list(policies_present) policies_present.remove(policy_name) diff --git a/plugins/modules/ses_rule_set.py b/plugins/modules/ses_rule_set.py index 9915622ed7d..8d09965774f 100644 --- a/plugins/modules/ses_rule_set.py +++ b/plugins/modules/ses_rule_set.py @@ -155,7 +155,7 @@ def update_active_rule_set(client, module, name, desired_active): try: client.set_active_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't set active rule set to {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't set active rule set to {name}.") changed = True active = True elif not desired_active and active: @@ -177,7 +177,7 @@ def create_or_update_rule_set(client, module): try: client.create_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't create rule set {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't create rule set {name}.") changed = True rule_sets = list(rule_sets) rule_sets.append( @@ -206,12 +206,13 @@ def remove_rule_set(client, module): active = ruleset_active(client, module, name) if active and not module.params.get("force"): module.fail_json( - msg="Couldn't delete rule set {0} because it is currently active. 
Set force=true to delete an active ruleset.".format( - name + msg=( + f"Couldn't delete rule set {name} because it is currently active. Set force=true to delete an" + " active ruleset." ), error={ "code": "CannotDelete", - "message": "Cannot delete active rule set: {0}".format(name), + "message": f"Cannot delete active rule set: {name}", }, ) if not check_mode: @@ -220,7 +221,7 @@ def remove_rule_set(client, module): try: client.delete_receipt_rule_set(RuleSetName=name, aws_retry=True) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete rule set {0}.".format(name)) + module.fail_json_aws(e, msg=f"Couldn't delete rule set {name}.") changed = True rule_sets = [x for x in rule_sets if x["Name"] != name] diff --git a/plugins/modules/sns.py b/plugins/modules/sns.py index 53c63a05645..493855b76e0 100644 --- a/plugins/modules/sns.py +++ b/plugins/modules/sns.py @@ -226,7 +226,7 @@ def main(): sns_kwargs["TopicArn"] = topic_arn_lookup(client, module, topic) if not sns_kwargs["TopicArn"]: - module.fail_json(msg="Could not find topic: {0}".format(topic)) + module.fail_json(msg=f"Could not find topic: {topic}") if sns_kwargs["MessageStructure"] == "json": sns_kwargs["Message"] = json.dumps(dict_msg) diff --git a/plugins/modules/sns_topic.py b/plugins/modules/sns_topic.py index 90929a476ea..c99b7580663 100644 --- a/plugins/modules/sns_topic.py +++ b/plugins/modules/sns_topic.py @@ -413,7 +413,7 @@ def _create_topic(self): try: response = self.connection.create_topic(Name=self.name, Attributes=attributes, Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't create topic %s" % self.name) + self.module.fail_json_aws(e, msg=f"Couldn't create topic {self.name}") self.topic_arn = response["TopicArn"] return True @@ -422,7 +422,7 @@ def _set_topic_attrs(self): try: topic_attributes = self.connection.get_topic_attributes(TopicArn=self.topic_arn)["Attributes"] 
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't get topic attributes for topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't get topic attributes for topic {self.topic_arn}") if self.display_name and self.display_name != topic_attributes["DisplayName"]: changed = True @@ -509,7 +509,7 @@ def _set_topic_subs(self): try: self.connection.subscribe(TopicArn=self.topic_arn, Protocol=protocol, Endpoint=endpoint) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't subscribe to topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't subscribe to topic {self.topic_arn}") return changed def _init_desired_subscription_attributes(self): @@ -537,7 +537,7 @@ def _set_topic_subs_attributes(self): "Attributes" ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, "Couldn't get subscription attributes for subscription %s" % sub_arn) + self.module.fail_json_aws(e, f"Couldn't get subscription attributes for subscription {sub_arn}") raw_message = self.desired_subscription_attributes[sub_key].get("RawMessageDelivery") if raw_message is not None and "RawMessageDelivery" in sub_current_attributes: @@ -575,7 +575,7 @@ def _delete_topic(self): try: self.connection.delete_topic(TopicArn=self.topic_arn) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Couldn't delete topic %s" % self.topic_arn) + self.module.fail_json_aws(e, msg=f"Couldn't delete topic {self.topic_arn}") return True def _name_is_arn(self): diff --git a/plugins/modules/stepfunctions_state_machine.py b/plugins/modules/stepfunctions_state_machine.py index 4bbd1503ab8..a2558c8085c 100644 --- a/plugins/modules/stepfunctions_state_machine.py +++ b/plugins/modules/stepfunctions_state_machine.py @@ -130,7 +130,7 @@ 
def create(sfn_client, module): def remove(state_machine_arn, sfn_client, module): - check_mode(module, msg="State machine would be deleted: {0}".format(state_machine_arn), changed=True) + check_mode(module, msg=f"State machine would be deleted: {state_machine_arn}", changed=True) sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) module.exit_json(changed=True, state_machine_arn=state_machine_arn) @@ -140,7 +140,7 @@ def update(state_machine_arn, sfn_client, module): tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module) if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove: - check_mode(module, msg="State machine would be updated: {0}".format(state_machine_arn), changed=True) + check_mode(module, msg=f"State machine would be updated: {state_machine_arn}", changed=True) sfn_client.update_state_machine( stateMachineArn=state_machine_arn, diff --git a/plugins/modules/waf_condition.py b/plugins/modules/waf_condition.py index efbb17e2cf8..b1baae378e8 100644 --- a/plugins/modules/waf_condition.py +++ b/plugins/modules/waf_condition.py @@ -606,7 +606,7 @@ def list_conditions(self): try: return func()[self.conditionsets] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg="Could not list %s conditions" % self.type) + self.module.fail_json_aws(e, msg=f"Could not list {self.type} conditions") def tidy_up_regex_patterns(self, regex_match_set): all_regex_match_sets = self.list_conditions() @@ -643,7 +643,7 @@ def find_and_delete_condition(self, condition_set_id): in_use_rules = self.find_condition_in_rules(condition_set_id) if in_use_rules: rulenames = ", ".join(in_use_rules) - self.module.fail_json(msg="Condition %s is in use by %s" % (current_condition["Name"], rulenames)) + self.module.fail_json(msg=f"Condition {current_condition['Name']} is in use by {rulenames}") if current_condition[self.conditiontuples]: # Filters are deleted 
using update with the DELETE action func = getattr(self.client, "update_" + self.method_suffix) diff --git a/plugins/modules/waf_info.py b/plugins/modules/waf_info.py index ea294c92ed4..711d1d8de74 100644 --- a/plugins/modules/waf_info.py +++ b/plugins/modules/waf_info.py @@ -134,7 +134,7 @@ def main(): if name: web_acls = [web_acl for web_acl in web_acls if web_acl["Name"] == name] if not web_acls: - module.fail_json(msg="WAF named %s not found" % name) + module.fail_json(msg=f"WAF named {name} not found") module.exit_json(wafs=[get_web_acl(client, module, web_acl["WebACLId"]) for web_acl in web_acls]) diff --git a/plugins/modules/waf_rule.py b/plugins/modules/waf_rule.py index 98064dd8ca4..28ff981623d 100644 --- a/plugins/modules/waf_rule.py +++ b/plugins/modules/waf_rule.py @@ -210,7 +210,7 @@ def find_and_update_rule(client, module, rule_id): try: pred_results = func()[MATCH_LOOKUP[condition_type]["conditionset"] + "s"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not list %s conditions" % condition_type) + module.fail_json_aws(e, msg=f"Could not list {condition_type} conditions") for pred in pred_results: pred["DataId"] = pred[MATCH_LOOKUP[condition_type]["conditionset"] + "Id"] all_conditions[condition_type][pred["Name"]] = camel_dict_to_snake_dict(pred) @@ -231,7 +231,7 @@ def find_and_update_rule(client, module, rule_id): for condition_type in desired_conditions: for condition_name, condition in desired_conditions[condition_type].items(): if condition_name not in all_conditions[condition_type]: - module.fail_json(msg="Condition %s of type %s does not exist" % (condition_name, condition_type)) + module.fail_json(msg=f"Condition {condition_name} of type {condition_type} does not exist") condition["data_id"] = all_conditions[condition_type][condition_name]["data_id"] if condition["data_id"] not in existing_conditions[condition_type]: insertions.append(format_for_insertion(condition)) @@ 
-326,7 +326,7 @@ def ensure_rule_absent(client, module): in_use_web_acls = find_rule_in_web_acls(client, module, rule_id) if in_use_web_acls: web_acl_names = ", ".join(in_use_web_acls) - module.fail_json(msg="Rule %s is in use by Web ACL(s) %s" % (module.params["name"], web_acl_names)) + module.fail_json(msg=f"Rule {module.params['name']} is in use by Web ACL(s) {web_acl_names}") if rule_id: remove_rule_conditions(client, module, rule_id) try: diff --git a/plugins/modules/waf_web_acl.py b/plugins/modules/waf_web_acl.py index 4b71231aec9..dd78a2778a5 100644 --- a/plugins/modules/waf_web_acl.py +++ b/plugins/modules/waf_web_acl.py @@ -205,7 +205,7 @@ def get_web_acl(client, module, web_acl_id): try: return client.get_web_acl(WebACLId=web_acl_id)["WebACL"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Could not get Web ACL with id %s" % web_acl_id) + module.fail_json_aws(e, msg=f"Could not get Web ACL with id {web_acl_id}") def list_web_acls( diff --git a/tests/unit/mock/loader.py b/tests/unit/mock/loader.py index 524870cfacc..30a53946bc3 100644 --- a/tests/unit/mock/loader.py +++ b/tests/unit/mock/loader.py @@ -51,7 +51,7 @@ def _get_file_contents(self, file_name): if file_name in self._file_mapping: return (to_bytes(self._file_mapping[file_name]), False) else: - raise AnsibleParserError("file not found: %s" % file_name) + raise AnsibleParserError(f"file not found: {file_name}") def path_exists(self, path): path = to_text(path) diff --git a/tests/unit/plugins/modules/test_acm_certificate.py b/tests/unit/plugins/modules/test_acm_certificate.py index 95f669f7c42..bb40e4413f2 100644 --- a/tests/unit/plugins/modules/test_acm_certificate.py +++ b/tests/unit/plugins/modules/test_acm_certificate.py @@ -50,7 +50,7 @@ def test_chain_compare(): pprint(expected) print("Actual:") pprint(actual) - raise AssertionError("Failed to properly split %s" % fname) + raise AssertionError(f"Failed to properly split 
{fname}") # Now test real chains # chains with same same_as should be considered equal @@ -92,10 +92,10 @@ def test_chain_compare(): print(chain["pem_text"]) print("Cert after split") pprint(chain["split"]) - print("path: %s" % chain["path"]) - print("Expected chain length: %d" % chain["length"]) - print("Actual chain length: %d" % len(chain["split"])) - raise AssertionError("Chain %s was not split properly" % chain["path"]) + print(f"path: {chain['path']}") + print(f"Expected chain length: {int(chain['length'])}") + print(f"Actual chain length: {len(chain['split'])}") + raise AssertionError(f"Chain {chain['path']} was not split properly") for chain_a in test_chains: for chain_b in test_chains: @@ -104,6 +104,6 @@ def test_chain_compare(): # Now test the comparison function actual = chain_compare(module, chain_a["pem_text"], chain_b["pem_text"]) if expected != actual: - print("Error, unexpected comparison result between \n%s\nand\n%s" % (chain_a["path"], chain_b["path"])) - print("Expected %s got %s" % (str(expected), str(actual))) + print(f"Error, unexpected comparison result between \n{chain_a['path']}\nand\n{chain_b['path']}") + print(f"Expected {str(expected)} got {str(actual)}") assert expected == actual diff --git a/tests/unit/plugins/modules/test_ec2_vpc_vpn.py b/tests/unit/plugins/modules/test_ec2_vpc_vpn.py index 6d6f7799b6d..06be32db6db 100644 --- a/tests/unit/plugins/modules/test_ec2_vpc_vpn.py +++ b/tests/unit/plugins/modules/test_ec2_vpc_vpn.py @@ -357,9 +357,7 @@ def test_check_for_update_nonmodifiable_attr(placeboify, maybe_sleep): # update a parameter that isn't modifiable m.params.update(vpn_gateway_id="invalidchange") - expected_message = "You cannot modify vpn_gateway_id, the current value of which is {0}. Modifiable VPN connection attributes are".format( - current_vgw - ) + expected_message = f"You cannot modify vpn_gateway_id, the current value of which is {current_vgw}. 
Modifiable VPN connection attributes are" with pytest.raises(ec2_vpc_vpn.VPNConnectionException, match=expected_message): ec2_vpc_vpn.check_for_update(conn, m.params, vpn["VpnConnectionId"])