diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e2241a53c..291b9cf51 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,16 +18,11 @@ repos: rev: v1.1.2 hooks: - id: doc8 - - repo: https://github.com/asottile/pyupgrade - rev: v3.19.0 - hooks: - - id: pyupgrade - args: ['--py38-plus'] - repo: https://github.com/astral-sh/ruff-pre-commit rev: v0.7.3 hooks: - id: ruff - args: ['--fix'] + args: ['--fix', '--unsafe-fixes'] - id: ruff-format - repo: https://opendev.org/openstack/hacking rev: 7.0.0 diff --git a/examples/compute/create.py b/examples/compute/create.py index d4250efec..dc68af1ae 100644 --- a/examples/compute/create.py +++ b/examples/compute/create.py @@ -46,7 +46,7 @@ def create_keypair(conn): raise e with open(PRIVATE_KEYPAIR_FILE, 'w') as f: - f.write("%s" % keypair.private_key) + f.write(str(keypair.private_key)) os.chmod(PRIVATE_KEYPAIR_FILE, 0o400) @@ -71,8 +71,4 @@ def create_server(conn): server = conn.compute.wait_for_server(server) - print( - "ssh -i {key} root@{ip}".format( - key=PRIVATE_KEYPAIR_FILE, ip=server.access_ipv4 - ) - ) + print(f"ssh -i {PRIVATE_KEYPAIR_FILE} root@{server.access_ipv4}") diff --git a/openstack/__main__.py b/openstack/__main__.py index cf3fcb34f..631429cd4 100644 --- a/openstack/__main__.py +++ b/openstack/__main__.py @@ -20,8 +20,9 @@ import pbr.version def show_version(args): print( - "OpenstackSDK Version %s" - % pbr.version.VersionInfo('openstacksdk').version_string_with_vcs() + "OpenstackSDK Version {}".format( + pbr.version.VersionInfo('openstacksdk').version_string_with_vcs() + ) ) diff --git a/openstack/accelerator/v2/deployable.py b/openstack/accelerator/v2/deployable.py index 27873937e..910caa3e9 100644 --- a/openstack/accelerator/v2/deployable.py +++ b/openstack/accelerator/v2/deployable.py @@ -64,7 +64,7 @@ class Deployable(resource.Resource): call = getattr(session, method.lower()) except AttributeError: raise exceptions.ResourceFailure( - "Invalid commit method: %s" 
% method + f"Invalid commit method: {method}" ) request.url = request.url + "/program" diff --git a/openstack/baremetal/configdrive.py b/openstack/baremetal/configdrive.py index 24bedba82..ebfa63cdc 100644 --- a/openstack/baremetal/configdrive.py +++ b/openstack/baremetal/configdrive.py @@ -67,9 +67,7 @@ def populate_directory( # Strictly speaking, user data is binary, but in many cases # it's actually a text (cloud-init, ignition, etc). flag = 't' if isinstance(user_data, str) else 'b' - with open( - os.path.join(subdir, 'user_data'), 'w%s' % flag - ) as fp: + with open(os.path.join(subdir, 'user_data'), f'w{flag}') as fp: fp.write(user_data) yield d @@ -147,15 +145,14 @@ def pack(path: str) -> str: raise RuntimeError( 'Error generating the configdrive. Make sure the ' '"genisoimage", "mkisofs" or "xorrisofs" tool is installed. ' - 'Error: %s' % error + f'Error: {error}' ) stdout, stderr = p.communicate() if p.returncode != 0: raise RuntimeError( 'Error generating the configdrive.' - 'Stdout: "%(stdout)s". Stderr: "%(stderr)s"' - % {'stdout': stdout.decode(), 'stderr': stderr.decode()} + f'Stdout: "{stdout.decode()}". 
Stderr: "{stderr.decode()}"' ) tmpfile.seek(0) diff --git a/openstack/baremetal/v1/_proxy.py b/openstack/baremetal/v1/_proxy.py index 7cb2b5f45..38907b460 100644 --- a/openstack/baremetal/v1/_proxy.py +++ b/openstack/baremetal/v1/_proxy.py @@ -63,9 +63,7 @@ class Proxy(proxy.Proxy): kwargs['fields'] = _common.fields_type(fields, resource_type) return res.fetch( self, - error_message="No {resource_type} found for {value}".format( - resource_type=resource_type.__name__, value=value - ), + error_message=f"No {resource_type.__name__} found for {value}", **kwargs, ) @@ -560,9 +558,8 @@ class Proxy(proxy.Proxy): try: for count in utils.iterate_timeout( timeout, - "Timeout waiting for nodes %(nodes)s to reach " - "target state '%(state)s'" - % {'nodes': log_nodes, 'state': expected_state}, + f"Timeout waiting for nodes {log_nodes} to reach " + f"target state '{expected_state}'", ): nodes = [self.get_node(n) for n in remaining] remaining = [] diff --git a/openstack/baremetal/v1/allocation.py b/openstack/baremetal/v1/allocation.py index d4f78163e..eb0dad8d9 100644 --- a/openstack/baremetal/v1/allocation.py +++ b/openstack/baremetal/v1/allocation.py @@ -93,14 +93,13 @@ class Allocation(_common.Resource): return self for count in utils.iterate_timeout( - timeout, "Timeout waiting for the allocation %s" % self.id + timeout, f"Timeout waiting for the allocation {self.id}" ): self.fetch(session) if self.state == 'error' and not ignore_error: raise exceptions.ResourceFailure( - "Allocation %(allocation)s failed: %(error)s" - % {'allocation': self.id, 'error': self.last_error} + f"Allocation {self.id} failed: {self.last_error}" ) elif self.state != 'allocating': return self diff --git a/openstack/baremetal/v1/driver.py b/openstack/baremetal/v1/driver.py index cea8f73de..d8a9e97fe 100644 --- a/openstack/baremetal/v1/driver.py +++ b/openstack/baremetal/v1/driver.py @@ -188,8 +188,6 @@ class Driver(resource.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = 
"Failed call to method {method} on driver {driver_name}".format( - method=method, driver_name=self.name - ) + msg = f"Failed call to method {method} on driver {self.name}" exceptions.raise_from_response(response, error_message=msg) return response diff --git a/openstack/baremetal/v1/node.py b/openstack/baremetal/v1/node.py index 06fc452ae..b7d203ae1 100644 --- a/openstack/baremetal/v1/node.py +++ b/openstack/baremetal/v1/node.py @@ -324,9 +324,8 @@ class Node(_common.Resource): microversion = _common.STATE_VERSIONS[expected_provision_state] except KeyError: raise ValueError( - "Node's provision_state must be one of %s for creation, " - "got %s" - % ( + "Node's provision_state must be one of {} for creation, " + "got {}".format( ', '.join(_common.STATE_VERSIONS), expected_provision_state, ) @@ -334,7 +333,7 @@ class Node(_common.Resource): else: error_message = ( "Cannot create a node with initial provision " - "state %s" % expected_provision_state + f"state {expected_provision_state}" ) # Nodes cannot be created as available using new API versions maximum = ( @@ -546,8 +545,8 @@ class Node(_common.Resource): expected_state = _common.EXPECTED_STATES[target] except KeyError: raise ValueError( - 'For target %s the expected state is not ' - 'known, cannot wait for it' % target + f'For target {target} the expected state is not ' + 'known, cannot wait for it' ) request = self._prepare_request(requires_id=True) @@ -561,8 +560,8 @@ class Node(_common.Resource): ) msg = ( - "Failed to set provision state for bare metal node {node} " - "to {target}".format(node=self.id, target=target) + f"Failed to set provision state for bare metal node {self.id} " + f"to {target}" ) exceptions.raise_from_response(response, error_message=msg) @@ -588,9 +587,8 @@ class Node(_common.Resource): """ for count in utils.iterate_timeout( timeout, - "Timeout waiting for node %(node)s to reach " - "power state '%(state)s'" - % {'node': self.id, 'state': expected_state}, + f"Timeout waiting for node 
{self.id} to reach " + f"power state '{expected_state}'", ): self.fetch(session) if self.power_state == expected_state: @@ -629,9 +627,8 @@ class Node(_common.Resource): """ for count in utils.iterate_timeout( timeout, - "Timeout waiting for node %(node)s to reach " - "target state '%(state)s'" - % {'node': self.id, 'state': expected_state}, + f"Timeout waiting for node {self.id} to reach " + f"target state '{expected_state}'", ): self.fetch(session) if self._check_state_reached( @@ -677,7 +674,7 @@ class Node(_common.Resource): for count in utils.iterate_timeout( timeout, - "Timeout waiting for the lock to be released on node %s" % self.id, + f"Timeout waiting for the lock to be released on node {self.id}", ): self.fetch(session) if self.reservation is None: @@ -719,13 +716,8 @@ class Node(_common.Resource): or self.provision_state == 'error' ): raise exceptions.ResourceFailure( - "Node %(node)s reached failure state \"%(state)s\"; " - "the last error is %(error)s" - % { - 'node': self.id, - 'state': self.provision_state, - 'error': self.last_error, - } + f"Node {self.id} reached failure state \"{self.provision_state}\"; " + f"the last error is {self.last_error}" ) # Special case: a failure state for "manage" transition can be # "enroll" @@ -735,10 +727,9 @@ class Node(_common.Resource): and self.last_error ): raise exceptions.ResourceFailure( - "Node %(node)s could not reach state manageable: " + f"Node {self.id} could not reach state manageable: " "failed to verify management credentials; " - "the last error is %(error)s" - % {'node': self.id, 'error': self.last_error} + f"the last error is {self.last_error}" ) def inject_nmi(self, session): @@ -789,8 +780,8 @@ class Node(_common.Resource): expected = _common.EXPECTED_POWER_STATES[target] except KeyError: raise ValueError( - "Cannot use target power state %s with wait, " - "the expected state is not known" % target + f"Cannot use target power state {target} with wait, " + "the expected state is not known" ) 
session = self._get_session(session) @@ -816,8 +807,8 @@ class Node(_common.Resource): ) msg = ( - "Failed to set power state for bare metal node {node} " - "to {target}".format(node=self.id, target=target) + f"Failed to set power state for bare metal node {self.id} " + f"to {target}" ) exceptions.raise_from_response(response, error_message=msg) @@ -893,9 +884,7 @@ class Node(_common.Resource): retriable_status_codes=retriable_status_codes, ) - msg = "Failed to attach VIF {vif} to bare metal node {node}".format( - node=self.id, vif=vif_id - ) + msg = f"Failed to attach VIF {vif_id} to bare metal node {self.id}" exceptions.raise_from_response(response, error_message=msg) def detach_vif(self, session, vif_id, ignore_missing=True): @@ -940,9 +929,7 @@ class Node(_common.Resource): ) return False - msg = "Failed to detach VIF {vif} from bare metal node {node}".format( - node=self.id, vif=vif_id - ) + msg = f"Failed to detach VIF {vif_id} from bare metal node {self.id}" exceptions.raise_from_response(response, error_message=msg) return True @@ -973,9 +960,7 @@ class Node(_common.Resource): request.url, headers=request.headers, microversion=version ) - msg = "Failed to list VIFs attached to bare metal node {node}".format( - node=self.id - ) + msg = f"Failed to list VIFs attached to bare metal node {self.id}" exceptions.raise_from_response(response, error_message=msg) return [vif['id'] for vif in response.json()['vifs']] @@ -1015,8 +1000,8 @@ class Node(_common.Resource): if failed: raise exceptions.ValidationException( - 'Validation failed for required interfaces of node {node}:' - ' {failures}'.format( + 'Validation failed for required interfaces of node ' + '{node}: {failures}'.format( node=self.id, failures=', '.join(failed) ) ) @@ -1058,9 +1043,7 @@ class Node(_common.Resource): headers=request.headers, microversion=version, ) - msg = "Failed to change maintenance mode for node {node}".format( - node=self.id - ) + msg = f"Failed to change maintenance mode for node 
{self.id}" exceptions.raise_from_response(response, error_message=msg) def get_boot_device(self, session): @@ -1081,9 +1064,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to get boot device for node {node}".format( - node=self.id, - ) + msg = f"Failed to get boot device for node {self.id}" exceptions.raise_from_response(response, error_message=msg) return response.json() @@ -1138,9 +1119,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to get supported boot devices for node {node}".format( - node=self.id, - ) + msg = f"Failed to get supported boot devices for node {self.id}" exceptions.raise_from_response(response, error_message=msg) return response.json() @@ -1164,8 +1143,8 @@ class Node(_common.Resource): request.url = utils.urljoin(request.url, 'states', 'boot_mode') if target not in ('uefi', 'bios'): raise ValueError( - "Unrecognized boot mode %s." - "Boot mode should be one of 'uefi' or 'bios'." % target + f"Unrecognized boot mode {target}." + "Boot mode should be one of 'uefi' or 'bios'." ) body = {'target': target} @@ -1200,8 +1179,8 @@ class Node(_common.Resource): request.url = utils.urljoin(request.url, 'states', 'secure_boot') if not isinstance(target, bool): raise ValueError( - "Invalid target %s. It should be True or False " - "corresponding to secure boot state 'on' or 'off'" % target + f"Invalid target {target}. 
It should be True or False " + "corresponding to secure boot state 'on' or 'off'" ) body = {'target': target} @@ -1213,9 +1192,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to change secure boot state for {node}".format( - node=self.id - ) + msg = f"Failed to change secure boot state for {self.id}" exceptions.raise_from_response(response, error_message=msg) def add_trait(self, session, trait): @@ -1237,9 +1214,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to add trait {trait} for node {node}".format( - trait=trait, node=self.id - ) + msg = f"Failed to add trait {trait} for node {self.id}" exceptions.raise_from_response(response, error_message=msg) self.traits = list(set(self.traits or ()) | {trait}) @@ -1342,10 +1317,8 @@ class Node(_common.Resource): ) msg = ( - "Failed to call vendor_passthru for node {node}, verb {verb}" - " and method {method}".format( - node=self.id, verb=verb, method=method - ) + f"Failed to call vendor_passthru for node {self.id}, verb {verb} " + f"and method {method}" ) exceptions.raise_from_response(response, error_message=msg) @@ -1369,9 +1342,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to list vendor_passthru methods for node {node}".format( - node=self.id - ) + msg = f"Failed to list vendor_passthru methods for node {self.id}" exceptions.raise_from_response(response, error_message=msg) return response.json() @@ -1394,9 +1365,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to get console for node {node}".format( - node=self.id, - ) + msg = f"Failed to get console for node {self.id}" exceptions.raise_from_response(response, error_message=msg) return response.json() @@ -1414,8 +1383,8 @@ class Node(_common.Resource): request.url = utils.urljoin(request.url, 'states', 'console') if not 
isinstance(enabled, bool): raise ValueError( - "Invalid enabled %s. It should be True or False " - "corresponding to console enabled or disabled" % enabled + f"Invalid enabled {enabled}. It should be True or False " + "corresponding to console enabled or disabled" ) body = {'enabled': enabled} @@ -1427,9 +1396,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to change console mode for {node}".format( - node=self.id, - ) + msg = f"Failed to change console mode for {self.id}" exceptions.raise_from_response(response, error_message=msg) def get_node_inventory(self, session, node_id): @@ -1457,9 +1424,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to get inventory for node {node}".format( - node=self.id, - ) + msg = f"Failed to get inventory for node {node_id}" exceptions.raise_from_response(response, error_message=msg) return response.json() @@ -1487,9 +1452,7 @@ class Node(_common.Resource): retriable_status_codes=_common.RETRIABLE_STATUS_CODES, ) - msg = "Failed to list firmware components for node {node}".format( - node=self.id - ) + msg = f"Failed to list firmware components for node {self.id}" exceptions.raise_from_response(response, error_message=msg) return response.json() diff --git a/openstack/baremetal_introspection/v1/introspection.py b/openstack/baremetal_introspection/v1/introspection.py index db9eda0c5..d7f3b878d 100644 --- a/openstack/baremetal_introspection/v1/introspection.py +++ b/openstack/baremetal_introspection/v1/introspection.py @@ -102,9 +102,7 @@ class Introspection(resource.Resource): response = session.get( request.url, headers=request.headers, microversion=version ) - msg = "Failed to fetch introspection data for node {id}".format( - id=self.id - ) + msg = f"Failed to fetch introspection data for node {self.id}" exceptions.raise_from_response(response, error_message=msg) return response.json() @@ -127,7 +125,7 @@ class 
Introspection(resource.Resource): return self for count in utils.iterate_timeout( - timeout, "Timeout waiting for introspection on node %s" % self.id + timeout, f"Timeout waiting for introspection on node {self.id}" ): self.fetch(session) if self._check_state(ignore_error): @@ -142,8 +140,7 @@ class Introspection(resource.Resource): def _check_state(self, ignore_error): if self.state == 'error' and not ignore_error: raise exceptions.ResourceFailure( - "Introspection of node %(node)s failed: %(error)s" - % {'node': self.id, 'error': self.error} + f"Introspection of node {self.id} failed: {self.error}" ) else: return self.is_finished diff --git a/openstack/block_storage/_base_proxy.py b/openstack/block_storage/_base_proxy.py index cba07da93..809d069b9 100644 --- a/openstack/block_storage/_base_proxy.py +++ b/openstack/block_storage/_base_proxy.py @@ -38,8 +38,8 @@ class BaseBlockStorageProxy(proxy.Proxy, metaclass=abc.ABCMeta): volume_obj = self.get_volume(volume) if not volume_obj: raise exceptions.SDKException( - "Volume {volume} given to create_image could" - " not be found".format(volume=volume) + f"Volume {volume} given to create_image could " + f"not be found" ) volume_id = volume_obj['id'] data = self.post( diff --git a/openstack/block_storage/v2/backup.py b/openstack/block_storage/v2/backup.py index 12c78f1cb..5fb7adf3b 100644 --- a/openstack/block_storage/v2/backup.py +++ b/openstack/block_storage/v2/backup.py @@ -142,7 +142,7 @@ class Backup(resource.Resource): else: # Just for safety of the implementation (since PUT removed) raise exceptions.ResourceFailure( - "Invalid create method: %s" % self.create_method + f"Invalid create method: {self.create_method}" ) has_body = ( diff --git a/openstack/block_storage/v3/backup.py b/openstack/block_storage/v3/backup.py index b942fe98d..9efcd3418 100644 --- a/openstack/block_storage/v3/backup.py +++ b/openstack/block_storage/v3/backup.py @@ -158,7 +158,7 @@ class Backup(resource.Resource): else: # Just for safety of 
the implementation (since PUT removed) raise exceptions.ResourceFailure( - "Invalid create method: %s" % self.create_method + f"Invalid create method: {self.create_method}" ) has_body = ( diff --git a/openstack/cloud/_baremetal.py b/openstack/cloud/_baremetal.py index c6e74c50d..9a0bc077c 100644 --- a/openstack/cloud/_baremetal.py +++ b/openstack/cloud/_baremetal.py @@ -35,7 +35,7 @@ def _normalize_port_list(nics): except KeyError: raise TypeError( "Either 'address' or 'mac' must be provided " - "for port %s" % row + f"for port {row}" ) ports.append(dict(row, address=address)) return ports @@ -126,10 +126,9 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): if node.provision_state == 'available': if node.instance_id: raise exceptions.SDKException( - "Refusing to inspect available machine %(node)s " + f"Refusing to inspect available machine {node.id} " "which is associated with an instance " - "(instance_uuid %(inst)s)" - % {'node': node.id, 'inst': node.instance_id} + f"(instance_uuid {node.instance_id})" ) return_to_available = True @@ -142,10 +141,9 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): if node.provision_state not in ('manageable', 'inspect failed'): raise exceptions.SDKException( - "Machine %(node)s must be in 'manageable', 'inspect failed' " + f"Machine {node.id} must be in 'manageable', 'inspect failed' " "or 'available' provision state to start inspection, the " - "current state is %(state)s" - % {'node': node.id, 'state': node.provision_state} + f"current state is {node.provision_state}" ) node = self.baremetal.set_node_provision_state( @@ -229,7 +227,7 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): if provision_state not in ('enroll', 'manageable', 'available'): raise ValueError( 'Initial provision state must be enroll, ' - 'manageable or available, got %s' % provision_state + f'manageable or available, got {provision_state}' ) # Available is tricky: it cannot be directly requested on newer 
API @@ -306,8 +304,8 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed'] if machine['provision_state'] in invalid_states: raise exceptions.SDKException( - "Error unregistering node '%s' due to current provision " - "state '%s'" % (uuid, machine['provision_state']) + "Error unregistering node '{}' due to current provision " + "state '{}'".format(uuid, machine['provision_state']) ) # NOTE(TheJulia) There is a high possibility of a lock being present @@ -318,8 +316,8 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): self.baremetal.wait_for_node_reservation(machine, timeout) except exceptions.SDKException as e: raise exceptions.SDKException( - "Error unregistering node '%s': Exception occured while" - " waiting to be able to proceed: %s" % (machine['uuid'], e) + "Error unregistering node '{}': Exception occured while " + "waiting to be able to proceed: {}".format(machine['uuid'], e) ) for nic in _normalize_port_list(nics): @@ -382,7 +380,7 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): machine = self.get_machine(name_or_id) if not machine: raise exceptions.SDKException( - "Machine update failed to find Machine: %s. " % name_or_id + f"Machine update failed to find Machine: {name_or_id}. " ) new_config = dict(machine._to_munch(), **attrs) @@ -394,8 +392,7 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): except Exception as e: raise exceptions.SDKException( "Machine update failed - Error generating JSON patch object " - "for submission to the API. Machine: %s Error: %s" - % (name_or_id, e) + f"for submission to the API. 
Machine: {name_or_id} Error: {e}" ) if not patch: diff --git a/openstack/cloud/_block_storage.py b/openstack/cloud/_block_storage.py index 244aca56a..5e6c5fe02 100644 --- a/openstack/cloud/_block_storage.py +++ b/openstack/cloud/_block_storage.py @@ -169,7 +169,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin): volume = self.get_volume(name_or_id) if not volume: - raise exceptions.SDKException("Volume %s not found." % name_or_id) + raise exceptions.SDKException(f"Volume {name_or_id} not found.") volume = self.block_storage.update_volume(volume, **kwargs) @@ -193,9 +193,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin): if not volume: raise exceptions.SDKException( - "Volume {name_or_id} does not exist".format( - name_or_id=name_or_id - ) + f"Volume {name_or_id} does not exist" ) self.block_storage.set_volume_bootable_status(volume, bootable) @@ -371,14 +369,16 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin): dev = self.get_volume_attach_device(volume, server['id']) if dev: raise exceptions.SDKException( - "Volume %s already attached to server %s on device %s" - % (volume['id'], server['id'], dev) + "Volume {} already attached to server {} on device {}".format( + volume['id'], server['id'], dev + ) ) if volume['status'] != 'available': raise exceptions.SDKException( - "Volume %s is not available. Status is '%s'" - % (volume['id'], volume['status']) + "Volume {} is not available. 
Status is '{}'".format( + volume['id'], volume['status'] + ) ) payload = {} @@ -766,7 +766,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin): volume_type = self.get_volume_type(name_or_id) if not volume_type: raise exceptions.SDKException( - "VolumeType not found: %s" % name_or_id + f"VolumeType not found: {name_or_id}" ) return self.block_storage.get_type_access(volume_type) @@ -786,7 +786,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin): volume_type = self.get_volume_type(name_or_id) if not volume_type: raise exceptions.SDKException( - "VolumeType not found: %s" % name_or_id + f"VolumeType not found: {name_or_id}" ) self.block_storage.add_type_access(volume_type, project_id) @@ -804,7 +804,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin): volume_type = self.get_volume_type(name_or_id) if not volume_type: raise exceptions.SDKException( - "VolumeType not found: %s" % name_or_id + f"VolumeType not found: {name_or_id}" ) self.block_storage.remove_type_access(volume_type, project_id) diff --git a/openstack/cloud/_coe.py b/openstack/cloud/_coe.py index 01fa92f8b..8783aea6c 100644 --- a/openstack/cloud/_coe.py +++ b/openstack/cloud/_coe.py @@ -124,7 +124,7 @@ class CoeCloudMixin(openstackcloud._OpenStackCloudMixin): cluster = self.get_coe_cluster(name_or_id) if not cluster: raise exceptions.SDKException( - "COE cluster %s not found." % name_or_id + f"COE cluster {name_or_id} not found." ) cluster = self.container_infrastructure_management.update_cluster( @@ -283,7 +283,7 @@ class CoeCloudMixin(openstackcloud._OpenStackCloudMixin): cluster_template = self.get_cluster_template(name_or_id) if not cluster_template: raise exceptions.SDKException( - "Cluster template %s not found." % name_or_id + f"Cluster template {name_or_id} not found." 
) cluster_template = ( diff --git a/openstack/cloud/_compute.py b/openstack/cloud/_compute.py index aabd3ddc4..6bbfaf89a 100644 --- a/openstack/cloud/_compute.py +++ b/openstack/cloud/_compute.py @@ -111,9 +111,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): ): return flavor raise exceptions.SDKException( - "Could not find a flavor with {ram} and '{include}'".format( - ram=ram, include=include - ) + f"Could not find a flavor with {ram} and '{include}'" ) def search_keypairs(self, name_or_id=None, filters=None): @@ -622,8 +620,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): server_obj = self.get_server(server, bare=True) if not server_obj: raise exceptions.SDKException( - "Server {server} could not be found and therefore" - " could not be snapshotted.".format(server=server) + f"Server {server} could not be found and therefore " + f"could not be snapshotted." ) server = server_obj image = self.compute.create_server_image( @@ -853,8 +851,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): kwargs['nics'] = [kwargs['nics']] else: raise exceptions.SDKException( - 'nics parameter to create_server takes a list of dicts.' - ' Got: {nics}'.format(nics=kwargs['nics']) + 'nics parameter to create_server takes a list of dicts. 
' + 'Got: {nics}'.format(nics=kwargs['nics']) ) if network and ('nics' not in kwargs or not kwargs['nics']): @@ -902,8 +900,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): fixed_ip = nic.pop(ip_key, None) if fixed_ip and net.get('fixed_ip'): raise exceptions.SDKException( - "Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip" - " may be given" + "Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip " + "may be given" ) if fixed_ip: net['fixed_ip'] = fixed_ip @@ -917,8 +915,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): net['tag'] = nic.pop('tag') if nic: raise exceptions.SDKException( - "Additional unsupported keys given for server network" - " creation: {keys}".format(keys=nic.keys()) + f"Additional unsupported keys given for server network " + f"creation: {nic.keys()}" ) networks.append(net) if networks: @@ -1220,23 +1218,21 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): ) self.log.debug( - 'Server %(server)s reached ACTIVE state without' - ' being allocated an IP address.' - ' Deleting server.', - {'server': server['id']}, + f'Server {server["id"]} reached ACTIVE state without ' + f'being allocated an IP address. 
Deleting server.', ) try: self._delete_server(server=server, wait=wait, timeout=timeout) except Exception as e: raise exceptions.SDKException( - 'Server reached ACTIVE state without being' - ' allocated an IP address AND then could not' - ' be deleted: {}'.format(e), + f'Server reached ACTIVE state without being ' + f'allocated an IP address AND then could not ' + f'be deleted: {e}', extra_data=dict(server=server), ) raise exceptions.SDKException( - 'Server reached ACTIVE state without being' - ' allocated an IP address.', + 'Server reached ACTIVE state without being ' + 'allocated an IP address.', extra_data=dict(server=server), ) return None @@ -1378,9 +1374,9 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): deleted = self.delete_floating_ip(ip['id'], retry=delete_ip_retry) if not deleted: raise exceptions.SDKException( - "Tried to delete floating ip {floating_ip}" - " associated with server {id} but there was" - " an error deleting it. Not deleting server.".format( + "Tried to delete floating ip {floating_ip} " + "associated with server {id} but there was " + "an error deleting it. Not deleting server.".format( floating_ip=ip['floating_ip_address'], id=server['id'] ) ) @@ -1725,7 +1721,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): aggregate = self.get_aggregate(name_or_id) if not aggregate: raise exceptions.SDKException( - "Host aggregate %s not found." % name_or_id + f"Host aggregate {name_or_id} not found." ) return self.compute.set_aggregate_metadata(aggregate, metadata) @@ -1742,7 +1738,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): aggregate = self.get_aggregate(name_or_id) if not aggregate: raise exceptions.SDKException( - "Host aggregate %s not found." % name_or_id + f"Host aggregate {name_or_id} not found." 
) return self.compute.add_host_to_aggregate(aggregate, host_name) @@ -1759,7 +1755,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): aggregate = self.get_aggregate(name_or_id) if not aggregate: raise exceptions.SDKException( - "Host aggregate %s not found." % name_or_id + f"Host aggregate {name_or_id} not found." ) return self.compute.remove_host_from_aggregate(aggregate, host_name) @@ -1823,9 +1819,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): # implementation detail - and the error message is actually # less informative. raise exceptions.SDKException( - "Date given, {date}, is invalid. Please pass in a date" - " string in ISO 8601 format -" - " YYYY-MM-DDTHH:MM:SS".format(date=date) + f"Date given, {date}, is invalid. Please pass in a date " + f"string in ISO 8601 format (YYYY-MM-DDTHH:MM:SS)" ) if isinstance(start, str): @@ -1844,7 +1839,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): if not isinstance(userdata, bytes): # If the userdata passed in is bytes, just send it unmodified if not isinstance(userdata, str): - raise TypeError("%s can't be encoded" % type(userdata)) + raise TypeError(f"{type(userdata)} can't be encoded") # If it's not bytes, make it bytes userdata = userdata.encode('utf-8', 'strict') diff --git a/openstack/cloud/_dns.py b/openstack/cloud/_dns.py index 2fe13bed8..c1a58a5a5 100644 --- a/openstack/cloud/_dns.py +++ b/openstack/cloud/_dns.py @@ -78,8 +78,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin): zone_type = zone_type.upper() if zone_type not in ('PRIMARY', 'SECONDARY'): raise exceptions.SDKException( - "Invalid type %s, valid choices are PRIMARY or SECONDARY" - % zone_type + f"Invalid type {zone_type}, valid choices are PRIMARY or SECONDARY" ) zone = { @@ -119,7 +118,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin): """ zone = self.get_zone(name_or_id) if not zone: - raise exceptions.SDKException("Zone %s not found." 
% name_or_id) + raise exceptions.SDKException(f"Zone {name_or_id} not found.") return self.dns.update_zone(zone['id'], **kwargs) @@ -156,7 +155,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin): else: zone_obj = self.get_zone(zone) if zone_obj is None: - raise exceptions.SDKException("Zone %s not found." % zone) + raise exceptions.SDKException(f"Zone {zone} not found.") return list(self.dns.recordsets(zone_obj)) def get_recordset(self, zone, name_or_id): @@ -175,7 +174,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin): else: zone_obj = self.get_zone(zone) if not zone_obj: - raise exceptions.SDKException("Zone %s not found." % zone) + raise exceptions.SDKException(f"Zone {zone} not found.") return self.dns.find_recordset( zone=zone_obj, name_or_id=name_or_id, ignore_missing=True ) @@ -206,7 +205,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin): else: zone_obj = self.get_zone(zone) if not zone_obj: - raise exceptions.SDKException("Zone %s not found." % zone) + raise exceptions.SDKException(f"Zone {zone} not found.") # We capitalize the type in case the user sends in lowercase recordset_type = recordset_type.upper() @@ -239,9 +238,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin): rs = self.get_recordset(zone, name_or_id) if not rs: - raise exceptions.SDKException( - "Recordset %s not found." 
% name_or_id - ) + raise exceptions.SDKException(f"Recordset {name_or_id} not found.") rs = self.dns.update_recordset(recordset=rs, **kwargs) diff --git a/openstack/cloud/_identity.py b/openstack/cloud/_identity.py index d70be679a..0e270f886 100644 --- a/openstack/cloud/_identity.py +++ b/openstack/cloud/_identity.py @@ -167,6 +167,8 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin): domain_id=domain_id, ignore_missing=False, ) + if not project: + raise exceptions.SDKException(f"Project {name_or_id} not found.") if enabled is not None: kwargs.update({'enabled': enabled}) project = self.identity.update_project(project, **kwargs) @@ -218,11 +220,7 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin): self.identity.delete_project(project) return True except exceptions.SDKException: - self.log.exception( - "Error in deleting project {project}".format( - project=name_or_id - ) - ) + self.log.exception(f"Error in deleting project {name_or_id}") return False @_utils.valid_kwargs('domain_id', 'name') @@ -577,9 +575,7 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin): service = self.get_service(name_or_id=service_name_or_id) if service is None: raise exceptions.SDKException( - "service {service} not found".format( - service=service_name_or_id - ) + f"service {service_name_or_id} not found" ) endpoints_args = [] @@ -786,7 +782,7 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin): return True except exceptions.SDKException: - self.log.exception("Failed to delete domain %s" % domain_id) + self.log.exception(f"Failed to delete domain {domain_id}") raise def list_domains(self, **filters): @@ -928,8 +924,8 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin): dom = self.get_domain(domain) if not dom: raise exceptions.SDKException( - "Creating group {group} failed: Invalid domain " - "{domain}".format(group=name, domain=domain) + f"Creating group {name} failed: Invalid domain " + f"{domain}" ) 
group_ref['domain_id'] = dom['id'] @@ -1124,11 +1120,11 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin): for k in ['role', 'group', 'user']: if k in filters: - filters['%s_id' % k] = filters.pop(k) + filters[f'{k}_id'] = filters.pop(k) for k in ['domain', 'project']: if k in filters: - filters['scope_%s_id' % k] = filters.pop(k) + filters[f'scope_{k}_id'] = filters.pop(k) if 'system' in filters: system_scope = filters.pop('system') diff --git a/openstack/cloud/_image.py b/openstack/cloud/_image.py index 1a0fe61a9..d521c80e9 100644 --- a/openstack/cloud/_image.py +++ b/openstack/cloud/_image.py @@ -113,16 +113,17 @@ class ImageCloudMixin(openstackcloud._OpenStackCloudMixin): """ if output_path is None and output_file is None: raise exceptions.SDKException( - 'No output specified, an output path or file object' - ' is necessary to write the image data to' + 'No output specified, an output path or file object ' + 'is necessary to write the image data to' ) elif output_path is not None and output_file is not None: raise exceptions.SDKException( - 'Both an output path and file object were provided,' - ' however only one can be used at once' + 'Both an output path and file object were provided, ' + 'however only one can be used at once' ) image = self.image.find_image(name_or_id, ignore_missing=False) + return self.image.download_image( image, output=output_file or output_path, chunk_size=chunk_size ) diff --git a/openstack/cloud/_network.py b/openstack/cloud/_network.py index f51c7b897..9538b5475 100644 --- a/openstack/cloud/_network.py +++ b/openstack/cloud/_network.py @@ -579,7 +579,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): network = self.get_network(name_or_id) if not network: - raise exceptions.SDKException("Network %s not found." 
% name_or_id) + raise exceptions.SDKException(f"Network {name_or_id} not found.") network = self.network.update_network(network, **kwargs) @@ -1356,7 +1356,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not curr_policy: raise exceptions.SDKException( - "QoS policy %s not found." % name_or_id + f"QoS policy {name_or_id} not found." ) return self.network.update_qos_policy(curr_policy, **kwargs) @@ -1426,9 +1426,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) # Translate None from search interface to empty {} for kwargs below @@ -1460,9 +1458,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) return self.network.get_qos_bandwidth_limit_rule(rule_id, policy) @@ -1498,9 +1494,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) if kwargs.get("direction") is not None: @@ -1544,9 +1538,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) if kwargs.get("direction") is not None: @@ -1594,9 +1586,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." 
) try: @@ -1657,9 +1647,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) # Translate None from search interface to empty {} for kwargs below @@ -1686,9 +1674,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) return self.network.get_qos_dscp_marking_rule(rule_id, policy) @@ -1718,9 +1704,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) return self.network.create_qos_dscp_marking_rule( @@ -1752,9 +1736,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) if not kwargs: @@ -1792,9 +1774,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) try: @@ -1859,9 +1839,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." 
) # Translate None from search interface to empty {} for kwargs below @@ -1891,9 +1869,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) return self.network.get_qos_minimum_bandwidth_rule(rule_id, policy) @@ -1927,9 +1903,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) kwargs['min_kbps'] = min_kbps @@ -1963,9 +1937,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) if not kwargs: @@ -2005,9 +1977,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): ) if not policy: raise exceptions.NotFoundException( - "QoS policy {name_or_id} not Found.".format( - name_or_id=policy_name_or_id - ) + f"QoS policy {policy_name_or_id} not Found." ) try: @@ -2235,7 +2205,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): curr_router = self.get_router(name_or_id) if not curr_router: - raise exceptions.SDKException("Router %s not found." % name_or_id) + raise exceptions.SDKException(f"Router {name_or_id} not found.") return self.network.update_router(curr_router, **router) @@ -2348,7 +2318,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): network = self.get_network(network_name_or_id, filters) if not network: raise exceptions.SDKException( - "Network %s not found." % network_name_or_id + f"Network {network_name_or_id} not found." 
) if disable_gateway_ip and gateway_ip: @@ -2378,7 +2348,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): subnetpool = self.get_subnetpool(subnetpool_name_or_id) if not subnetpool: raise exceptions.SDKException( - "Subnetpool %s not found." % subnetpool_name_or_id + f"Subnetpool {subnetpool_name_or_id} not found." ) # Be friendly on ip_version and allow strings @@ -2523,7 +2493,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): curr_subnet = self.get_subnet(name_or_id) if not curr_subnet: - raise exceptions.SDKException("Subnet %s not found." % name_or_id) + raise exceptions.SDKException(f"Subnet {name_or_id} not found.") return self.network.update_subnet(curr_subnet, **subnet) diff --git a/openstack/cloud/_network_common.py b/openstack/cloud/_network_common.py index cd440f99a..3a9187892 100644 --- a/openstack/cloud/_network_common.py +++ b/openstack/cloud/_network_common.py @@ -174,11 +174,11 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): if nat_source: raise exceptions.SDKException( 'Multiple networks were found matching ' - '{nat_net} which is the network configured ' + f'{self._nat_source} which is the network configured ' 'to be the NAT source. Please check your ' 'cloud resources. It is probably a good idea ' 'to configure this network by ID rather than ' - 'by name.'.format(nat_net=self._nat_source) + 'by name.' ) external_ipv4_floating_networks.append(network) nat_source = network @@ -192,11 +192,11 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): if nat_destination: raise exceptions.SDKException( 'Multiple networks were found matching ' - '{nat_net} which is the network configured ' + f'{self._nat_destination} which is the network configured ' 'to be the NAT destination. Please check your ' 'cloud resources. It is probably a good idea ' 'to configure this network by ID rather than ' - 'by name.'.format(nat_net=self._nat_destination) + 'by name.' 
) nat_destination = network elif self._nat_destination is None: @@ -230,12 +230,12 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): if default_network: raise exceptions.SDKException( 'Multiple networks were found matching ' - '{default_net} which is the network ' + f'{self._default_network} which is the network ' 'configured to be the default interface ' 'network. Please check your cloud resources. ' 'It is probably a good idea ' 'to configure this network by ID rather than ' - 'by name.'.format(default_net=self._default_network) + 'by name.' ) default_network = network @@ -243,58 +243,50 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): for net_name in self._external_ipv4_names: if net_name not in [net['name'] for net in external_ipv4_networks]: raise exceptions.SDKException( - "Networks: {network} was provided for external IPv4 " - "access and those networks could not be found".format( - network=net_name - ) + f"Networks: {net_name} was provided for external IPv4 " + "access and those networks could not be found" ) for net_name in self._internal_ipv4_names: if net_name not in [net['name'] for net in internal_ipv4_networks]: raise exceptions.SDKException( - "Networks: {network} was provided for internal IPv4 " - "access and those networks could not be found".format( - network=net_name - ) + f"Networks: {net_name} was provided for internal IPv4 " + "access and those networks could not be found" ) for net_name in self._external_ipv6_names: if net_name not in [net['name'] for net in external_ipv6_networks]: raise exceptions.SDKException( - "Networks: {network} was provided for external IPv6 " - "access and those networks could not be found".format( - network=net_name - ) + f"Networks: {net_name} was provided for external IPv6 " + "access and those networks could not be found" ) for net_name in self._internal_ipv6_names: if net_name not in [net['name'] for net in internal_ipv6_networks]: raise exceptions.SDKException( - 
"Networks: {network} was provided for internal IPv6 " - "access and those networks could not be found".format( - network=net_name - ) + f"Networks: {net_name} was provided for internal IPv6 " + "access and those networks could not be found" ) if self._nat_destination and not nat_destination: raise exceptions.SDKException( - 'Network {network} was configured to be the ' + f'Network {self._nat_destination} was configured to be the ' 'destination for inbound NAT but it could not be ' - 'found'.format(network=self._nat_destination) + 'found' ) if self._nat_source and not nat_source: raise exceptions.SDKException( - 'Network {network} was configured to be the ' + f'Network {self._nat_source} was configured to be the ' 'source for inbound NAT but it could not be ' - 'found'.format(network=self._nat_source) + 'found' ) if self._default_network and not default_network: raise exceptions.SDKException( - 'Network {network} was configured to be the ' + f'Network {self._default_network} was configured to be the ' 'default network interface but it could not be ' - 'found'.format(network=self._default_network) + 'found' ) self._external_ipv4_networks = external_ipv4_networks @@ -812,7 +804,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): except exceptions.NotFoundException: raise exceptions.NotFoundException( "unable to find network for floating ips with ID " - "{}".format(network_name_or_id) + f"{network_name_or_id}" ) network_id = network['id'] else: @@ -879,8 +871,8 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): ) else: raise exceptions.SDKException( - "Attempted to create FIP on port {port} " - "but something went wrong".format(port=port) + f"Attempted to create FIP on port {port} " + "but something went wrong" ) return fip @@ -970,9 +962,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): try: proxy._json_response( self.compute.delete(f'/os-floating-ips/{floating_ip_id}'), - error_message='Unable to delete 
floating IP {fip_id}'.format( - fip_id=floating_ip_id - ), + error_message=f'Unable to delete floating IP {floating_ip_id}', ) except exceptions.NotFoundException: return False @@ -1123,8 +1113,8 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): raise exceptions.SDKException( f"unable to find floating IP {floating_ip_id}" ) - error_message = "Error attaching IP {ip} to instance {id}".format( - ip=floating_ip_id, id=server_id + error_message = ( + f"Error attaching IP {floating_ip_id} to instance {server_id}" ) body = {'address': f_ip['floating_ip_address']} if fixed_address: @@ -1174,9 +1164,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): self.network.update_ip(floating_ip_id, port_id=None) except exceptions.SDKException: raise exceptions.SDKException( - "Error detaching IP {ip} from server {server_id}".format( - ip=floating_ip_id, server_id=server_id - ) + f"Error detaching IP {floating_ip_id} from server {server_id}" ) return True @@ -1187,8 +1175,8 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): raise exceptions.SDKException( f"unable to find floating IP {floating_ip_id}" ) - error_message = "Error detaching IP {ip} from instance {id}".format( - ip=floating_ip_id, id=server_id + error_message = ( + f"Error detaching IP {floating_ip_id} from instance {server_id}" ) return proxy._json_response( self.compute.post( @@ -1533,27 +1521,25 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): ) if not nat_network: raise exceptions.SDKException( - 'NAT Destination {nat_destination} was configured' - ' but not found on the cloud. Please check your' - ' config and your cloud and try again.'.format( - nat_destination=nat_destination - ) + f'NAT Destination {nat_destination} was ' + f'configured but not found on the cloud. Please ' + f'check your config and your cloud and try again.' 
) else: nat_network = self.get_nat_destination() if not nat_network: raise exceptions.SDKException( - 'Multiple ports were found for server {server}' - ' but none of the networks are a valid NAT' - ' destination, so it is impossible to add a' - ' floating IP. If you have a network that is a valid' - ' destination for NAT and we could not find it,' - ' please file a bug. But also configure the' - ' nat_destination property of the networks list in' - ' your clouds.yaml file. If you do not have a' - ' clouds.yaml file, please make one - your setup' - ' is complicated.'.format(server=server['id']) + f'Multiple ports were found for server {server["id"]} ' + f'but none of the networks are a valid NAT ' + f'destination, so it is impossible to add a ' + f'floating IP. If you have a network that is a valid ' + f'destination for NAT and we could not find it, ' + f'please file a bug. But also configure the ' + f'nat_destination property of the networks list in ' + f'your clouds.yaml file. If you do not have a ' + f'clouds.yaml file, please make one - your setup ' + f'is complicated.' ) maybe_ports = [] @@ -1562,11 +1548,9 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): maybe_ports.append(maybe_port) if not maybe_ports: raise exceptions.SDKException( - 'No port on server {server} was found matching' - ' your NAT destination network {dest}. Please ' - ' check your config'.format( - server=server['id'], dest=nat_network['name'] - ) + f'No port on server {server["id"]} was found matching ' + f'your NAT destination network {nat_network["name"]}. ' + f'Please check your config' ) ports = maybe_ports @@ -1914,7 +1898,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): if group is None: raise exceptions.SDKException( - "Security group %s not found." % name_or_id + f"Security group {name_or_id} not found." 
) if self._use_neutron_secgroups(): @@ -2006,7 +1990,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): secgroup = self.get_security_group(secgroup_name_or_id) if not secgroup: raise exceptions.SDKException( - "Security group %s not found." % secgroup_name_or_id + f"Security group {secgroup_name_or_id} not found." ) if self._use_neutron_secgroups(): diff --git a/openstack/cloud/_object_store.py b/openstack/cloud/_object_store.py index 594a06856..e7815413e 100644 --- a/openstack/cloud/_object_store.py +++ b/openstack/cloud/_object_store.py @@ -114,11 +114,9 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin): return False except exceptions.ConflictException: raise exceptions.SDKException( - 'Attempt to delete container {container} failed. The' - ' container is not empty. Please delete the objects' - ' inside it before deleting the container'.format( - container=name - ) + f'Attempt to delete container {name} failed. The ' + f'container is not empty. Please delete the objects ' + f'inside it before deleting the container' ) def update_container(self, name, headers): @@ -142,8 +140,8 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin): """ if access not in OBJECT_CONTAINER_ACLS: raise exceptions.SDKException( - "Invalid container access specified: %s. Must be one of %s" - % (access, list(OBJECT_CONTAINER_ACLS.keys())) + f"Invalid container access specified: {access}. 
" + f"Must be one of {list(OBJECT_CONTAINER_ACLS.keys())}" ) return self.object_store.set_container_metadata( name, read_ACL=OBJECT_CONTAINER_ACLS[access], refresh=refresh @@ -159,7 +157,7 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin): """ container = self.get_container(name, skip_cache=True) if not container: - raise exceptions.SDKException("Container not found: %s" % name) + raise exceptions.SDKException(f"Container not found: {name}") acl = container.read_ACL or '' for key, value in OBJECT_CONTAINER_ACLS.items(): # Convert to string for the comparison because swiftclient @@ -168,7 +166,7 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin): if str(acl) == str(value): return key raise exceptions.SDKException( - "Could not determine container access for ACL: %s." % acl + f"Could not determine container access for ACL: {acl}." ) def get_object_capabilities(self): @@ -423,13 +421,9 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin): def _get_object_endpoint(self, container, obj=None, query_string=None): endpoint = urllib.parse.quote(container) if obj: - endpoint = '{endpoint}/{object}'.format( - endpoint=endpoint, object=urllib.parse.quote(obj) - ) + endpoint = f'{endpoint}/{urllib.parse.quote(obj)}' if query_string: - endpoint = '{endpoint}?{query_string}'.format( - endpoint=endpoint, query_string=query_string - ) + endpoint = f'{endpoint}?{query_string}' return endpoint def stream_object( @@ -517,9 +511,7 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin): keystoneauth1.exceptions.RetriableConnectionFailure, exceptions.HttpException, ) as e: - error_text = "Exception processing async task: {}".format( - str(e) - ) + error_text = f"Exception processing async task: {str(e)}" if raise_on_error: self.log.exception(error_text) raise diff --git a/openstack/cloud/_utils.py b/openstack/cloud/_utils.py index 5f7efc65f..2861ca551 100644 --- a/openstack/cloud/_utils.py +++ b/openstack/cloud/_utils.py @@ 
-161,21 +161,21 @@ def _get_entity(cloud, resource, name_or_id, filters, **kwargs): # If a uuid is passed short-circuit it calling the # get__by_id method if getattr(cloud, 'use_direct_get', False) and _is_uuid_like(name_or_id): - get_resource = getattr(cloud, 'get_%s_by_id' % resource, None) + get_resource = getattr(cloud, f'get_{resource}_by_id', None) if get_resource: return get_resource(name_or_id) search = ( resource if callable(resource) - else getattr(cloud, 'search_%ss' % resource, None) + else getattr(cloud, f'search_{resource}s', None) ) if search: entities = search(name_or_id, filters, **kwargs) if entities: if len(entities) > 1: raise exceptions.SDKException( - "Multiple matches found for %s" % name_or_id + f"Multiple matches found for {name_or_id}" ) return entities[0] return None @@ -219,8 +219,8 @@ def valid_kwargs(*valid_args): for k in kwargs: if k not in argspec.args[1:] and k not in valid_args: raise TypeError( - "{f}() got an unexpected keyword argument " - "'{arg}'".format(f=inspect.stack()[1][3], arg=k) + f"{inspect.stack()[1][3]}() got an unexpected keyword argument " + f"'{k}'" ) return func(*args, **kwargs) @@ -276,9 +276,7 @@ def safe_dict_min(key, data): except ValueError: raise exceptions.SDKException( "Search for minimum value failed. " - "Value for {key} is not an integer: {value}".format( - key=key, value=d[key] - ) + f"Value for {key} is not an integer: {d[key]}" ) if (min_value is None) or (val < min_value): min_value = val @@ -309,9 +307,7 @@ def safe_dict_max(key, data): except ValueError: raise exceptions.SDKException( "Search for maximum value failed. 
" - "Value for {key} is not an integer: {value}".format( - key=key, value=d[key] - ) + f"Value for {key} is not an integer: {d[key]}" ) if (max_value is None) or (val > max_value): max_value = val @@ -429,7 +425,7 @@ def generate_patches_from_kwargs(operation, **kwargs): """ patches = [] for k, v in kwargs.items(): - patch = {'op': operation, 'value': v, 'path': '/%s' % k} + patch = {'op': operation, 'value': v, 'path': f'/{k}'} patches.append(patch) return sorted(patches) diff --git a/openstack/cloud/exc.py b/openstack/cloud/exc.py index 9c5ddfe66..986a1873c 100644 --- a/openstack/cloud/exc.py +++ b/openstack/cloud/exc.py @@ -29,9 +29,7 @@ class OpenStackCloudUnavailableFeature(OpenStackCloudException): class OpenStackCloudCreateException(OpenStackCloudException): def __init__(self, resource, resource_id, extra_data=None, **kwargs): super().__init__( - message="Error creating {resource}: {resource_id}".format( - resource=resource, resource_id=resource_id - ), + message=f"Error creating {resource}: {resource_id}", extra_data=extra_data, **kwargs, ) diff --git a/openstack/cloud/meta.py b/openstack/cloud/meta.py index 245b4e7c7..79bba5173 100644 --- a/openstack/cloud/meta.py +++ b/openstack/cloud/meta.py @@ -245,7 +245,7 @@ def find_best_address(addresses, public=False, cloud_public=True): for address in addresses: try: for count in utils.iterate_timeout( - 5, "Timeout waiting for %s" % address, wait=0.1 + 5, f"Timeout waiting for {address}", wait=0.1 ): # Return the first one that is reachable try: @@ -275,10 +275,10 @@ def find_best_address(addresses, public=False, cloud_public=True): if do_check: log = _log.setup_logging('openstack') log.debug( - "The cloud returned multiple addresses %s:, and we could not " + f"The cloud returned multiple addresses {addresses}:, and we could not " "connect to port 22 on either. 
That might be what you wanted, " "but we have no clue what's going on, so we picked the first one " - "%s" % (addresses, addresses[0]) + f"{addresses[0]}" ) return addresses[0] @@ -379,7 +379,7 @@ def get_groups_from_server(cloud, server, server_vars): if extra_group: groups.append(extra_group) - groups.append('instance-%s' % server['id']) + groups.append('instance-{}'.format(server['id'])) for key in ('flavor', 'image'): if 'name' in server_vars[key]: @@ -439,11 +439,11 @@ def _get_supplemental_addresses(cloud, server): if fixed_net is None: log = _log.setup_logging('openstack') log.debug( - "The cloud returned floating ip %(fip)s attached" - " to server %(server)s but the fixed ip associated" - " with the floating ip in the neutron listing" - " does not exist in the nova listing. Something" - " is exceptionally broken.", + "The cloud returned floating ip %(fip)s attached " + "to server %(server)s but the fixed ip associated " + "with the floating ip in the neutron listing " + "does not exist in the nova listing. 
Something " + "is exceptionally broken.", dict(fip=fip['id'], server=server['id']), ) else: diff --git a/openstack/cloud/openstackcloud.py b/openstack/cloud/openstackcloud.py index a8c8e22c1..bb82a8023 100644 --- a/openstack/cloud/openstackcloud.py +++ b/openstack/cloud/openstackcloud.py @@ -540,13 +540,8 @@ class _OpenStackCloudMixin(_services_mixin.ServicesMixin): raise except Exception as e: raise exceptions.SDKException( - "Error getting {service} endpoint on {cloud}:{region}: " - "{error}".format( - service=service_key, - cloud=self.name, - region=self.config.get_region_name(service_key), - error=str(e), - ) + f"Error getting {service_key} endpoint on {self.name}:{self.config.get_region_name(service_key)}: " + f"{str(e)}" ) return endpoint @@ -611,15 +606,14 @@ class _OpenStackCloudMixin(_services_mixin.ServicesMixin): (service_name, resource_name) = resource_type.split('.') if not hasattr(self, service_name): raise exceptions.SDKException( - "service %s is not existing/enabled" % service_name + f"service {service_name} is not existing/enabled" ) service_proxy = getattr(self, service_name) try: resource_type = service_proxy._resource_registry[resource_name] except KeyError: raise exceptions.SDKException( - "Resource %s is not known in service %s" - % (resource_name, service_name) + f"Resource {resource_name} is not known in service {service_name}" ) if name_or_id: @@ -745,6 +739,6 @@ def cleanup_task(graph, service, fn): fn() except Exception: log = _log.setup_logging('openstack.project_cleanup') - log.exception('Error in the %s cleanup function' % service) + log.exception(f'Error in the {service} cleanup function') finally: graph.node_done(service) diff --git a/openstack/compute/v2/server.py b/openstack/compute/v2/server.py index 925699474..653d77f99 100644 --- a/openstack/compute/v2/server.py +++ b/openstack/compute/v2/server.py @@ -894,7 +894,7 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin): """ action = 
CONSOLE_TYPE_ACTION_MAPPING.get(console_type) if not action: - raise ValueError("Unsupported console type %s" % console_type) + raise ValueError(f"Unsupported console type {console_type}") body = {action: {'type': console_type}} resp = self._action(session, body) return resp.json().get('console') @@ -967,12 +967,12 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin): body['host'] = host if not force: raise ValueError( - "Live migration on this cloud implies 'force'" - " if the 'host' option has been given and it is not" - " possible to disable. It is recommended to not use 'host'" - " at all on this cloud as it is inherently unsafe, but if" - " it is unavoidable, please supply 'force=True' so that it" - " is clear you understand the risks." + "Live migration on this cloud implies 'force' " + "if the 'host' option has been given and it is not " + "possible to disable. It is recommended to not use 'host' " + "at all on this cloud as it is inherently unsafe, but if " + "it is unavoidable, please supply 'force=True' so that it " + "is clear you understand the risks." ) self._action( session, @@ -994,8 +994,8 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin): } if block_migration == 'auto': raise ValueError( - "Live migration on this cloud does not support 'auto' as" - " a parameter to block_migration, but only True and False." + "Live migration on this cloud does not support 'auto' as " + "a parameter to block_migration, but only True and False." ) body['block_migration'] = block_migration or False body['disk_over_commit'] = disk_over_commit or False @@ -1003,12 +1003,12 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin): body['host'] = host if not force: raise ValueError( - "Live migration on this cloud implies 'force'" - " if the 'host' option has been given and it is not" - " possible to disable. 
It is recommended to not use 'host'" - " at all on this cloud as it is inherently unsafe, but if" - " it is unavoidable, please supply 'force=True' so that it" - " is clear you understand the risks." + "Live migration on this cloud implies 'force' " + "if the 'host' option has been given and it is not " + "possible to disable. It is recommended to not use 'host' " + "at all on this cloud as it is inherently unsafe, but if " + "it is unavoidable, please supply 'force=True' so that it " + "is clear you understand the risks." ) self._action( session, diff --git a/openstack/compute/v2/server_group.py b/openstack/compute/v2/server_group.py index fa70a9557..f61382b16 100644 --- a/openstack/compute/v2/server_group.py +++ b/openstack/compute/v2/server_group.py @@ -122,7 +122,7 @@ class ServerGroup(resource.Resource): ) else: raise exceptions.ResourceFailure( - "Invalid create method: %s" % self.create_method + f"Invalid create method: {self.create_method}" ) has_body = ( diff --git a/openstack/config/cloud_region.py b/openstack/config/cloud_region.py index a7ad99201..a769f4870 100644 --- a/openstack/config/cloud_region.py +++ b/openstack/config/cloud_region.py @@ -190,10 +190,8 @@ def from_conf(conf, session=None, service_types=None, **kwargs): _disable_service( config_dict, st, - reason="No section for project '{project}' (service type " - "'{service_type}') was present in the config.".format( - project=project_name, service_type=st - ), + reason=f"No section for project '{project_name}' (service type " + f"'{st}') was present in the config.", ) continue opt_dict: ty.Dict[str, str] = {} @@ -212,16 +210,10 @@ def from_conf(conf, session=None, service_types=None, **kwargs): # *that* blow up. 
reason = ( "Encountered an exception attempting to process config " - "for project '{project}' (service type " - "'{service_type}'): {exception}".format( - project=project_name, service_type=st, exception=e - ) - ) - _logger.warning( - "Disabling service '{service_type}': {reason}".format( - service_type=st, reason=reason - ) + f"for project '{project_name}' (service type " + f"'{st}'): {e}" ) + _logger.warning(f"Disabling service '{st}': {reason}") _disable_service(config_dict, st, reason=reason) continue # Load them into config_dict under keys prefixed by ${service_type}_ @@ -699,9 +691,8 @@ class CloudRegion: # cert verification if not verify: self.log.debug( - 'Turning off SSL warnings for %(full_name)s since ' - 'verify=False', - {'full_name': self.full_name}, + f"Turning off SSL warnings for {self.full_name} " + f"since verify=False" ) requestsexceptions.squelch_warnings(insecure_requests=not verify) self._keystone_session = self._session_constructor( @@ -765,13 +756,10 @@ class CloudRegion: and implied_microversion != default_microversion ): raise exceptions.ConfigException( - "default_microversion of {default_microversion} was given" - " for {service_type}, but api_version looks like a" - " microversion as well. Please set api_version to just the" - " desired major version, or omit default_microversion".format( - default_microversion=default_microversion, - service_type=service_type, - ) + f"default_microversion of {default_microversion} was given " + f"for {service_type}, but api_version looks like a " + f"microversion as well. 
Please set api_version to just the " + f"desired major version, or omit default_microversion" ) if implied_microversion: default_microversion = implied_microversion @@ -896,10 +884,10 @@ class CloudRegion: ): if self.get_default_microversion(service_type): raise exceptions.ConfigException( - "A default microversion for service {service_type} of" - " {default_microversion} was requested, but the cloud" - " only supports a minimum of {min_microversion} and" - " a maximum of {max_microversion}.".format( + "A default microversion for service {service_type} of " + "{default_microversion} was requested, but the cloud " + "only supports a minimum of {min_microversion} and " + "a maximum of {max_microversion}.".format( service_type=service_type, default_microversion=default_microversion, min_microversion=discover.version_to_string( @@ -912,17 +900,17 @@ class CloudRegion: ) else: raise exceptions.ConfigException( - "A default microversion for service {service_type} of" - " {default_microversion} was requested, but the cloud" - " only supports a minimum of {min_microversion} and" - " a maximum of {max_microversion}. The default" - " microversion was set because a microversion" - " formatted version string, '{api_version}', was" - " passed for the api_version of the service. If it" - " was not intended to set a default microversion" - " please remove anything other than an integer major" - " version from the version setting for" - " the service.".format( + "A default microversion for service {service_type} of " + "{default_microversion} was requested, but the cloud " + "only supports a minimum of {min_microversion} and " + "a maximum of {max_microversion}. The default " + "microversion was set because a microversion " + "formatted version string, '{api_version}', was " + "passed for the api_version of the service. 
If it " + "was not intended to set a default microversion " + "please remove anything other than an integer major " + "version from the version setting for " + "the service.".format( service_type=service_type, api_version=self.get_api_version(service_type), default_microversion=default_microversion, diff --git a/openstack/config/loader.py b/openstack/config/loader.py index 4566f052b..8b7d23110 100644 --- a/openstack/config/loader.py +++ b/openstack/config/loader.py @@ -138,10 +138,10 @@ def _fix_argv(argv): overlap.extend(old) if overlap: raise exceptions.ConfigException( - "The following options were given: '{options}' which contain" - " duplicates except that one has _ and one has -. There is" - " no sane way for us to know what you're doing. Remove the" - " duplicate option and try again".format(options=','.join(overlap)) + "The following options were given: '{options}' which contain " + "duplicates except that one has _ and one has -. There is " + "no sane way for us to know what you're doing. Remove the " + "duplicate option and try again".format(options=','.join(overlap)) ) @@ -264,12 +264,11 @@ class OpenStackConfig: self.envvar_key = self._get_envvar('OS_CLOUD_NAME', 'envvars') if self.envvar_key in self.cloud_config['clouds']: raise exceptions.ConfigException( - '"{0}" defines a cloud named "{1}", but' - ' OS_CLOUD_NAME is also set to "{1}". Please rename' - ' either your environment based cloud, or one of your' - ' file-based clouds.'.format( - self.config_filename, self.envvar_key - ) + f'{self.config_filename!r} defines a cloud named ' + f'{self.envvar_key!r}, but OS_CLOUD_NAME is also set to ' + f'{self.envvar_key!r}. ' + f'Please rename either your environment-based cloud, ' + f'or one of your file-based clouds.' 
) self.default_cloud = self._get_envvar('OS_CLOUD') @@ -501,7 +500,7 @@ class OpenStackConfig: region ): raise exceptions.ConfigException( - 'Invalid region entry at: %s' % region + f'Invalid region entry at: {region}' ) if 'values' not in region: region['values'] = {} @@ -564,9 +563,9 @@ class OpenStackConfig: return region raise exceptions.ConfigException( - 'Region {region_name} is not a valid region name for cloud' - ' {cloud}. Valid choices are {region_list}. Please note that' - ' region names are case sensitive.'.format( + 'Region {region_name} is not a valid region name for cloud ' + '{cloud}. Valid choices are {region_list}. Please note that ' + 'region names are case sensitive.'.format( region_name=region_name, region_list=','.join([r['name'] for r in regions]), cloud=cloud, @@ -638,10 +637,8 @@ class OpenStackConfig: ) elif status == 'shutdown': raise exceptions.ConfigException( - "{profile_name} references a cloud that no longer" - " exists: {message}".format( - profile_name=profile_name, message=message - ) + f"{profile_name} references a cloud that no longer " + f"exists: {message}" ) _auth_update(cloud, profile_data) else: @@ -665,8 +662,8 @@ class OpenStackConfig: for net in networks: if value and net[key]: raise exceptions.ConfigException( - "Duplicate network entries for {key}: {net1} and {net2}." - " Only one network can be flagged with {key}".format( + "Duplicate network entries for {key}: {net1} and {net2}. " + "Only one network can be flagged with {key}".format( key=key, net1=value['name'], net2=net['name'] ) ) @@ -705,9 +702,9 @@ class OpenStackConfig: external = key.startswith('external') if key in cloud and 'networks' in cloud: raise exceptions.ConfigException( - "Both {key} and networks were specified in the config." - " Please remove {key} from the config and use the network" - " list to configure network behavior.".format(key=key) + f"Both {key} and networks were specified in the config. 
" + f"Please remove {key} from the config and use the network " + f"list to configure network behavior." ) if key in cloud: warnings.warn( @@ -906,8 +903,8 @@ class OpenStackConfig: options, _args = parser.parse_known_args(argv) plugin_names = loading.get_available_plugin_names() raise exceptions.ConfigException( - "An invalid auth-type was specified: {auth_type}." - " Valid choices are: {plugin_names}.".format( + "An invalid auth-type was specified: {auth_type}. " + "Valid choices are: {plugin_names}.".format( auth_type=options.os_auth_type, plugin_names=",".join(plugin_names), ) diff --git a/openstack/config/vendors/__init__.py b/openstack/config/vendors/__init__.py index 68d68b74e..28671f713 100644 --- a/openstack/config/vendors/__init__.py +++ b/openstack/config/vendors/__init__.py @@ -58,12 +58,8 @@ def get_profile(profile_name): response = requests.get(well_known_url) if not response.ok: raise exceptions.ConfigException( - "{profile_name} is a remote profile that could not be fetched:" - " {status_code} {reason}".format( - profile_name=profile_name, - status_code=response.status_code, - reason=response.reason, - ) + f"{profile_name} is a remote profile that could not be fetched: " + f"{response.status_code} {response.reason}" ) vendor_defaults[profile_name] = None return diff --git a/openstack/exceptions.py b/openstack/exceptions.py index 528cc4c0f..f9f767b58 100644 --- a/openstack/exceptions.py +++ b/openstack/exceptions.py @@ -69,13 +69,9 @@ class HttpException(SDKException, _rex.HTTPError): # to be None once we're not mocking Session everywhere. 
if not message: if response is not None: - message = "{name}: {code}".format( - name=self.__class__.__name__, code=response.status_code - ) + message = f"{self.__class__.__name__}: {response.status_code}" else: - message = "{name}: Unknown error".format( - name=self.__class__.__name__ - ) + message = f"{self.__class__.__name__}: Unknown error" # Call directly rather than via super to control parameters SDKException.__init__(self, message=message) @@ -102,18 +98,13 @@ class HttpException(SDKException, _rex.HTTPError): if not self.url or self.message == 'Error': return self.message if self.url: - remote_error = "{source} Error for url: {url}".format( - source=self.source, url=self.url - ) + remote_error = f"{self.source} Error for url: {self.url}" if self.details: remote_error += ', ' if self.details: remote_error += str(self.details) - return "{message}: {remote_error}".format( - message=super().__str__(), - remote_error=remote_error, - ) + return f"{super().__str__()}: {remote_error}" class BadRequestException(HttpException): @@ -146,11 +137,7 @@ class MethodNotSupported(SDKException): except AttributeError: name = resource.__class__.__name__ - message = 'The {} method is not supported for {}.{}'.format( - method, - resource.__module__, - name, - ) + message = f'The {method} method is not supported for {resource.__module__}.{name}' super().__init__(message=message) diff --git a/openstack/format.py b/openstack/format.py index b72c34f97..51d92b2d0 100644 --- a/openstack/format.py +++ b/openstack/format.py @@ -28,6 +28,4 @@ class BoolStr(Formatter): elif "false" == expr: return False else: - raise ValueError( - "Unable to deserialize boolean string: %s" % value - ) + raise ValueError(f"Unable to deserialize boolean string: {value}") diff --git a/openstack/image/_download.py b/openstack/image/_download.py index ce9ad5285..1751bf237 100644 --- a/openstack/image/_download.py +++ b/openstack/image/_download.py @@ -85,9 +85,7 @@ class DownloadMixin: return resp except 
Exception as e: - raise exceptions.SDKException( - "Unable to download image: %s" % e - ) + raise exceptions.SDKException(f"Unable to download image: {e}") # if we are returning the repsonse object, ensure that it # has the content-md5 header so that the caller doesn't # need to jump through the same hoops through which we diff --git a/openstack/image/v2/_proxy.py b/openstack/image/v2/_proxy.py index 5d54dbff5..543a6c9df 100644 --- a/openstack/image/v2/_proxy.py +++ b/openstack/image/v2/_proxy.py @@ -464,7 +464,7 @@ class Proxy(proxy.Proxy): if 'queued' != image.status: raise exceptions.SDKException( 'Image stage is only possible for images in the queued state. ' - 'Current state is {status}'.format(status=image.status) + f'Current state is {image.status}' ) if filename: @@ -694,9 +694,9 @@ class Proxy(proxy.Proxy): ): if not self._connection.has_service('object-store'): raise exceptions.SDKException( - "The cloud {cloud} is configured to use tasks for image " + f"The cloud {self._connection.config.name} is configured to use tasks for image " "upload, but no object-store service is available. " - "Aborting.".format(cloud=self._connection.config.name) + "Aborting." 
) properties = image_kwargs.get('properties', {}) @@ -759,9 +759,7 @@ class Proxy(proxy.Proxy): except exceptions.ResourceFailure as e: glance_task = self.get_task(glance_task) raise exceptions.SDKException( - "Image creation failed: {message}".format( - message=e.message - ), + f"Image creation failed: {e.message}", extra_data=glance_task, ) finally: @@ -1839,9 +1837,7 @@ class Proxy(proxy.Proxy): return task name = f"{task.__class__.__name__}:{task.id}" - msg = "Timeout waiting for {name} to transition to {status}".format( - name=name, status=status - ) + msg = f"Timeout waiting for {name} to transition to {status}" for count in utils.iterate_timeout( timeout=wait, message=msg, wait=interval @@ -1850,9 +1846,7 @@ class Proxy(proxy.Proxy): if not task: raise exceptions.ResourceFailure( - "{name} went away while waiting for {status}".format( - name=name, status=status - ) + f"{name} went away while waiting for {status}" ) new_status = task.status @@ -1863,12 +1857,10 @@ class Proxy(proxy.Proxy): if task.message == _IMAGE_ERROR_396: task_args = {'input': task.input, 'type': task.type} task = self.create_task(**task_args) - self.log.debug('Got error 396. Recreating task %s' % task) + self.log.debug(f'Got error 396. Recreating task {task}') else: raise exceptions.ResourceFailure( - "{name} transitioned to failure state {status}".format( - name=name, status=new_status - ) + f"{name} transitioned to failure state {new_status}" ) self.log.debug( diff --git a/openstack/key_manager/v1/_format.py b/openstack/key_manager/v1/_format.py index 58313c0a0..74ad58366 100644 --- a/openstack/key_manager/v1/_format.py +++ b/openstack/key_manager/v1/_format.py @@ -24,7 +24,7 @@ class HREFToUUID(format.Formatter): # Only try to proceed if we have an actual URI. # Just check that we have a scheme, netloc, and path. 
if not all(parts[:3]): - raise ValueError("Unable to convert %s to an ID" % value) + raise ValueError(f"Unable to convert {value} to an ID") # The UUID will be the last portion of the URI. return parts.path.split("/")[-1] diff --git a/openstack/message/v2/message.py b/openstack/message/v2/message.py index 0b73b0605..4041b181d 100644 --- a/openstack/message/v2/message.py +++ b/openstack/message/v2/message.py @@ -155,7 +155,7 @@ class Message(resource.Resource): # parameter when deleting a message that has been claimed, we # rebuild the request URI if claim_id is not None. if self.claim_id: - request.url += '?claim_id=%s' % self.claim_id + request.url += f'?claim_id={self.claim_id}' response = session.delete(request.url, headers=headers) self._translate_response(response, has_body=False) diff --git a/openstack/network/v2/_proxy.py b/openstack/network/v2/_proxy.py index e571de02a..5fca2548f 100644 --- a/openstack/network/v2/_proxy.py +++ b/openstack/network/v2/_proxy.py @@ -5490,8 +5490,7 @@ class Proxy(proxy.Proxy): resource.tags except AttributeError: raise exceptions.InvalidRequest( - '%s resource does not support tag' - % resource.__class__.__name__ + f'{resource.__class__.__name__} resource does not support tag' ) def get_tags(self, resource): @@ -7190,7 +7189,7 @@ class Proxy(proxy.Proxy): for port in self.ports( project_id=project_id, network_id=net.id ): - self.log.debug('Looking at port %s' % port) + self.log.debug(f'Looking at port {port}') if port.device_owner in [ 'network:router_interface', 'network:router_interface_distributed', @@ -7213,7 +7212,7 @@ class Proxy(proxy.Proxy): if network_has_ports_allocated: # If some ports are on net - we cannot delete it continue - self.log.debug('Network %s should be deleted' % net) + self.log.debug(f'Network {net} should be deleted') # __Check__ if we need to drop network according to filters network_must_be_deleted = self._service_cleanup_del_res( self.delete_network, @@ -7253,7 +7252,7 @@ class Proxy(proxy.Proxy): 
router=port.device_id, port_id=port.id ) except exceptions.SDKException: - self.log.error('Cannot delete object %s' % obj) + self.log.error(f'Cannot delete object {obj}') # router disconnected, drop it self._service_cleanup_del_res( self.delete_router, diff --git a/openstack/object_store/v1/_proxy.py b/openstack/object_store/v1/_proxy.py index 9bb6de618..a419ad1c6 100644 --- a/openstack/object_store/v1/_proxy.py +++ b/openstack/object_store/v1/_proxy.py @@ -430,9 +430,7 @@ class Proxy(proxy.Proxy): metadata[self._connection._OBJECT_SHA256_KEY] = sha256 container_name = self._get_container_name(container=container) - endpoint = '{container}/{name}'.format( - container=container_name, name=name - ) + endpoint = f'{container_name}/{name}' if data is not None: self.log.debug( @@ -582,9 +580,7 @@ class Proxy(proxy.Proxy): metadata = self.get_object_metadata(name, container).metadata except exceptions.NotFoundException: self._connection.log.debug( - "swift stale check, no object: {container}/{name}".format( - container=container, name=name - ) + f"swift stale check, no object: {container}/{name}" ) return True @@ -608,7 +604,7 @@ class Proxy(proxy.Proxy): if not up_to_date: self._connection.log.debug( "swift checksum mismatch: " - " %(filename)s!=%(container)s/%(name)s", + "%(filename)s!=%(container)s/%(name)s", {'filename': filename, 'container': container, 'name': name}, ) return True @@ -758,9 +754,7 @@ class Proxy(proxy.Proxy): offset, segment_size if segment_size < remaining else remaining, ) - name = '{endpoint}/{index:0>6}'.format( - endpoint=endpoint, index=index - ) + name = f'{endpoint}/{index:0>6}' segments[name] = segment return segments @@ -878,8 +872,8 @@ class Proxy(proxy.Proxy): temp_url_key = self.get_temp_url_key(container) if not temp_url_key: raise exceptions.SDKException( - 'temp_url_key was not given, nor was a temporary url key' - ' found for the account or the container.' 
+ 'temp_url_key was not given, nor was a temporary url key ' + 'found for the account or the container.' ) return temp_url_key @@ -933,13 +927,7 @@ class Proxy(proxy.Proxy): endpoint = parse.urlparse(self.get_endpoint()) path = '/'.join([endpoint.path, res.name, object_prefix]) - data = '{}\n{}\n{}\n{}\n{}'.format( - path, - redirect_url, - max_file_size, - max_upload_count, - expires, - ) + data = f'{path}\n{redirect_url}\n{max_file_size}\n{max_upload_count}\n{expires}' sig = hmac.new(temp_url_key, data.encode(), sha1).hexdigest() return (expires, sig) @@ -1067,7 +1055,7 @@ class Proxy(proxy.Proxy): ip_range = ip_range.decode('utf-8') except UnicodeDecodeError: raise ValueError('ip_range must be representable as UTF-8') - hmac_parts.insert(0, "ip=%s" % ip_range) + hmac_parts.insert(0, f"ip={ip_range}") hmac_body = '\n'.join(hmac_parts) @@ -1084,11 +1072,7 @@ class Proxy(proxy.Proxy): else: exp = str(expiration) - temp_url = '{path}?temp_url_sig={sig}&temp_url_expires={exp}'.format( - path=path_for_body, - sig=sig, - exp=exp, - ) + temp_url = f'{path_for_body}?temp_url_sig={sig}&temp_url_expires={exp}' if ip_range: temp_url += f'&temp_url_ip_range={ip_range}' diff --git a/openstack/orchestration/util/environment_format.py b/openstack/orchestration/util/environment_format.py index 7afbe06b9..f547550a5 100644 --- a/openstack/orchestration/util/environment_format.py +++ b/openstack/orchestration/util/environment_format.py @@ -58,6 +58,6 @@ def parse(env_str): for param in env: if param not in SECTIONS: - raise ValueError('environment has wrong section "%s"' % param) + raise ValueError(f'environment has wrong section "{param}"') return env diff --git a/openstack/orchestration/util/event_utils.py b/openstack/orchestration/util/event_utils.py index 4e079f4e0..d5f86905f 100644 --- a/openstack/orchestration/util/event_utils.py +++ b/openstack/orchestration/util/event_utils.py @@ -50,7 +50,7 @@ def poll_for_events( """Continuously poll events and logs for performed action 
on stack.""" def stop_check_action(a): - stop_status = ('%s_FAILED' % action, '%s_COMPLETE' % action) + stop_status = (f'{action}_FAILED', f'{action}_COMPLETE') return a in stop_status def stop_check_no_action(a): diff --git a/openstack/orchestration/util/template_utils.py b/openstack/orchestration/util/template_utils.py index 48797f24d..0d8e9ebec 100644 --- a/openstack/orchestration/util/template_utils.py +++ b/openstack/orchestration/util/template_utils.py @@ -50,13 +50,13 @@ def get_template_contents( return {}, None else: raise exceptions.SDKException( - 'Must provide one of template_file,' - ' template_url or template_object' + 'Must provide one of template_file, template_url or ' + 'template_object' ) if not tpl: raise exceptions.SDKException( - 'Could not fetch template from %s' % template_url + f'Could not fetch template from {template_url}' ) try: @@ -65,8 +65,7 @@ def get_template_contents( template = template_format.parse(tpl) except ValueError as e: raise exceptions.SDKException( - 'Error parsing template %(url)s %(error)s' - % {'url': template_url, 'error': e} + f'Error parsing template {template_url} {e}' ) tmpl_base_url = utils.base_url_for_url(template_url) diff --git a/openstack/orchestration/util/utils.py b/openstack/orchestration/util/utils.py index 6a166c574..7644d6fa8 100644 --- a/openstack/orchestration/util/utils.py +++ b/openstack/orchestration/util/utils.py @@ -40,7 +40,7 @@ def read_url_content(url): # TODO(mordred) Use requests content = request.urlopen(url).read() except error.URLError: - raise exceptions.SDKException('Could not fetch contents for %s' % url) + raise exceptions.SDKException(f'Could not fetch contents for {url}') if content: try: diff --git a/openstack/orchestration/v1/_proxy.py b/openstack/orchestration/v1/_proxy.py index 49904115f..050aefeef 100644 --- a/openstack/orchestration/v1/_proxy.py +++ b/openstack/orchestration/v1/_proxy.py @@ -573,7 +573,7 @@ class Proxy(proxy.Proxy): ) except Exception as e: raise 
exceptions.SDKException( - "Error in processing template files: %s" % str(e) + f"Error in processing template files: {str(e)}" ) def _get_cleanup_dependencies(self): diff --git a/openstack/orchestration/v1/stack.py b/openstack/orchestration/v1/stack.py index 5247a7f60..2571e2a61 100644 --- a/openstack/orchestration/v1/stack.py +++ b/openstack/orchestration/v1/stack.py @@ -133,16 +133,11 @@ class Stack(resource.Resource): # we need to use other endpoint for update preview. base_path = None if self.name and self.id: - base_path = '/stacks/{stack_name}/{stack_id}'.format( - stack_name=self.name, - stack_id=self.id, - ) + base_path = f'/stacks/{self.name}/{self.id}' elif self.name or self.id: # We have only one of name/id. Do not try to build a stacks/NAME/ID # path - base_path = '/stacks/{stack_identity}'.format( - stack_identity=self.name or self.id - ) + base_path = f'/stacks/{self.name or self.id}' request = self._prepare_request( prepend_key=False, requires_id=False, base_path=base_path ) @@ -248,9 +243,7 @@ class Stack(resource.Resource): self._translate_response(response, **kwargs) if self and self.status in ['DELETE_COMPLETE', 'ADOPT_COMPLETE']: - raise exceptions.NotFoundException( - "No stack found for %s" % self.id - ) + raise exceptions.NotFoundException(f"No stack found for {self.id}") return self @classmethod diff --git a/openstack/proxy.py b/openstack/proxy.py index 36f72db47..2ecebf820 100644 --- a/openstack/proxy.py +++ b/openstack/proxy.py @@ -51,13 +51,12 @@ def _check_resource(strict=False): and actual is not None and not isinstance(actual, resource.Resource) ): - raise ValueError("A %s must be passed" % expected.__name__) + raise ValueError(f"A {expected.__name__} must be passed") elif isinstance(actual, resource.Resource) and not isinstance( actual, expected ): raise ValueError( - "Expected %s but received %s" - % (expected.__name__, actual.__class__.__name__) + f"Expected {expected.__name__} but received {actual.__class__.__name__}" ) return 
method(self, expected, actual, *args, **kwargs) @@ -340,16 +339,14 @@ class Proxy(adapter.Adapter): with self._statsd_client.pipeline() as pipe: if response is not None: duration = int(response.elapsed.total_seconds() * 1000) - metric_name = '{}.{}'.format( - key, str(response.status_code) - ) + metric_name = f'{key}.{str(response.status_code)}' pipe.timing(metric_name, duration) pipe.incr(metric_name) if duration > 1000: - pipe.incr('%s.over_1000' % key) + pipe.incr(f'{key}.over_1000') elif exc is not None: - pipe.incr('%s.failed' % key) - pipe.incr('%s.attempted' % key) + pipe.incr(f'{key}.failed') + pipe.incr(f'{key}.attempted') except Exception: # We do not want errors in metric reporting ever break client self.log.exception("Exception reporting metrics") @@ -362,8 +359,8 @@ class Proxy(adapter.Adapter): if response is not None and not method: method = response.request.method parsed_url = urlparse(url) - endpoint = "{}://{}{}".format( - parsed_url.scheme, parsed_url.netloc, parsed_url.path + endpoint = ( + f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}" ) if response is not None: labels = dict( @@ -713,9 +710,7 @@ class Proxy(adapter.Adapter): requires_id=requires_id, base_path=base_path, skip_cache=skip_cache, - error_message="No {resource_type} found for {value}".format( - resource_type=resource_type.__name__, value=value - ), + error_message=f"No {resource_type.__name__} found for {value}", ) def _list( @@ -875,8 +870,8 @@ class Proxy(adapter.Adapter): # There are filters set, but we can't get required # attribute, so skip the resource self.log.debug( - 'Requested cleanup attribute %s is not ' - 'available on the resource' % k + f'Requested cleanup attribute {k} is not ' + 'available on the resource' ) part_cond.append(False) except Exception: diff --git a/openstack/resource.py b/openstack/resource.py index 979c1cc3f..6edc9f945 100644 --- a/openstack/resource.py +++ b/openstack/resource.py @@ -221,8 +221,9 @@ class _BaseComponent(abc.ABC): if 
value and deprecated: warnings.warn( - "The field %r has been deprecated. %s" - % (self.name, deprecation_reason or "Avoid usage."), + "The field {!r} has been deprecated. {}".format( + self.name, deprecation_reason or "Avoid usage." + ), os_warnings.RemovedFieldWarning, ) return value @@ -386,8 +387,9 @@ class QueryParameters: else: if not allow_unknown_params: raise exceptions.InvalidResourceQuery( - message="Invalid query params: %s" - % ",".join(invalid_keys), + message="Invalid query params: {}".format( + ",".join(invalid_keys) + ), extra_data=invalid_keys, ) else: @@ -620,9 +622,7 @@ class Resource(dict): ] args = ", ".join(pairs) - return "{}.{}({})".format( - self.__module__, self.__class__.__name__, args - ) + return f"{self.__module__}.{self.__class__.__name__}({args})" def __eq__(self, comparand): """Return True if another resource has the same contents""" @@ -687,9 +687,8 @@ class Resource(dict): for attr, component in self._attributes_iterator(tuple([Body])): if component.name == name: warnings.warn( - "Access to '%s[%s]' is deprecated. " - "Use '%s.%s' attribute instead" - % (self.__class__, name, self.__class__, attr), + f"Access to '{self.__class__}[{name}]' is deprecated. " + f"Use '{self.__class__}.{attr}' attribute instead", os_warnings.LegacyAPIWarning, ) return getattr(self, attr) @@ -710,13 +709,9 @@ class Resource(dict): self._unknown_attrs_in_body[name] = value return raise KeyError( - "{name} is not found. {module}.{cls} objects do not support" - " setting arbitrary keys through the" - " dict interface.".format( - module=self.__module__, - cls=self.__class__.__name__, - name=name, - ) + f"{name} is not found. " + f"{self.__module__}.{self.__class__.__name__} objects do not " + f"support setting arbitrary keys through the dict interface." 
) def _attributes( @@ -1340,9 +1335,9 @@ class Resource(dict): if isinstance(session, adapter.Adapter): return session raise ValueError( - "The session argument to Resource methods requires either an" - " instance of an openstack.proxy.Proxy object or at the very least" - " a raw keystoneauth1.adapter.Adapter." + "The session argument to Resource methods requires either an " + "instance of an openstack.proxy.Proxy object or at the very least " + "a raw keystoneauth1.adapter.Adapter." ) @classmethod @@ -1373,7 +1368,7 @@ class Resource(dict): 'delete', 'patch', }: - raise ValueError('Invalid action: %s' % action) + raise ValueError(f'Invalid action: {action}') if session.default_microversion: return session.default_microversion @@ -1414,9 +1409,9 @@ class Resource(dict): if actual is None: message = ( - "API version %s is required, but the default " + f"API version {expected} is required, but the default " "version will be used." - ) % expected + ) _raise(message) actual_n = discover.normalize_version_number(actual) @@ -1424,9 +1419,9 @@ class Resource(dict): expected_n = discover.normalize_version_number(expected) if actual_n < expected_n: message = ( - "API version %(expected)s is required, but %(actual)s " + f"API version {expected} is required, but {actual} " "will be used." 
- ) % {'expected': expected, 'actual': actual} + ) _raise(message) if maximum is not None: maximum_n = discover.normalize_version_number(maximum) @@ -1514,7 +1509,7 @@ class Resource(dict): ) else: raise exceptions.ResourceFailure( - "Invalid create method: %s" % self.create_method + f"Invalid create method: {self.create_method}" ) has_body = ( @@ -1576,7 +1571,7 @@ class Resource(dict): and isinstance(data, list) and all([isinstance(x, dict) for x in data]) ): - raise ValueError('Invalid data passed: %s' % data) + raise ValueError(f'Invalid data passed: {data}') session = cls._get_session(session) if microversion is None: @@ -1592,7 +1587,7 @@ class Resource(dict): method = session.post else: raise exceptions.ResourceFailure( - "Invalid create method: %s" % cls.create_method + f"Invalid create method: {cls.create_method}" ) _body: ty.List[ty.Any] = [] @@ -1831,7 +1826,7 @@ class Resource(dict): call = getattr(session, method.lower()) except AttributeError: raise exceptions.ResourceFailure( - "Invalid commit method: %s" % method + f"Invalid commit method: {method}" ) response = call( @@ -1858,7 +1853,7 @@ class Resource(dict): parts = path.lstrip('/').split('/', 1) field = parts[0] except (KeyError, IndexError): - raise ValueError("Malformed or missing path in %s" % item) + raise ValueError(f"Malformed or missing path in {item}") try: component = getattr(self.__class__, field) @@ -1870,7 +1865,7 @@ class Resource(dict): if len(parts) > 1: new_path = f'/{server_field}/{parts[1]}' else: - new_path = '/%s' % server_field + new_path = f'/{server_field}' converted.append(dict(item, path=new_path)) return converted @@ -2435,9 +2430,7 @@ def wait_for_status( failures = [f.lower() for f in failures] name = f"{resource.__class__.__name__}:{resource.id}" - msg = "Timeout waiting for {name} to transition to {status}".format( - name=name, status=status - ) + msg = f"Timeout waiting for {name} to transition to {status}" for count in utils.iterate_timeout( timeout=wait, 
message=msg, wait=interval @@ -2445,9 +2438,7 @@ def wait_for_status( resource = resource.fetch(session, skip_cache=True) if not resource: raise exceptions.ResourceFailure( - "{name} went away while waiting for {status}".format( - name=name, status=status - ) + f"{name} went away while waiting for {status}" ) new_status = getattr(resource, attribute) @@ -2456,9 +2447,7 @@ def wait_for_status( return resource elif normalized_status in failures: raise exceptions.ResourceFailure( - "{name} transitioned to failure state {status}".format( - name=name, status=new_status - ) + f"{name} transitioned to failure state {new_status}" ) LOG.debug( @@ -2494,9 +2483,7 @@ def wait_for_delete(session, resource, interval, wait, callback=None): orig_resource = resource for count in utils.iterate_timeout( timeout=wait, - message="Timeout waiting for {res}:{id} to delete".format( - res=resource.__class__.__name__, id=resource.id - ), + message=f"Timeout waiting for {resource.__class__.__name__}:{resource.id} to delete", wait=interval, ): try: diff --git a/openstack/service_description.py b/openstack/service_description.py index efe1e783b..a749d60ed 100644 --- a/openstack/service_description.py +++ b/openstack/service_description.py @@ -224,9 +224,7 @@ class ServiceDescription: if not data and instance._strict_proxies: raise exceptions.ServiceDiscoveryException( "Failed to create a working proxy for service " - "{service_type}: No endpoint data found.".format( - service_type=self.service_type - ) + f"{self.service_type}: No endpoint data found." 
) # If we've gotten here with a proxy object it means we have @@ -279,8 +277,8 @@ class ServiceDescription: ) else: version_kwargs['min_version'] = str(supported_versions[0]) - version_kwargs['max_version'] = '{version}.latest'.format( - version=str(supported_versions[-1]) + version_kwargs['max_version'] = ( + f'{str(supported_versions[-1])}.latest' ) temp_adapter = config.get_session_client( @@ -291,21 +289,15 @@ class ServiceDescription: region_name = instance.config.get_region_name(self.service_type) if version_kwargs: raise exceptions.NotSupported( - "The {service_type} service for {cloud}:{region_name}" - " exists but does not have any supported versions.".format( - service_type=self.service_type, - cloud=instance.name, - region_name=region_name, - ) + f"The {self.service_type} service for " + f"{instance.name}:{region_name} exists but does not have " + f"any supported versions." ) else: raise exceptions.NotSupported( - "The {service_type} service for {cloud}:{region_name}" - " exists but no version was discoverable.".format( - service_type=self.service_type, - cloud=instance.name, - region_name=region_name, - ) + f"The {self.service_type} service for " + f"{instance.name}:{region_name} exists but no version " + f"was discoverable." ) proxy_class = self.supported_versions.get(str(found_version[0])) if proxy_class: @@ -322,11 +314,9 @@ class ServiceDescription: # service catalog that also doesn't have any useful # version discovery? warnings.warn( - "Service {service_type} has no discoverable version. " + f"Service {self.service_type} has no discoverable version. 
" "The resulting Proxy object will only have direct " - "passthrough REST capabilities.".format( - service_type=self.service_type - ), + "passthrough REST capabilities.", category=os_warnings.UnsupportedServiceVersion, ) return temp_adapter diff --git a/openstack/shared_file_system/v2/_proxy.py b/openstack/shared_file_system/v2/_proxy.py index dfa41e98a..0bc53f7cc 100644 --- a/openstack/shared_file_system/v2/_proxy.py +++ b/openstack/shared_file_system/v2/_proxy.py @@ -1094,7 +1094,7 @@ class Proxy(proxy.Proxy): keys_failed_to_delete.append(key) if keys_failed_to_delete: raise exceptions.SDKException( - "Some keys failed to be deleted %s" % keys_failed_to_delete + f"Some keys failed to be deleted {keys_failed_to_delete}" ) def resource_locks(self, **query): diff --git a/openstack/test/fakes.py b/openstack/test/fakes.py index 63315b1b7..d82f88011 100644 --- a/openstack/test/fakes.py +++ b/openstack/test/fakes.py @@ -103,10 +103,7 @@ def generate_fake_resource( base_attrs[name] = [uuid.uuid4().hex] else: # Everything else - msg = "Fake value for {}.{} can not be generated".format( - resource_type.__name__, - name, - ) + msg = f"Fake value for {resource_type.__name__}.{name} can not be generated" raise NotImplementedError(msg) elif issubclass(target_type, list) and value.list_type is None: # List of str @@ -130,10 +127,7 @@ def generate_fake_resource( base_attrs[name] = dict() else: # Everything else - msg = "Fake value for {}.{} can not be generated".format( - resource_type.__name__, - name, - ) + msg = f"Fake value for {resource_type.__name__}.{name} can not be generated" raise NotImplementedError(msg) if isinstance(value, resource.URI): diff --git a/openstack/tests/base.py b/openstack/tests/base.py index be5bbb06d..3226f6be5 100644 --- a/openstack/tests/base.py +++ b/openstack/tests/base.py @@ -132,11 +132,7 @@ class TestCase(base.BaseTestCase): if not whole[key] and part[key]: missing_keys.append(key) if missing_keys: - self.fail( - "Keys {} are in {} but not in 
{}".format( - missing_keys, part, whole - ) - ) + self.fail(f"Keys {missing_keys} are in {part} but not in {whole}") wrong_values = [ (key, part[key], whole[key]) for key in part @@ -144,8 +140,10 @@ class TestCase(base.BaseTestCase): ] if wrong_values: self.fail( - "Mismatched values: %s" - % ", ".join( - "for %s got %s and %s" % tpl for tpl in wrong_values + "Mismatched values: {}".format( + ", ".join( + "for {} got {} and {}".format(*tpl) + for tpl in wrong_values + ) ) ) diff --git a/openstack/tests/fakes.py b/openstack/tests/fakes.py index c7b6dbf73..7c9dc5a5d 100644 --- a/openstack/tests/fakes.py +++ b/openstack/tests/fakes.py @@ -30,9 +30,7 @@ FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddd' CHOCOLATE_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8ddde' STRAWBERRY_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddf' COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1' -ORCHESTRATION_ENDPOINT = 'https://orchestration.example.com/v1/{p}'.format( - p=PROJECT_ID -) +ORCHESTRATION_ENDPOINT = f'https://orchestration.example.com/v1/{PROJECT_ID}' NO_MD5 = '93b885adfe0da089cdf634904fd59f71' NO_SHA256 = '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' FAKE_PUBLIC_KEY = ( @@ -53,15 +51,11 @@ def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24): 'id': flavor_id, 'links': [ { - 'href': '{endpoint}/flavors/{id}'.format( - endpoint=COMPUTE_ENDPOINT, id=flavor_id - ), + 'href': f'{COMPUTE_ENDPOINT}/flavors/{flavor_id}', 'rel': 'self', }, { - 'href': '{endpoint}/flavors/{id}'.format( - endpoint=COMPUTE_ENDPOINT, id=flavor_id - ), + 'href': f'{COMPUTE_ENDPOINT}/flavors/{flavor_id}', 'rel': 'bookmark', }, ], @@ -231,9 +225,7 @@ def make_fake_stack_event( "rel": "resource", }, { - "href": "{endpoint}/stacks/{name}/{id}".format( - endpoint=ORCHESTRATION_ENDPOINT, name=name, id=id - ), + "href": f"{ORCHESTRATION_ENDPOINT}/stacks/{name}/{id}", "rel": "stack", }, ], @@ -288,9 +280,7 @@ def make_fake_image( 'created_at': '2016-02-10T05:03:11Z', 
'owner_specified.openstack.md5': md5 or NO_MD5, 'owner_specified.openstack.sha256': sha256 or NO_SHA256, - 'owner_specified.openstack.object': 'images/{name}'.format( - name=image_name - ), + 'owner_specified.openstack.object': f'images/{image_name}', 'protected': False, } diff --git a/openstack/tests/functional/baremetal/test_baremetal_driver.py b/openstack/tests/functional/baremetal/test_baremetal_driver.py index fb775c642..1c3b31eb8 100644 --- a/openstack/tests/functional/baremetal/test_baremetal_driver.py +++ b/openstack/tests/functional/baremetal/test_baremetal_driver.py @@ -41,10 +41,10 @@ class TestBareMetalDriverDetails(base.BaseBaremetalTest): self.assertEqual('fake-hardware', driver.name) for iface in ('boot', 'deploy', 'management', 'power'): self.assertIn( - 'fake', getattr(driver, 'enabled_%s_interfaces' % iface) + 'fake', getattr(driver, f'enabled_{iface}_interfaces') ) self.assertEqual( - 'fake', getattr(driver, 'default_%s_interface' % iface) + 'fake', getattr(driver, f'default_{iface}_interface') ) self.assertNotEqual([], driver.hosts) @@ -53,8 +53,8 @@ class TestBareMetalDriverDetails(base.BaseBaremetalTest): driver = [d for d in drivers if d.name == 'fake-hardware'][0] for iface in ('boot', 'deploy', 'management', 'power'): self.assertIn( - 'fake', getattr(driver, 'enabled_%s_interfaces' % iface) + 'fake', getattr(driver, f'enabled_{iface}_interfaces') ) self.assertEqual( - 'fake', getattr(driver, 'default_%s_interface' % iface) + 'fake', getattr(driver, f'default_{iface}_interface') ) diff --git a/openstack/tests/functional/base.py b/openstack/tests/functional/base.py index 464561000..24f6affdf 100644 --- a/openstack/tests/functional/base.py +++ b/openstack/tests/functional/base.py @@ -220,11 +220,7 @@ class BaseFunctionalTest(base.TestCase): :returns: True if the service exists, otherwise False. 
""" if not self.conn.has_service(service_type): - self.skipTest( - 'Service {service_type} not found in cloud'.format( - service_type=service_type - ) - ) + self.skipTest(f'Service {service_type} not found in cloud') if not min_microversion: return @@ -252,9 +248,9 @@ class BaseFunctionalTest(base.TestCase): # Globally unique names can only rely on some form of uuid # unix_t is also used to easier determine orphans when running real # functional tests on a real cloud - return (prefix if prefix else '') + "{time}-{uuid}".format( - time=int(time.time()), uuid=uuid.uuid4().hex - ) + return ( + prefix if prefix else '' + ) + f"{int(time.time())}-{uuid.uuid4().hex}" def create_temporary_project(self): """Create a new temporary project. diff --git a/openstack/tests/functional/cloud/test_cluster_templates.py b/openstack/tests/functional/cloud/test_cluster_templates.py index a52f3283a..a98c85a6f 100644 --- a/openstack/tests/functional/cloud/test_cluster_templates.py +++ b/openstack/tests/functional/cloud/test_cluster_templates.py @@ -58,12 +58,12 @@ class TestClusterTemplate(base.BaseFunctionalTest): '-N', '', '-f', - '%s/id_rsa_sdk' % self.ssh_directory, + f'{self.ssh_directory}/id_rsa_sdk', ] ) # add keypair to nova - with open('%s/id_rsa_sdk.pub' % self.ssh_directory) as f: + with open(f'{self.ssh_directory}/id_rsa_sdk.pub') as f: key_content = f.read() self.user_cloud.create_keypair('testkey', key_content) diff --git a/openstack/tests/functional/cloud/test_compute.py b/openstack/tests/functional/cloud/test_compute.py index c7e270f4a..037a7e025 100644 --- a/openstack/tests/functional/cloud/test_compute.py +++ b/openstack/tests/functional/cloud/test_compute.py @@ -339,9 +339,7 @@ class TestCompute(base.BaseFunctionalTest): # consistency! 
for count in utils.iterate_timeout( 60, - 'Timeout waiting for volume {volume_id} to detach'.format( - volume_id=volume_id - ), + f'Timeout waiting for volume {volume_id} to detach', ): volume = self.user_cloud.get_volume(volume_id) if volume.status in ( diff --git a/openstack/tests/functional/cloud/test_recordset.py b/openstack/tests/functional/cloud/test_recordset.py index fff2b1b60..ce6e9127c 100644 --- a/openstack/tests/functional/cloud/test_recordset.py +++ b/openstack/tests/functional/cloud/test_recordset.py @@ -35,9 +35,9 @@ class TestRecordset(base.BaseFunctionalTest): '''Test DNS recordsets functionality''' sub = ''.join(random.choice(string.ascii_lowercase) for _ in range(6)) - zone = '%s.example2.net.' % sub + zone = f'{sub}.example2.net.' email = 'test@example2.net' - name = 'www.%s' % zone + name = f'www.{zone}' type_ = 'a' description = 'Test recordset' ttl = 3600 @@ -96,9 +96,9 @@ class TestRecordset(base.BaseFunctionalTest): '''Test DNS recordsets functionality''' sub = ''.join(random.choice(string.ascii_lowercase) for _ in range(6)) - zone = '%s.example2.net.' % sub + zone = f'{sub}.example2.net.' 
email = 'test@example2.net' - name = 'www.%s' % zone + name = f'www.{zone}' type_ = 'a' description = 'Test recordset' ttl = 3600 diff --git a/openstack/tests/functional/dns/v2/test_zone.py b/openstack/tests/functional/dns/v2/test_zone.py index 417897516..3875c78ad 100644 --- a/openstack/tests/functional/dns/v2/test_zone.py +++ b/openstack/tests/functional/dns/v2/test_zone.py @@ -54,9 +54,7 @@ class TestZone(base.BaseFunctionalTest): self.assertEqual( current_ttl + 1, updated_zone_ttl, - 'Failed, updated TTL value is:{} instead of expected:{}'.format( - updated_zone_ttl, current_ttl + 1 - ), + f'Failed, updated TTL value is:{updated_zone_ttl} instead of expected:{current_ttl + 1}', ) def test_create_rs(self): diff --git a/openstack/tests/unit/baremetal/v1/test_node.py b/openstack/tests/unit/baremetal/v1/test_node.py index f86f33c4f..96d47f8a1 100644 --- a/openstack/tests/unit/baremetal/v1/test_node.py +++ b/openstack/tests/unit/baremetal/v1/test_node.py @@ -310,7 +310,7 @@ class TestNodeSetProvisionState(base.TestCase): result = self.node.set_provision_state(self.session, 'active') self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'active'}, headers=mock.ANY, microversion=None, @@ -321,7 +321,7 @@ class TestNodeSetProvisionState(base.TestCase): result = self.node.set_provision_state(self.session, 'manage') self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'manage'}, headers=mock.ANY, microversion='1.4', @@ -334,7 +334,7 @@ class TestNodeSetProvisionState(base.TestCase): ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'active', 'configdrive': 'abcd'}, headers=mock.ANY, 
microversion=None, @@ -348,7 +348,7 @@ class TestNodeSetProvisionState(base.TestCase): ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'active', 'configdrive': config_drive.decode()}, headers=mock.ANY, microversion=None, @@ -361,7 +361,7 @@ class TestNodeSetProvisionState(base.TestCase): ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'rebuild', 'configdrive': 'abcd'}, headers=mock.ANY, microversion='1.35', @@ -376,7 +376,7 @@ class TestNodeSetProvisionState(base.TestCase): ) self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': target, 'configdrive': {'user_data': 'abcd'}}, headers=mock.ANY, microversion='1.56', @@ -391,7 +391,7 @@ class TestNodeSetProvisionState(base.TestCase): self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'active', 'deploy_steps': deploy_steps}, headers=mock.ANY, microversion='1.69', @@ -406,7 +406,7 @@ class TestNodeSetProvisionState(base.TestCase): self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'rebuild', 'deploy_steps': deploy_steps}, headers=mock.ANY, microversion='1.69', @@ -418,7 +418,7 @@ class TestNodeSetProvisionState(base.TestCase): self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'unhold'}, headers=mock.ANY, microversion='1.85', @@ -433,7 +433,7 @@ 
class TestNodeSetProvisionState(base.TestCase): self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'service', 'service_steps': service_steps}, headers=mock.ANY, microversion='1.87', @@ -448,7 +448,7 @@ class TestNodeSetProvisionState(base.TestCase): self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'clean', 'runbook': runbook}, headers=mock.ANY, microversion='1.92', @@ -463,7 +463,7 @@ class TestNodeSetProvisionState(base.TestCase): self.assertIs(result, self.node) self.session.put.assert_called_once_with( - 'nodes/%s/states/provision' % self.node.id, + f'nodes/{self.node.id}/states/provision', json={'target': 'service', 'runbook': runbook}, headers=mock.ANY, microversion='1.92', @@ -602,7 +602,7 @@ class TestNodeVif(base.TestCase): def test_attach_vif(self): self.assertIsNone(self.node.attach_vif(self.session, self.vif_id)) self.session.post.assert_called_once_with( - 'nodes/%s/vifs' % self.node.id, + f'nodes/{self.node.id}/vifs', json={'id': self.vif_id}, headers=mock.ANY, microversion='1.67', @@ -616,7 +616,7 @@ class TestNodeVif(base.TestCase): ) ) self.session.post.assert_called_once_with( - 'nodes/%s/vifs' % self.node.id, + f'nodes/{self.node.id}/vifs', json={'id': self.vif_id}, headers=mock.ANY, microversion='1.67', @@ -630,7 +630,7 @@ class TestNodeVif(base.TestCase): ) ) self.session.post.assert_called_once_with( - 'nodes/%s/vifs' % self.node.id, + f'nodes/{self.node.id}/vifs', json={'id': self.vif_id, 'port_uuid': self.vif_port_uuid}, headers=mock.ANY, microversion='1.67', @@ -646,7 +646,7 @@ class TestNodeVif(base.TestCase): ) ) self.session.post.assert_called_once_with( - 'nodes/%s/vifs' % self.node.id, + f'nodes/{self.node.id}/vifs', json={ 'id': self.vif_id, 'portgroup_uuid': 
self.vif_portgroup_uuid, @@ -695,7 +695,7 @@ class TestNodeVif(base.TestCase): res = self.node.list_vifs(self.session) self.assertEqual(['1234', '5678'], res) self.session.get.assert_called_once_with( - 'nodes/%s/vifs' % self.node.id, + f'nodes/{self.node.id}/vifs', headers=mock.ANY, microversion='1.67', ) @@ -849,7 +849,7 @@ class TestNodeInjectNMI(base.TestCase): def test_inject_nmi(self): self.node.inject_nmi(self.session) self.session.put.assert_called_once_with( - 'nodes/%s/management/inject_nmi' % FAKE['uuid'], + 'nodes/{}/management/inject_nmi'.format(FAKE['uuid']), json={}, headers=mock.ANY, microversion='1.29', @@ -878,7 +878,7 @@ class TestNodeSetPowerState(base.TestCase): def test_power_on(self): self.node.set_power_state(self.session, 'power on') self.session.put.assert_called_once_with( - 'nodes/%s/states/power' % FAKE['uuid'], + 'nodes/{}/states/power'.format(FAKE['uuid']), json={'target': 'power on'}, headers=mock.ANY, microversion=None, @@ -888,7 +888,7 @@ class TestNodeSetPowerState(base.TestCase): def test_soft_power_on(self): self.node.set_power_state(self.session, 'soft power off') self.session.put.assert_called_once_with( - 'nodes/%s/states/power' % FAKE['uuid'], + 'nodes/{}/states/power'.format(FAKE['uuid']), json={'target': 'soft power off'}, headers=mock.ANY, microversion='1.27', @@ -912,7 +912,7 @@ class TestNodeMaintenance(base.TestCase): def test_set(self): self.node.set_maintenance(self.session) self.session.put.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json={'reason': None}, headers=mock.ANY, microversion=mock.ANY, @@ -921,7 +921,7 @@ class TestNodeMaintenance(base.TestCase): def test_set_with_reason(self): self.node.set_maintenance(self.session, 'No work on Monday') self.session.put.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json={'reason': 'No work on Monday'}, headers=mock.ANY, microversion=mock.ANY, @@ -930,7 
+930,7 @@ class TestNodeMaintenance(base.TestCase): def test_unset(self): self.node.unset_maintenance(self.session) self.session.delete.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json=None, headers=mock.ANY, microversion=mock.ANY, @@ -940,7 +940,7 @@ class TestNodeMaintenance(base.TestCase): self.node.is_maintenance = True self.node.commit(self.session) self.session.put.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json={'reason': None}, headers=mock.ANY, microversion=mock.ANY, @@ -953,7 +953,7 @@ class TestNodeMaintenance(base.TestCase): self.node.maintenance_reason = 'No work on Monday' self.node.commit(self.session) self.session.put.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json={'reason': 'No work on Monday'}, headers=mock.ANY, microversion=mock.ANY, @@ -965,14 +965,14 @@ class TestNodeMaintenance(base.TestCase): self.node.name = 'lazy-3000' self.node.commit(self.session) self.session.put.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json={'reason': None}, headers=mock.ANY, microversion=mock.ANY, ) self.session.patch.assert_called_once_with( - 'nodes/%s' % self.node.id, + f'nodes/{self.node.id}', json=[{'path': '/name', 'op': 'replace', 'value': 'lazy-3000'}], headers=mock.ANY, microversion=mock.ANY, @@ -984,14 +984,14 @@ class TestNodeMaintenance(base.TestCase): self.node.name = 'lazy-3000' self.node.commit(self.session) self.session.put.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json={'reason': 'No work on Monday'}, headers=mock.ANY, microversion=mock.ANY, ) self.session.patch.assert_called_once_with( - 'nodes/%s' % self.node.id, + f'nodes/{self.node.id}', json=[{'path': '/name', 'op': 'replace', 'value': 'lazy-3000'}], headers=mock.ANY, microversion=mock.ANY, 
@@ -1009,7 +1009,7 @@ class TestNodeMaintenance(base.TestCase): self.node.commit(self.session) self.session.put.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json={'reason': 'No work on Monday'}, headers=mock.ANY, microversion=mock.ANY, @@ -1020,7 +1020,7 @@ class TestNodeMaintenance(base.TestCase): self.assertIsNone(self.node.maintenance_reason) self.session.delete.assert_called_once_with( - 'nodes/%s/maintenance' % self.node.id, + f'nodes/{self.node.id}/maintenance', json=None, headers=mock.ANY, microversion=mock.ANY, @@ -1040,7 +1040,7 @@ class TestNodeBootDevice(base.TestCase): def test_get_boot_device(self): self.node.get_boot_device(self.session) self.session.get.assert_called_once_with( - 'nodes/%s/management/boot_device' % self.node.id, + f'nodes/{self.node.id}/management/boot_device', headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1049,7 +1049,7 @@ class TestNodeBootDevice(base.TestCase): def test_set_boot_device(self): self.node.set_boot_device(self.session, 'pxe', persistent=False) self.session.put.assert_called_once_with( - 'nodes/%s/management/boot_device' % self.node.id, + f'nodes/{self.node.id}/management/boot_device', json={'boot_device': 'pxe', 'persistent': False}, headers=mock.ANY, microversion=mock.ANY, @@ -1059,7 +1059,7 @@ class TestNodeBootDevice(base.TestCase): def test_get_supported_boot_devices(self): self.node.get_supported_boot_devices(self.session) self.session.get.assert_called_once_with( - 'nodes/%s/management/boot_device/supported' % self.node.id, + f'nodes/{self.node.id}/management/boot_device/supported', headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1080,7 +1080,7 @@ class TestNodeSetBootMode(base.TestCase): def test_node_set_boot_mode(self): self.node.set_boot_mode(self.session, 'uefi') self.session.put.assert_called_once_with( - 'nodes/%s/states/boot_mode' % 
self.node.id, + f'nodes/{self.node.id}/states/boot_mode', json={'target': 'uefi'}, headers=mock.ANY, microversion=mock.ANY, @@ -1107,7 +1107,7 @@ class TestNodeSetSecureBoot(base.TestCase): def test_node_set_secure_boot(self): self.node.set_secure_boot(self.session, True) self.session.put.assert_called_once_with( - 'nodes/%s/states/secure_boot' % self.node.id, + f'nodes/{self.node.id}/states/secure_boot', json={'target': True}, headers=mock.ANY, microversion=mock.ANY, @@ -1167,7 +1167,7 @@ class TestNodeTraits(base.TestCase): traits = ['CUSTOM_FAKE', 'CUSTOM_REAL', 'CUSTOM_MISSING'] self.node.set_traits(self.session, traits) self.session.put.assert_called_once_with( - 'nodes/%s/traits' % self.node.id, + f'nodes/{self.node.id}/traits', json={'traits': ['CUSTOM_FAKE', 'CUSTOM_REAL', 'CUSTOM_MISSING']}, headers=mock.ANY, microversion='1.37', @@ -1264,7 +1264,7 @@ class TestNodePassthru: def test_get_passthru(self): self.node.call_vendor_passthru(self.session, "GET", "test_method") self.session.get.assert_called_once_with( - 'nodes/%s/vendor_passthru?method=test_method' % self.node.id, + f'nodes/{self.node.id}/vendor_passthru?method=test_method', headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1273,7 +1273,7 @@ class TestNodePassthru: def test_post_passthru(self): self.node.call_vendor_passthru(self.session, "POST", "test_method") self.session.post.assert_called_once_with( - 'nodes/%s/vendor_passthru?method=test_method' % self.node.id, + f'nodes/{self.node.id}/vendor_passthru?method=test_method', headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1282,7 +1282,7 @@ class TestNodePassthru: def test_put_passthru(self): self.node.call_vendor_passthru(self.session, "PUT", "test_method") self.session.put.assert_called_once_with( - 'nodes/%s/vendor_passthru?method=test_method' % self.node.id, + f'nodes/{self.node.id}/vendor_passthru?method=test_method', headers=mock.ANY, 
microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1291,7 +1291,7 @@ class TestNodePassthru: def test_delete_passthru(self): self.node.call_vendor_passthru(self.session, "DELETE", "test_method") self.session.delete.assert_called_once_with( - 'nodes/%s/vendor_passthru?method=test_method' % self.node.id, + f'nodes/{self.node.id}/vendor_passthru?method=test_method', headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1300,7 +1300,7 @@ class TestNodePassthru: def test_list_passthru(self): self.node.list_vendor_passthru(self.session) self.session.get.assert_called_once_with( - 'nodes/%s/vendor_passthru/methods' % self.node.id, + f'nodes/{self.node.id}/vendor_passthru/methods', headers=mock.ANY, microversion='1.37', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1321,7 +1321,7 @@ class TestNodeConsole(base.TestCase): def test_get_console(self): self.node.get_console(self.session) self.session.get.assert_called_once_with( - 'nodes/%s/states/console' % self.node.id, + f'nodes/{self.node.id}/states/console', headers=mock.ANY, microversion=mock.ANY, retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1330,7 +1330,7 @@ class TestNodeConsole(base.TestCase): def test_set_console_mode(self): self.node.set_console_mode(self.session, True) self.session.put.assert_called_once_with( - 'nodes/%s/states/console' % self.node.id, + f'nodes/{self.node.id}/states/console', json={'enabled': True}, headers=mock.ANY, microversion=mock.ANY, @@ -1382,7 +1382,7 @@ class TestNodeInventory(base.TestCase): self.assertEqual(node_inventory, res) self.session.get.assert_called_once_with( - 'nodes/%s/inventory' % self.node.id, + f'nodes/{self.node.id}/inventory', headers=mock.ANY, microversion='1.81', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, @@ -1427,7 +1427,7 @@ class TestNodeFirmware(base.TestCase): self.assertEqual(node_firmware, res) self.session.get.assert_called_once_with( - 
'nodes/%s/firmware' % self.node.id, + f'nodes/{self.node.id}/firmware', headers=mock.ANY, microversion='1.86', retriable_status_codes=_common.RETRIABLE_STATUS_CODES, diff --git a/openstack/tests/unit/base.py b/openstack/tests/unit/base.py index b1d609ce8..b63aeb535 100644 --- a/openstack/tests/unit/base.py +++ b/openstack/tests/unit/base.py @@ -203,7 +203,7 @@ class TestCase(base.TestCase): if append: to_join.extend([urllib.parse.quote(i) for i in append]) if qs_elements is not None: - qs = '?%s' % '&'.join(qs_elements) + qs = '?{}'.format('&'.join(qs_elements)) return '{uri}{qs}'.format(uri='/'.join(to_join), qs=qs) def mock_for_keystone_projects( @@ -811,17 +811,13 @@ class TestCase(base.TestCase): # NOTE(notmorgan): make sure the delimiter is non-url-safe, in this # case "|" is used so that the split can be a bit easier on # maintainers of this code. - key = '{method}|{uri}|{params}'.format( - method=method, uri=uri, params=kw_params - ) + key = f'{method}|{uri}|{kw_params}' validate = to_mock.pop('validate', {}) valid_keys = {'json', 'headers', 'params', 'data'} invalid_keys = set(validate.keys()) - valid_keys if invalid_keys: raise TypeError( - "Invalid values passed to validate: {keys}".format( - keys=invalid_keys - ) + f"Invalid values passed to validate: {invalid_keys}" ) headers = structures.CaseInsensitiveDict( to_mock.pop('headers', {}) @@ -841,11 +837,10 @@ class TestCase(base.TestCase): 'PROGRAMMING ERROR: key-word-params ' 'should be part of the uri_key and cannot change, ' 'it will affect the matcher in requests_mock. 
' - '%(old)r != %(new)r' - % { - 'old': self._uri_registry[key]['kw_params'], - 'new': kw_params, - } + '{old!r} != {new!r}'.format( + old=self._uri_registry[key]['kw_params'], + new=kw_params, + ) ) self._uri_registry[key]['response_list'].append(to_mock) @@ -900,9 +895,7 @@ class TestCase(base.TestCase): 'call': '{method} {url}'.format( method=call['method'], url=call['url'] ), - 'history': '{method} {url}'.format( - method=history.method, url=history.url - ), + 'history': f'{history.method} {history.url}', } ), ) diff --git a/openstack/tests/unit/block_storage/v2/test_backup.py b/openstack/tests/unit/block_storage/v2/test_backup.py index 040063024..600ed8855 100644 --- a/openstack/tests/unit/block_storage/v2/test_backup.py +++ b/openstack/tests/unit/block_storage/v2/test_backup.py @@ -139,7 +139,7 @@ class TestBackup(base.TestCase): self.assertEqual(sot, sot.restore(self.sess, 'vol', 'name')) - url = 'backups/%s/restore' % FAKE_ID + url = f'backups/{FAKE_ID}/restore' body = {"restore": {"volume_id": "vol", "name": "name"}} self.sess.post.assert_called_with(url, json=body) @@ -148,7 +148,7 @@ class TestBackup(base.TestCase): self.assertEqual(sot, sot.restore(self.sess, name='name')) - url = 'backups/%s/restore' % FAKE_ID + url = f'backups/{FAKE_ID}/restore' body = {"restore": {"name": "name"}} self.sess.post.assert_called_with(url, json=body) @@ -157,7 +157,7 @@ class TestBackup(base.TestCase): self.assertEqual(sot, sot.restore(self.sess, volume_id='vol')) - url = 'backups/%s/restore' % FAKE_ID + url = f'backups/{FAKE_ID}/restore' body = {"restore": {"volume_id": "vol"}} self.sess.post.assert_called_with(url, json=body) @@ -171,7 +171,7 @@ class TestBackup(base.TestCase): self.assertIsNone(sot.force_delete(self.sess)) - url = 'backups/%s/action' % FAKE_ID + url = f'backups/{FAKE_ID}/action' body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -182,7 +182,7 @@ class TestBackup(base.TestCase): 
self.assertIsNone(sot.reset(self.sess, 'new_status')) - url = 'backups/%s/action' % FAKE_ID + url = f'backups/{FAKE_ID}/action' body = {'os-reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion diff --git a/openstack/tests/unit/block_storage/v2/test_snapshot.py b/openstack/tests/unit/block_storage/v2/test_snapshot.py index 0c2909404..657ffb1e0 100644 --- a/openstack/tests/unit/block_storage/v2/test_snapshot.py +++ b/openstack/tests/unit/block_storage/v2/test_snapshot.py @@ -97,7 +97,7 @@ class TestSnapshotActions(base.TestCase): self.assertIsNone(sot.reset(self.sess, 'new_status')) - url = 'snapshots/%s/action' % FAKE_ID + url = f'snapshots/{FAKE_ID}/action' body = {'os-reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion diff --git a/openstack/tests/unit/block_storage/v2/test_type.py b/openstack/tests/unit/block_storage/v2/test_type.py index d67934d06..edfade8ff 100644 --- a/openstack/tests/unit/block_storage/v2/test_type.py +++ b/openstack/tests/unit/block_storage/v2/test_type.py @@ -73,7 +73,7 @@ class TestType(base.TestCase): ) self.sess.get.assert_called_with( - "types/%s/os-volume-type-access" % sot.id + f"types/{sot.id}/os-volume-type-access" ) def test_add_private_access(self): @@ -81,7 +81,7 @@ class TestType(base.TestCase): self.assertIsNone(sot.add_private_access(self.sess, "a")) - url = "types/%s/action" % sot.id + url = f"types/{sot.id}/action" body = {"addProjectAccess": {"project": "a"}} self.sess.post.assert_called_with(url, json=body) @@ -90,6 +90,6 @@ class TestType(base.TestCase): self.assertIsNone(sot.remove_private_access(self.sess, "a")) - url = "types/%s/action" % sot.id + url = f"types/{sot.id}/action" body = {"removeProjectAccess": {"project": "a"}} self.sess.post.assert_called_with(url, json=body) diff --git a/openstack/tests/unit/block_storage/v2/test_volume.py 
b/openstack/tests/unit/block_storage/v2/test_volume.py index 3f13ed80a..1e80ce9f0 100644 --- a/openstack/tests/unit/block_storage/v2/test_volume.py +++ b/openstack/tests/unit/block_storage/v2/test_volume.py @@ -153,7 +153,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.extend(self.sess, '20')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {"os-extend": {"new_size": "20"}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -164,7 +164,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.set_readonly(self.sess, True)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-update_readonly_flag': {'readonly': True}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -175,7 +175,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.set_readonly(self.sess, False)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-update_readonly_flag': {'readonly': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -186,7 +186,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.set_bootable_status(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-set_bootable': {'bootable': True}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -197,7 +197,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.set_bootable_status(self.sess, False)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-set_bootable': {'bootable': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -208,7 +208,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.set_image_metadata(self.sess, {'foo': 'bar'})) - url = 'volumes/%s/action' % FAKE_ID + url = 
f'volumes/{FAKE_ID}/action' body = {'os-set_image_metadata': {'foo': 'bar'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -224,7 +224,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.delete_image_metadata(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body_a = {'os-unset_image_metadata': 'foo'} body_b = {'os-unset_image_metadata': 'baz'} self.sess.post.assert_has_calls( @@ -243,7 +243,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.delete_image_metadata_item(self.sess, 'foo')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-unset_image_metadata': 'foo'} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -254,7 +254,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.reset_status(self.sess, '1', '2', '3')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-reset_status': { 'status': '1', @@ -271,7 +271,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.reset_status(self.sess, status='1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-reset_status': { 'status': '1', @@ -286,7 +286,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.attach(self.sess, '1', '2')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-attach': {'mountpoint': '1', 'instance_uuid': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -297,7 +297,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.detach(self.sess, '1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-detach': {'attachment_id': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -308,7 +308,7 @@ class TestVolumeActions(TestVolume): 
self.assertIsNone(sot.detach(self.sess, '1', force=True)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-force_detach': {'attachment_id': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -319,7 +319,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.unmanage(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-unmanage': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -330,7 +330,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.retype(self.sess, '1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-retype': {'new_type': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -341,7 +341,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.retype(self.sess, '1', migration_policy='2')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-retype': {'new_type': '1', 'migration_policy': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -352,7 +352,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.migrate(self.sess, host='1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-migrate_volume': {'host': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -367,7 +367,7 @@ class TestVolumeActions(TestVolume): ) ) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-migrate_volume': { 'host': '1', @@ -384,7 +384,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.complete_migration(self.sess, new_volume_id='1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-migrate_volume_completion': {'new_volume': '1', 'error': False} } @@ -399,7 
+399,7 @@ class TestVolumeActions(TestVolume): sot.complete_migration(self.sess, new_volume_id='1', error=True) ) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-migrate_volume_completion': {'new_volume': '1', 'error': True} } @@ -412,7 +412,7 @@ class TestVolumeActions(TestVolume): self.assertIsNone(sot.force_delete(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion diff --git a/openstack/tests/unit/block_storage/v3/test_attachment.py b/openstack/tests/unit/block_storage/v3/test_attachment.py index af725a0ae..006760219 100644 --- a/openstack/tests/unit/block_storage/v3/test_attachment.py +++ b/openstack/tests/unit/block_storage/v3/test_attachment.py @@ -180,7 +180,7 @@ class TestAttachment(base.TestCase): sot.id = FAKE_ID sot.complete(self.sess) self.sess.post.assert_called_with( - '/attachments/%s/action' % FAKE_ID, + f'/attachments/{FAKE_ID}/action', json={ 'os-complete': '92dc3671-d0ab-4370-8058-c88a71661ec5', }, diff --git a/openstack/tests/unit/block_storage/v3/test_backup.py b/openstack/tests/unit/block_storage/v3/test_backup.py index a80b97d46..796cd05ae 100644 --- a/openstack/tests/unit/block_storage/v3/test_backup.py +++ b/openstack/tests/unit/block_storage/v3/test_backup.py @@ -152,7 +152,7 @@ class TestBackup(base.TestCase): self.assertEqual(sot, sot.restore(self.sess, 'vol', 'name')) - url = 'backups/%s/restore' % FAKE_ID + url = f'backups/{FAKE_ID}/restore' body = {"restore": {"volume_id": "vol", "name": "name"}} self.sess.post.assert_called_with(url, json=body) @@ -161,7 +161,7 @@ class TestBackup(base.TestCase): self.assertEqual(sot, sot.restore(self.sess, name='name')) - url = 'backups/%s/restore' % FAKE_ID + url = f'backups/{FAKE_ID}/restore' body = {"restore": {"name": "name"}} self.sess.post.assert_called_with(url, json=body) @@ -170,7 +170,7 @@ class 
TestBackup(base.TestCase): self.assertEqual(sot, sot.restore(self.sess, volume_id='vol')) - url = 'backups/%s/restore' % FAKE_ID + url = f'backups/{FAKE_ID}/restore' body = {"restore": {"volume_id": "vol"}} self.sess.post.assert_called_with(url, json=body) @@ -184,7 +184,7 @@ class TestBackup(base.TestCase): self.assertIsNone(sot.force_delete(self.sess)) - url = 'backups/%s/action' % FAKE_ID + url = f'backups/{FAKE_ID}/action' body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -195,7 +195,7 @@ class TestBackup(base.TestCase): self.assertIsNone(sot.reset(self.sess, 'new_status')) - url = 'backups/%s/action' % FAKE_ID + url = f'backups/{FAKE_ID}/action' body = {'os-reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion diff --git a/openstack/tests/unit/block_storage/v3/test_group.py b/openstack/tests/unit/block_storage/v3/test_group.py index e75cf97ac..cc6592885 100644 --- a/openstack/tests/unit/block_storage/v3/test_group.py +++ b/openstack/tests/unit/block_storage/v3/test_group.py @@ -87,7 +87,7 @@ class TestGroupAction(base.TestCase): self.assertIsNone(sot.delete(self.sess)) - url = 'groups/%s/action' % GROUP_ID + url = f'groups/{GROUP_ID}/action' body = {'delete': {'delete-volumes': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -98,7 +98,7 @@ class TestGroupAction(base.TestCase): self.assertIsNone(sot.reset(self.sess, 'new_status')) - url = 'groups/%s/action' % GROUP_ID + url = f'groups/{GROUP_ID}/action' body = {'reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, diff --git a/openstack/tests/unit/block_storage/v3/test_snapshot.py b/openstack/tests/unit/block_storage/v3/test_snapshot.py index b377d5fca..e925493f1 100644 --- a/openstack/tests/unit/block_storage/v3/test_snapshot.py +++ b/openstack/tests/unit/block_storage/v3/test_snapshot.py @@ 
-105,7 +105,7 @@ class TestSnapshotActions(base.TestCase): self.assertIsNone(sot.force_delete(self.sess)) - url = 'snapshots/%s/action' % FAKE_ID + url = f'snapshots/{FAKE_ID}/action' body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -116,7 +116,7 @@ class TestSnapshotActions(base.TestCase): self.assertIsNone(sot.reset(self.sess, 'new_status')) - url = 'snapshots/%s/action' % FAKE_ID + url = f'snapshots/{FAKE_ID}/action' body = {'os-reset_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -127,7 +127,7 @@ class TestSnapshotActions(base.TestCase): self.assertIsNone(sot.set_status(self.sess, 'new_status')) - url = 'snapshots/%s/action' % FAKE_ID + url = f'snapshots/{FAKE_ID}/action' body = {'os-update_snapshot_status': {'status': 'new_status'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -206,7 +206,7 @@ class TestSnapshotActions(base.TestCase): self.assertIsNone(sot.unmanage(self.sess)) - url = 'snapshots/%s/action' % FAKE_ID + url = f'snapshots/{FAKE_ID}/action' body = {'os-unmanage': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion diff --git a/openstack/tests/unit/block_storage/v3/test_transfer.py b/openstack/tests/unit/block_storage/v3/test_transfer.py index 4114ab30b..d048fd29f 100644 --- a/openstack/tests/unit/block_storage/v3/test_transfer.py +++ b/openstack/tests/unit/block_storage/v3/test_transfer.py @@ -112,7 +112,7 @@ class TestTransfer(base.TestCase): sot.accept(self.sess, auth_key=FAKE_AUTH_KEY) self.sess.post.assert_called_with( - 'volume-transfers/%s/accept' % FAKE_TRANSFER, + f'volume-transfers/{FAKE_TRANSFER}/accept', json={ 'accept': { 'auth_key': FAKE_AUTH_KEY, @@ -134,7 +134,7 @@ class TestTransfer(base.TestCase): sot.accept(self.sess, auth_key=FAKE_AUTH_KEY) self.sess.post.assert_called_with( - 
'os-volume-transfer/%s/accept' % FAKE_TRANSFER, + f'os-volume-transfer/{FAKE_TRANSFER}/accept', json={ 'accept': { 'auth_key': FAKE_AUTH_KEY, diff --git a/openstack/tests/unit/block_storage/v3/test_type.py b/openstack/tests/unit/block_storage/v3/test_type.py index 19f10cd14..174d213b7 100644 --- a/openstack/tests/unit/block_storage/v3/test_type.py +++ b/openstack/tests/unit/block_storage/v3/test_type.py @@ -150,7 +150,7 @@ class TestType(base.TestCase): ) self.sess.get.assert_called_with( - "types/%s/os-volume-type-access" % sot.id + f"types/{sot.id}/os-volume-type-access" ) def test_add_private_access(self): @@ -158,7 +158,7 @@ class TestType(base.TestCase): self.assertIsNone(sot.add_private_access(self.sess, "a")) - url = "types/%s/action" % sot.id + url = f"types/{sot.id}/action" body = {"addProjectAccess": {"project": "a"}} self.sess.post.assert_called_with(url, json=body) @@ -167,6 +167,6 @@ class TestType(base.TestCase): self.assertIsNone(sot.remove_private_access(self.sess, "a")) - url = "types/%s/action" % sot.id + url = f"types/{sot.id}/action" body = {"removeProjectAccess": {"project": "a"}} self.sess.post.assert_called_with(url, json=body) diff --git a/openstack/tests/unit/block_storage/v3/test_volume.py b/openstack/tests/unit/block_storage/v3/test_volume.py index 89ac68037..89a620bd8 100644 --- a/openstack/tests/unit/block_storage/v3/test_volume.py +++ b/openstack/tests/unit/block_storage/v3/test_volume.py @@ -158,7 +158,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.extend(self.sess, '20')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {"os-extend": {"new_size": "20"}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -169,7 +169,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.set_readonly(self.sess, True)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-update_readonly_flag': {'readonly': True}} 
self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -180,7 +180,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.set_readonly(self.sess, False)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-update_readonly_flag': {'readonly': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -191,7 +191,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.set_bootable_status(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-set_bootable': {'bootable': True}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -202,7 +202,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.set_bootable_status(self.sess, False)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-set_bootable': {'bootable': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -213,7 +213,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.set_image_metadata(self.sess, {'foo': 'bar'})) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-set_image_metadata': {'foo': 'bar'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -229,7 +229,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.delete_image_metadata(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body_a = {'os-unset_image_metadata': 'foo'} body_b = {'os-unset_image_metadata': 'baz'} self.sess.post.assert_has_calls( @@ -248,7 +248,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.delete_image_metadata_item(self.sess, 'foo')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-unset_image_metadata': 'foo'} self.sess.post.assert_called_with( url, json=body, 
microversion=sot._max_microversion @@ -259,7 +259,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.reset_status(self.sess, '1', '2', '3')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-reset_status': { 'status': '1', @@ -276,7 +276,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.reset_status(self.sess, status='1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-reset_status': { 'status': '1', @@ -308,7 +308,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.revert_to_snapshot(self.sess, '1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'revert': {'snapshot_id': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -320,7 +320,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.attach(self.sess, '1', instance='2')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-attach': {'mountpoint': '1', 'instance_uuid': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -331,7 +331,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.attach(self.sess, '1', host_name='2')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-attach': {'mountpoint': '1', 'host_name': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -347,7 +347,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.detach(self.sess, '1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-detach': {'attachment_id': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -360,7 +360,7 @@ class TestVolume(base.TestCase): sot.detach(self.sess, '1', force=True, connector={'a': 'b'}) ) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 
'os-force_detach': {'attachment_id': '1', 'connector': {'a': 'b'}} } @@ -373,7 +373,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.unmanage(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-unmanage': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -384,7 +384,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.retype(self.sess, '1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-retype': {'new_type': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -395,7 +395,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.retype(self.sess, '1', migration_policy='2')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-retype': {'new_type': '1', 'migration_policy': '2'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -406,7 +406,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.migrate(self.sess, host='1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-migrate_volume': {'host': '1'}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -421,7 +421,7 @@ class TestVolume(base.TestCase): ) ) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-migrate_volume': { 'host': '1', @@ -447,7 +447,7 @@ class TestVolume(base.TestCase): ) ) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-migrate_volume': { 'cluster': '1', @@ -465,7 +465,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.complete_migration(self.sess, new_volume_id='1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-migrate_volume_completion': {'new_volume': '1', 'error': False} } @@ -480,7 +480,7 @@ class TestVolume(base.TestCase): 
sot.complete_migration(self.sess, new_volume_id='1', error=True) ) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-migrate_volume_completion': {'new_volume': '1', 'error': True} } @@ -493,7 +493,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.force_delete(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-force_delete': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -510,7 +510,7 @@ class TestVolume(base.TestCase): self.assertDictEqual({'a': 'b'}, sot.upload_to_image(self.sess, '1')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-volume_upload_image': {'image_name': '1', 'force': False}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -542,7 +542,7 @@ class TestVolume(base.TestCase): ), ) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = { 'os-volume_upload_image': { 'image_name': '1', @@ -563,7 +563,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.reserve(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-reserve': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -574,7 +574,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.unreserve(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-unreserve': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -585,7 +585,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.begin_detaching(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-begin_detaching': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -596,7 +596,7 @@ class TestVolume(base.TestCase): 
self.assertIsNone(sot.abort_detaching(self.sess)) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-roll_detaching': None} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -614,7 +614,7 @@ class TestVolume(base.TestCase): {'c': 'd'}, sot.init_attachment(self.sess, {'a': 'b'}) ) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-initialize_connection': {'connector': {'a': 'b'}}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -625,7 +625,7 @@ class TestVolume(base.TestCase): self.assertIsNone(sot.terminate_attachment(self.sess, {'a': 'b'})) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {'os-terminate_connection': {'connector': {'a': 'b'}}} self.sess.post.assert_called_with( url, json=body, microversion=sot._max_microversion @@ -733,6 +733,6 @@ class TestVolume(base.TestCase): self.sess.default_microversion = '3.50' self.assertIsNone(sot.extend(self.sess, '20')) - url = 'volumes/%s/action' % FAKE_ID + url = f'volumes/{FAKE_ID}/action' body = {"os-extend": {"new_size": "20"}} self.sess.post.assert_called_with(url, json=body, microversion="3.50") diff --git a/openstack/tests/unit/cloud/test__utils.py b/openstack/tests/unit/cloud/test__utils.py index 6aac3f834..1fd1a3f3c 100644 --- a/openstack/tests/unit/cloud/test__utils.py +++ b/openstack/tests/unit/cloud/test__utils.py @@ -331,7 +331,7 @@ class TestUtils(base.TestCase): # if the use_direct_get flag is set to False(default). 
uuid = uuid4().hex resource = 'network' - func = 'search_%ss' % resource + func = f'search_{resource}s' filters = {} with mock.patch.object(self.cloud, func) as search: _utils._get_entity(self.cloud, resource, uuid, filters) @@ -343,7 +343,7 @@ class TestUtils(base.TestCase): self.cloud.use_direct_get = True name = 'name_no_uuid' resource = 'network' - func = 'search_%ss' % resource + func = f'search_{resource}s' filters = {} with mock.patch.object(self.cloud, func) as search: _utils._get_entity(self.cloud, resource, name, filters) @@ -363,7 +363,7 @@ class TestUtils(base.TestCase): 'security_group', ] for r in resources: - f = 'get_%s_by_id' % r + f = f'get_{r}_by_id' with mock.patch.object(self.cloud, f) as get: _utils._get_entity(self.cloud, r, uuid, {}) get.assert_called_once_with(uuid) @@ -383,7 +383,7 @@ class TestUtils(base.TestCase): filters = {} name = 'name_no_uuid' for r in resources: - f = 'search_%ss' % r + f = f'search_{r}s' with mock.patch.object(self.cloud, f) as search: _utils._get_entity(self.cloud, r, name, {}) search.assert_called_once_with(name, filters) @@ -400,5 +400,5 @@ class TestUtils(base.TestCase): 'security_group', ] for r in resources: - self.assertTrue(hasattr(self.cloud, 'get_%s_by_id' % r)) - self.assertTrue(hasattr(self.cloud, 'search_%ss' % r)) + self.assertTrue(hasattr(self.cloud, f'get_{r}_by_id')) + self.assertTrue(hasattr(self.cloud, f'search_{r}s')) diff --git a/openstack/tests/unit/cloud/test_availability_zones.py b/openstack/tests/unit/cloud/test_availability_zones.py index 68d67a64b..aa277f669 100644 --- a/openstack/tests/unit/cloud/test_availability_zones.py +++ b/openstack/tests/unit/cloud/test_availability_zones.py @@ -29,9 +29,7 @@ class TestAvailabilityZoneNames(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-availability-zone'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone', json=_fake_zone_list, ), ] @@ -46,9 +44,7 @@ class 
TestAvailabilityZoneNames(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-availability-zone'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone', status_code=403, ), ] @@ -63,9 +59,7 @@ class TestAvailabilityZoneNames(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-availability-zone'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone', json=_fake_zone_list, ), ] diff --git a/openstack/tests/unit/cloud/test_baremetal_node.py b/openstack/tests/unit/cloud/test_baremetal_node.py index 616fbc332..5c7fd8dca 100644 --- a/openstack/tests/unit/cloud/test_baremetal_node.py +++ b/openstack/tests/unit/cloud/test_baremetal_node.py @@ -88,7 +88,7 @@ class TestBaremetalNode(base.IronicTestCase): uri=self.get_mock_url( resource='ports', append=['detail'], - qs_elements=['address=%s' % mac_address], + qs_elements=[f'address={mac_address}'], ), json={ 'ports': [ @@ -2041,7 +2041,7 @@ class TestBaremetalNode(base.IronicTestCase): method='GET', uri=self.get_mock_url( resource='ports', - qs_elements=['address=%s' % mac_address], + qs_elements=[f'address={mac_address}'], ), json={ 'ports': [ @@ -2129,7 +2129,7 @@ class TestBaremetalNode(base.IronicTestCase): method='GET', uri=self.get_mock_url( resource='ports', - qs_elements=['address=%s' % mac_address], + qs_elements=[f'address={mac_address}'], ), json={ 'ports': [ diff --git a/openstack/tests/unit/cloud/test_baremetal_ports.py b/openstack/tests/unit/cloud/test_baremetal_ports.py index 3cd4c043d..5a75676cc 100644 --- a/openstack/tests/unit/cloud/test_baremetal_ports.py +++ b/openstack/tests/unit/cloud/test_baremetal_ports.py @@ -83,7 +83,9 @@ class TestBaremetalPort(base.IronicTestCase): resource='ports', append=['detail'], qs_elements=[ - 'node_uuid=%s' % self.fake_baremetal_node['uuid'] + 'node_uuid={}'.format( + self.fake_baremetal_node['uuid'] + ) ], ), json={ @@ -112,7 +114,9 @@ class 
TestBaremetalPort(base.IronicTestCase): resource='ports', append=['detail'], qs_elements=[ - 'node_uuid=%s' % self.fake_baremetal_node['uuid'] + 'node_uuid={}'.format( + self.fake_baremetal_node['uuid'] + ) ], ), status_code=400, @@ -136,7 +140,7 @@ class TestBaremetalPort(base.IronicTestCase): uri=self.get_mock_url( resource='ports', append=['detail'], - qs_elements=['address=%s' % mac], + qs_elements=[f'address={mac}'], ), json={'ports': [self.fake_baremetal_port]}, ), @@ -157,7 +161,7 @@ class TestBaremetalPort(base.IronicTestCase): uri=self.get_mock_url( resource='ports', append=['detail'], - qs_elements=['address=%s' % mac], + qs_elements=[f'address={mac}'], ), json={'ports': []}, ), diff --git a/openstack/tests/unit/cloud/test_endpoints.py b/openstack/tests/unit/cloud/test_endpoints.py index d992e1555..01191e912 100644 --- a/openstack/tests/unit/cloud/test_endpoints.py +++ b/openstack/tests/unit/cloud/test_endpoints.py @@ -40,7 +40,7 @@ class TestCloudEndpoints(base.TestCase): ) def _dummy_url(self): - return 'https://%s.example.com/' % uuid.uuid4().hex + return f'https://{uuid.uuid4().hex}.example.com/' def test_create_endpoint_v3(self): service_data = self._get_service_data() diff --git a/openstack/tests/unit/cloud/test_flavors.py b/openstack/tests/unit/cloud/test_flavors.py index f65dab2cd..de3601815 100644 --- a/openstack/tests/unit/cloud/test_flavors.py +++ b/openstack/tests/unit/cloud/test_flavors.py @@ -26,9 +26,7 @@ class TestFlavors(base.TestCase): [ dict( method='POST', - uri='{endpoint}/flavors'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors', json={'flavor': fakes.FAKE_FLAVOR}, validate=dict( json={ @@ -64,16 +62,12 @@ class TestFlavors(base.TestCase): [ dict( method='GET', - uri='{endpoint}/flavors/vanilla'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla', json=fakes.FAKE_FLAVOR, ), dict( method='DELETE', - uri='{endpoint}/flavors/{id}'.format( - 
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{fakes.FLAVOR_ID}', ), ] ) @@ -87,16 +81,12 @@ class TestFlavors(base.TestCase): [ dict( method='GET', - uri='{endpoint}/flavors/invalid'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/invalid', status_code=404, ), dict( method='GET', - uri='{endpoint}/flavors/detail?is_public=None'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] @@ -112,23 +102,17 @@ class TestFlavors(base.TestCase): [ dict( method='GET', - uri='{endpoint}/flavors/vanilla'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla', json=fakes.FAKE_FLAVOR, ), dict( method='GET', - uri='{endpoint}/flavors/detail?is_public=None'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), dict( method='DELETE', - uri='{endpoint}/flavors/{id}'.format( - endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{fakes.FLAVOR_ID}', status_code=503, ), ] @@ -145,9 +129,7 @@ class TestFlavors(base.TestCase): uris_to_mock = [ dict( method='GET', - uri='{endpoint}/flavors/detail?is_public=None'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] @@ -173,9 +155,7 @@ class TestFlavors(base.TestCase): uris_to_mock = [ dict( method='GET', - uri='{endpoint}/flavors/detail?is_public=None'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] @@ -213,9 +193,7 @@ class TestFlavors(base.TestCase): uris_to_mock = [ dict( method='GET', - uri='{endpoint}/flavors/detail?is_public=None'.format( - 
endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] @@ -241,9 +219,7 @@ class TestFlavors(base.TestCase): uris_to_mock = [ dict( method='GET', - uri='{endpoint}/flavors/detail?is_public=None'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', json={'flavors': fakes.FAKE_FLAVOR_LIST}, ), ] @@ -269,9 +245,7 @@ class TestFlavors(base.TestCase): [ dict( method='GET', - uri='{endpoint}/flavors/detail?is_public=None'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', json={'flavors': []}, ) ] @@ -284,8 +258,8 @@ class TestFlavors(base.TestCase): def test_get_flavor_string_and_int(self): self.use_compute_discovery() - flavor_resource_uri = '{endpoint}/flavors/1/os-extra_specs'.format( - endpoint=fakes.COMPUTE_ENDPOINT + flavor_resource_uri = ( + f'{fakes.COMPUTE_ENDPOINT}/flavors/1/os-extra_specs' ) flavor = fakes.make_fake_flavor('1', 'vanilla') flavor_json = {'extra_specs': {}} @@ -294,9 +268,7 @@ class TestFlavors(base.TestCase): [ dict( method='GET', - uri='{endpoint}/flavors/1'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/1', json=flavor, ), dict(method='GET', uri=flavor_resource_uri, json=flavor_json), @@ -315,9 +287,7 @@ class TestFlavors(base.TestCase): [ dict( method='POST', - uri='{endpoint}/flavors/{id}/os-extra_specs'.format( - endpoint=fakes.COMPUTE_ENDPOINT, id=1 - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{1}/os-extra_specs', json=dict(extra_specs=extra_specs), ) ] @@ -333,9 +303,7 @@ class TestFlavors(base.TestCase): [ dict( method='DELETE', - uri='{endpoint}/flavors/{id}/os-extra_specs/{key}'.format( - endpoint=fakes.COMPUTE_ENDPOINT, id=1, key=key - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{1}/os-extra_specs/{key}', ) for key in keys ] @@ -394,9 +362,7 @@ class TestFlavors(base.TestCase): [ 
dict( method='GET', - uri='{endpoint}/flavors/vanilla/os-flavor-access'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla/os-flavor-access', json={ 'flavor_access': [ {'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'} @@ -410,9 +376,7 @@ class TestFlavors(base.TestCase): def test_get_flavor_by_id(self): self.use_compute_discovery() - flavor_uri = '{endpoint}/flavors/1'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ) + flavor_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1' flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} self.register_uris( @@ -430,12 +394,8 @@ class TestFlavors(base.TestCase): def test_get_flavor_with_extra_specs(self): self.use_compute_discovery() - flavor_uri = '{endpoint}/flavors/1'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ) - flavor_extra_uri = '{endpoint}/flavors/1/os-extra_specs'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ) + flavor_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1' + flavor_extra_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1/os-extra_specs' flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} flavor_extra_json = {'extra_specs': {'name': 'test'}} diff --git a/openstack/tests/unit/cloud/test_floating_ip_neutron.py b/openstack/tests/unit/cloud/test_floating_ip_neutron.py index ad821cb67..17bc87516 100644 --- a/openstack/tests/unit/cloud/test_floating_ip_neutron.py +++ b/openstack/tests/unit/cloud/test_floating_ip_neutron.py @@ -258,7 +258,7 @@ class TestFloatingIP(base.TestCase): dict( method='GET', uri='https://network.example.com/v2.0/floatingips/' - '{id}'.format(id=fid), + f'{fid}', json=self.mock_floating_ip_new_rep, ) ] diff --git a/openstack/tests/unit/cloud/test_floating_ip_pool.py b/openstack/tests/unit/cloud/test_floating_ip_pool.py index ee3d55eae..3f424c419 100644 --- a/openstack/tests/unit/cloud/test_floating_ip_pool.py +++ b/openstack/tests/unit/cloud/test_floating_ip_pool.py @@ -31,9 +31,7 @@ class TestFloatingIPPool(base.TestCase): [ dict( 
method='GET', - uri='{endpoint}/os-floating-ip-pools'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-floating-ip-pools', json={"floating_ip_pools": [{"name": "public"}]}, ), ] diff --git a/openstack/tests/unit/cloud/test_fwaas.py b/openstack/tests/unit/cloud/test_fwaas.py index 935e08dbf..72e03dd3d 100644 --- a/openstack/tests/unit/cloud/test_fwaas.py +++ b/openstack/tests/unit/cloud/test_fwaas.py @@ -1274,7 +1274,7 @@ class TestFirewallGroup(FirewallTestCase): 'network', 'public', append=['v2.0', 'ports'], - qs_elements=['name=%s' % self.mock_port['name']], + qs_elements=['name={}'.format(self.mock_port['name'])], ), json={'ports': [self.mock_port]}, ), @@ -1580,7 +1580,7 @@ class TestFirewallGroup(FirewallTestCase): 'network', 'public', append=['v2.0', 'ports'], - qs_elements=['name=%s' % self.mock_port['name']], + qs_elements=['name={}'.format(self.mock_port['name'])], ), json={'ports': [self.mock_port]}, ), diff --git a/openstack/tests/unit/cloud/test_identity_roles.py b/openstack/tests/unit/cloud/test_identity_roles.py index 1349d7ec2..344f9cbb2 100644 --- a/openstack/tests/unit/cloud/test_identity_roles.py +++ b/openstack/tests/unit/cloud/test_identity_roles.py @@ -286,8 +286,8 @@ class TestIdentityRoles(base.TestCase): uri=self.get_mock_url( resource='role_assignments', qs_elements=[ - 'scope.domain.id=%s' % domain_data.domain_id, - 'user.id=%s' % user_data.user_id, + f'scope.domain.id={domain_data.domain_id}', + f'user.id={user_data.user_id}', 'effective=True', ], ), diff --git a/openstack/tests/unit/cloud/test_image.py b/openstack/tests/unit/cloud/test_image.py index 18fc33853..07748031c 100644 --- a/openstack/tests/unit/cloud/test_image.py +++ b/openstack/tests/unit/cloud/test_image.py @@ -89,16 +89,12 @@ class TestImage(BaseTestImage): [ dict( method='GET', - uri='https://image.example.com/v2/images/{name}'.format( - name=self.image_name - ), + uri=f'https://image.example.com/v2/images/{self.image_name}', 
status_code=404, ), dict( method='GET', - uri='https://image.example.com/v2/images?name={name}'.format( # noqa: E501 - name=self.image_name - ), + uri=f'https://image.example.com/v2/images?name={self.image_name}', # noqa: E501 json=dict(images=[]), ), dict( @@ -121,23 +117,17 @@ class TestImage(BaseTestImage): [ dict( method='GET', - uri='https://image.example.com/v2/images/{name}'.format( - name=self.image_name - ), + uri=f'https://image.example.com/v2/images/{self.image_name}', status_code=404, ), dict( method='GET', - uri='https://image.example.com/v2/images?name={name}'.format( # noqa: E501 - name=self.image_name - ), + uri=f'https://image.example.com/v2/images?name={self.image_name}', # noqa: E501 json=self.fake_search_return, ), dict( method='GET', - uri='https://image.example.com/v2/images/{id}/file'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}/file', content=self.output, headers={ 'Content-Type': 'application/octet-stream', @@ -417,9 +407,7 @@ class TestImage(BaseTestImage): ), json={ 'images': [self.fake_image_dict], - 'next': '/v2/images?marker={marker}'.format( - marker=marker - ), + 'next': f'/v2/images?marker={marker}', }, ), dict( @@ -821,16 +809,12 @@ class TestImage(BaseTestImage): ), dict( method='HEAD', - uri='{endpoint}/{container}'.format( - endpoint=endpoint, container=self.container_name - ), + uri=f'{endpoint}/{self.container_name}', status_code=404, ), dict( method='PUT', - uri='{endpoint}/{container}'.format( - endpoint=endpoint, container=self.container_name - ), + uri=f'{endpoint}/{self.container_name}', status_code=201, headers={ 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', @@ -840,9 +824,7 @@ class TestImage(BaseTestImage): ), dict( method='HEAD', - uri='{endpoint}/{container}'.format( - endpoint=endpoint, container=self.container_name - ), + uri=f'{endpoint}/{self.container_name}', headers={ 'Content-Length': '0', 'X-Container-Object-Count': '0', @@ -867,20 +849,12 @@ class 
TestImage(BaseTestImage): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=endpoint, - container=self.container_name, - object=self.image_name, - ), + uri=f'{endpoint}/{self.container_name}/{self.image_name}', status_code=404, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=endpoint, - container=self.container_name, - object=self.image_name, - ), + uri=f'{endpoint}/{self.container_name}/{self.image_name}', status_code=201, validate=dict( headers={ @@ -903,10 +877,7 @@ class TestImage(BaseTestImage): json=dict( type='import', input={ - 'import_from': '{container}/{object}'.format( - container=self.container_name, - object=self.image_name, - ), + 'import_from': f'{self.container_name}/{self.image_name}', 'image_properties': {'name': self.image_name}, }, ) @@ -952,10 +923,7 @@ class TestImage(BaseTestImage): [ { 'op': 'add', - 'value': '{container}/{object}'.format( - container=self.container_name, - object=self.image_name, - ), + 'value': f'{self.container_name}/{self.image_name}', 'path': '/owner_specified.openstack.object', # noqa: E501 }, { @@ -983,11 +951,7 @@ class TestImage(BaseTestImage): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=endpoint, - container=self.container_name, - object=self.image_name, - ), + uri=f'{endpoint}/{self.container_name}/{self.image_name}', headers={ 'X-Timestamp': '1429036140.50253', 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', @@ -1007,11 +971,7 @@ class TestImage(BaseTestImage): ), dict( method='DELETE', - uri='{endpoint}/{container}/{object}'.format( - endpoint=endpoint, - container=self.container_name, - object=self.image_name, - ), + uri=f'{endpoint}/{self.container_name}/{self.image_name}', ), dict( method='GET', @@ -1069,15 +1029,11 @@ class TestImage(BaseTestImage): ), dict( method='DELETE', - uri='https://image.example.com/v2/images/{id}'.format( - id=self.image_id - ), + 
uri=f'https://image.example.com/v2/images/{self.image_id}', ), dict( method='HEAD', - uri='{endpoint}/{object}'.format( - endpoint=endpoint, object=object_path - ), + uri=f'{endpoint}/{object_path}', headers={ 'X-Timestamp': '1429036140.50253', 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', @@ -1097,9 +1053,7 @@ class TestImage(BaseTestImage): ), dict( method='DELETE', - uri='{endpoint}/{object}'.format( - endpoint=endpoint, object=object_path - ), + uri=f'{endpoint}/{object_path}', ), ] ) @@ -1187,11 +1141,7 @@ class TestImage(BaseTestImage): ), dict( method='DELETE', - uri='{endpoint}/{container}/{object}'.format( - endpoint=endpoint, - container=self.container_name, - object=self.image_name, - ), + uri=f'{endpoint}/{self.container_name}/{self.image_name}', ), ] ) @@ -1230,9 +1180,7 @@ class TestImage(BaseTestImage): 'properties': { 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, - 'owner_specified.openstack.object': 'images/{name}'.format( - name=self.image_name - ), + 'owner_specified.openstack.object': f'images/{self.image_name}', 'is_public': False, }, } @@ -1263,9 +1211,7 @@ class TestImage(BaseTestImage): ), dict( method='PUT', - uri='https://image.example.com/v1/images/{id}'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v1/images/{self.image_id}', json={'image': ret}, validate=dict( headers={ @@ -1297,9 +1243,7 @@ class TestImage(BaseTestImage): 'properties': { 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, - 'owner_specified.openstack.object': 'images/{name}'.format( - name=self.image_name - ), + 'owner_specified.openstack.object': f'images/{self.image_name}', 'is_public': False, }, 'validate_checksum': True, @@ -1331,9 +1275,7 @@ class TestImage(BaseTestImage): ), dict( method='PUT', - uri='https://image.example.com/v1/images/{id}'.format( - id=self.image_id - ), + 
uri=f'https://image.example.com/v1/images/{self.image_id}', status_code=400, validate=dict( headers={ @@ -1344,9 +1286,7 @@ class TestImage(BaseTestImage): ), dict( method='DELETE', - uri='https://image.example.com/v1/images/{id}'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v1/images/{self.image_id}', json={'images': [ret]}, ), ] @@ -1369,9 +1309,7 @@ class TestImage(BaseTestImage): 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, - 'owner_specified.openstack.object': 'images/{name}'.format( - name=self.image_name - ), + 'owner_specified.openstack.object': f'images/{self.image_name}', 'visibility': 'private', } @@ -1382,9 +1320,7 @@ class TestImage(BaseTestImage): self.cloud.update_image_properties( image=image.Image.existing(**ret), **{ - 'owner_specified.openstack.object': 'images/{name}'.format( - name=self.image_name - ) + 'owner_specified.openstack.object': f'images/{self.image_name}' }, ) @@ -1399,9 +1335,7 @@ class TestImage(BaseTestImage): 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, - 'owner_specified.openstack.object': 'images/{name}'.format( - name=self.image_name - ), + 'owner_specified.openstack.object': f'images/{self.image_name}', 'visibility': 'private', } @@ -1449,9 +1383,7 @@ class TestImage(BaseTestImage): ), dict( method='PUT', - uri='https://image.example.com/v2/images/{id}/file'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}/file', status_code=400, validate=dict( headers={ @@ -1461,9 +1393,7 @@ class TestImage(BaseTestImage): ), dict( method='DELETE', - uri='https://image.example.com/v2/images/{id}'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}', ), ] ) @@ -1530,9 +1460,7 @@ class TestImage(BaseTestImage): ), dict( method='DELETE', - 
uri='https://image.example.com/v2/images/{id}'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}', ), ] ) @@ -1574,9 +1502,7 @@ class TestImage(BaseTestImage): 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, - 'owner_specified.openstack.object': 'images/{name}'.format( - name=self.image_name - ), + 'owner_specified.openstack.object': f'images/{self.image_name}', 'int_v': '12345', 'visibility': 'private', 'min_disk': 0, @@ -1627,9 +1553,7 @@ class TestImage(BaseTestImage): ), dict( method='PUT', - uri='https://image.example.com/v2/images/{id}/file'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}/file', validate=dict( headers={ 'Content-Type': 'application/octet-stream', @@ -1638,9 +1562,7 @@ class TestImage(BaseTestImage): ), dict( method='GET', - uri='https://image.example.com/v2/images/{id}'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}', json=ret, ), dict( @@ -1667,9 +1589,7 @@ class TestImage(BaseTestImage): 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, - 'owner_specified.openstack.object': 'images/{name}'.format( - name=self.image_name - ), + 'owner_specified.openstack.object': f'images/{self.image_name}', 'int_v': 12345, 'visibility': 'private', 'min_disk': 0, @@ -1721,9 +1641,7 @@ class TestImage(BaseTestImage): ), dict( method='PUT', - uri='https://image.example.com/v2/images/{id}/file'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}/file', validate=dict( headers={ 'Content-Type': 'application/octet-stream', @@ -1732,9 +1650,7 @@ class TestImage(BaseTestImage): ), dict( method='GET', - uri='https://image.example.com/v2/images/{id}'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}', json=ret, 
), dict( @@ -1761,9 +1677,7 @@ class TestImage(BaseTestImage): 'disk_format': 'qcow2', 'owner_specified.openstack.md5': fakes.NO_MD5, 'owner_specified.openstack.sha256': fakes.NO_SHA256, - 'owner_specified.openstack.object': 'images/{name}'.format( - name=self.image_name - ), + 'owner_specified.openstack.object': f'images/{self.image_name}', 'int_v': '12345', 'protected': False, 'visibility': 'private', @@ -1816,9 +1730,7 @@ class TestImage(BaseTestImage): ), dict( method='PUT', - uri='https://image.example.com/v2/images/{id}/file'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}/file', validate=dict( headers={ 'Content-Type': 'application/octet-stream', @@ -1827,9 +1739,7 @@ class TestImage(BaseTestImage): ), dict( method='GET', - uri='https://image.example.com/v2/images/{id}'.format( - id=self.image_id - ), + uri=f'https://image.example.com/v2/images/{self.image_id}', json=ret, ), dict( @@ -1892,9 +1802,7 @@ class TestImageSuburl(BaseTestImage): ), json={ 'images': [self.fake_image_dict], - 'next': '/v2/images?marker={marker}'.format( - marker=marker - ), + 'next': f'/v2/images?marker={marker}', }, ), dict( diff --git a/openstack/tests/unit/cloud/test_image_snapshot.py b/openstack/tests/unit/cloud/test_image_snapshot.py index bf497f794..a0d582015 100644 --- a/openstack/tests/unit/cloud/test_image_snapshot.py +++ b/openstack/tests/unit/cloud/test_image_snapshot.py @@ -37,10 +37,7 @@ class TestImageSnapshot(base.TestCase): self.get_nova_discovery_mock_dict(), dict( method='POST', - uri='{endpoint}/servers/{server_id}/action'.format( - endpoint=fakes.COMPUTE_ENDPOINT, - server_id=self.server_id, - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', headers=dict( Location='{endpoint}/images/{image_id}'.format( endpoint='https://images.example.com', @@ -87,10 +84,7 @@ class TestImageSnapshot(base.TestCase): self.get_nova_discovery_mock_dict(), dict( method='POST', - 
uri='{endpoint}/servers/{server_id}/action'.format( - endpoint=fakes.COMPUTE_ENDPOINT, - server_id=self.server_id, - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', headers=dict( Location='{endpoint}/images/{image_id}'.format( endpoint='https://images.example.com', diff --git a/openstack/tests/unit/cloud/test_meta.py b/openstack/tests/unit/cloud/test_meta.py index 7b40371d5..58cbd162e 100644 --- a/openstack/tests/unit/cloud/test_meta.py +++ b/openstack/tests/unit/cloud/test_meta.py @@ -530,9 +530,7 @@ class TestMeta(base.TestCase): ), dict( method='GET', - uri='{endpoint}/servers/test-id/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', json={'security_groups': []}, ), ] @@ -609,9 +607,7 @@ class TestMeta(base.TestCase): ), dict( method='GET', - uri='{endpoint}/servers/test-id/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', json={'security_groups': []}, ), ] @@ -685,9 +681,7 @@ class TestMeta(base.TestCase): ), dict( method='GET', - uri='{endpoint}/servers/test-id/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', json={'security_groups': []}, ), ] @@ -804,9 +798,7 @@ class TestMeta(base.TestCase): ), dict( method='GET', - uri='{endpoint}/servers/test-id/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', json={'security_groups': []}, ), ] @@ -865,9 +857,7 @@ class TestMeta(base.TestCase): ), dict( method='GET', - uri='{endpoint}/servers/test-id/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', json={'security_groups': []}, ), ] @@ -947,9 +937,7 @@ class TestMeta(base.TestCase): ), dict( method='GET', - 
uri='{endpoint}/servers/test-id/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', json={'security_groups': []}, ), ] diff --git a/openstack/tests/unit/cloud/test_network.py b/openstack/tests/unit/cloud/test_network.py index 98d0c8646..fe460ab14 100644 --- a/openstack/tests/unit/cloud/test_network.py +++ b/openstack/tests/unit/cloud/test_network.py @@ -373,7 +373,7 @@ class TestNetworks(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % network_name], + qs_elements=[f'name={network_name}'], ), json={'networks': [network]}, ), @@ -574,7 +574,7 @@ class TestNetworks(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % network_name], + qs_elements=[f'name={network_name}'], ), json={'networks': [network]}, ), @@ -640,7 +640,7 @@ class TestNetworks(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % network_name], + qs_elements=[f'name={network_name}'], ), json={'networks': [network]}, ), diff --git a/openstack/tests/unit/cloud/test_object.py b/openstack/tests/unit/cloud/test_object.py index 28a85fb89..88d58f8ce 100644 --- a/openstack/tests/unit/cloud/test_object.py +++ b/openstack/tests/unit/cloud/test_object.py @@ -33,12 +33,8 @@ class BaseTestObject(base.TestCase): self.container = self.getUniqueString() self.object = self.getUniqueString() self.endpoint = self.cloud.object_store.get_endpoint() - self.container_endpoint = '{endpoint}/{container}'.format( - endpoint=self.endpoint, container=self.container - ) - self.object_endpoint = '{endpoint}/{object}'.format( - endpoint=self.container_endpoint, object=self.object - ) + self.container_endpoint = f'{self.endpoint}/{self.container}' + self.object_endpoint = f'{self.container_endpoint}/{self.object}' def _compare_containers(self, exp, real): self.assertDictEqual( @@ -330,7 +326,7 @@ class 
TestObject(BaseTestObject): ) with testtools.ExpectedException( exceptions.SDKException, - "Container not found: %s" % self.container, + f"Container not found: {self.container}", ): self.cloud.get_container_access(self.container) @@ -594,9 +590,7 @@ class TestObject(BaseTestObject): self.assert_calls() def test_list_objects(self): - endpoint = '{endpoint}?format=json'.format( - endpoint=self.container_endpoint - ) + endpoint = f'{self.container_endpoint}?format=json' objects = [ { @@ -619,9 +613,7 @@ class TestObject(BaseTestObject): self._compare_objects(a, b) def test_list_objects_with_prefix(self): - endpoint = '{endpoint}?format=json&prefix=test'.format( - endpoint=self.container_endpoint - ) + endpoint = f'{self.container_endpoint}?format=json&prefix=test' objects = [ { @@ -644,9 +636,7 @@ class TestObject(BaseTestObject): self._compare_objects(a, b) def test_list_objects_exception(self): - endpoint = '{endpoint}?format=json'.format( - endpoint=self.container_endpoint - ) + endpoint = f'{self.container_endpoint}?format=json' self.register_uris( [ dict( @@ -903,20 +893,12 @@ class TestObjectUploads(BaseTestObject): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=404, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, validate=dict( headers={ @@ -972,11 +954,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, validate=dict( headers={ @@ -1008,11 +986,7 @@ class TestObjectUploads(BaseTestObject): ), dict( 
method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=404, ), ] @@ -1021,12 +995,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='PUT', - uri='{endpoint}/{container}/{object}/{index:0>6}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - index=index, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}', status_code=201, ) for index, offset in enumerate( @@ -1038,17 +1007,11 @@ class TestObjectUploads(BaseTestObject): uris_to_mock.append( dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, validate=dict( headers={ - 'x-object-manifest': '{container}/{object}'.format( - container=self.container, object=self.object - ), + 'x-object-manifest': f'{self.container}/{self.object}', 'x-object-meta-x-sdk-md5': self.md5, 'x-object-meta-x-sdk-sha256': self.sha256, } @@ -1088,11 +1051,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=404, ), ] @@ -1101,12 +1060,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='PUT', - uri='{endpoint}/{container}/{object}/{index:0>6}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - index=index, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}', status_code=201, headers=dict(Etag=f'etag{index}'), ) @@ -1119,11 +1073,7 @@ class TestObjectUploads(BaseTestObject): uris_to_mock.append( dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - 
endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, validate=dict( params={'multipart-manifest', 'put'}, @@ -1153,37 +1103,27 @@ class TestObjectUploads(BaseTestObject): 'header mismatch in manifest call', ) - base_object = '/{container}/{object}'.format( - container=self.container, object=self.object - ) + base_object = f'/{self.container}/{self.object}' self.assertEqual( [ { - 'path': "{base_object}/000000".format( - base_object=base_object - ), + 'path': f"{base_object}/000000", 'size_bytes': 25, 'etag': 'etag0', }, { - 'path': "{base_object}/000001".format( - base_object=base_object - ), + 'path': f"{base_object}/000001", 'size_bytes': 25, 'etag': 'etag1', }, { - 'path': "{base_object}/000002".format( - base_object=base_object - ), + 'path': f"{base_object}/000002", 'size_bytes': 25, 'etag': 'etag2', }, { - 'path': "{base_object}/000003".format( - base_object=base_object - ), + 'path': f"{base_object}/000003", 'size_bytes': len(self.object) - 75, 'etag': 'etag3', }, @@ -1210,11 +1150,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=404, ), ] @@ -1223,12 +1159,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='PUT', - uri='{endpoint}/{container}/{object}/{index:0>6}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - index=index, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}', status_code=201, headers=dict(Etag=f'etag{index}'), ) @@ -1243,11 +1174,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + 
uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=400, validate=dict( params={'multipart-manifest', 'put'}, @@ -1259,11 +1186,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=400, validate=dict( params={'multipart-manifest', 'put'}, @@ -1275,11 +1198,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, validate=dict( params={'multipart-manifest', 'put'}, @@ -1311,37 +1230,27 @@ class TestObjectUploads(BaseTestObject): 'header mismatch in manifest call', ) - base_object = '/{container}/{object}'.format( - container=self.container, object=self.object - ) + base_object = f'/{self.container}/{self.object}' self.assertEqual( [ { - 'path': "{base_object}/000000".format( - base_object=base_object - ), + 'path': f"{base_object}/000000", 'size_bytes': 25, 'etag': 'etag0', }, { - 'path': "{base_object}/000001".format( - base_object=base_object - ), + 'path': f"{base_object}/000001", 'size_bytes': 25, 'etag': 'etag1', }, { - 'path': "{base_object}/000002".format( - base_object=base_object - ), + 'path': f"{base_object}/000002", 'size_bytes': 25, 'etag': 'etag2', }, { - 'path': "{base_object}/000003".format( - base_object=base_object - ), + 'path': f"{base_object}/000003", 'size_bytes': len(self.object) - 75, 'etag': 'etag3', }, @@ -1369,11 +1278,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=404, ), ] @@ -1382,12 
+1287,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='PUT', - uri='{endpoint}/{container}/{object}/{index:0>6}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - index=index, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}', status_code=201, headers=dict(Etag=f'etag{index}'), ) @@ -1402,11 +1302,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=400, validate=dict( params={'multipart-manifest', 'put'}, @@ -1418,11 +1314,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=400, validate=dict( params={'multipart-manifest', 'put'}, @@ -1434,11 +1326,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=400, validate=dict( params={'multipart-manifest', 'put'}, @@ -1459,9 +1347,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='GET', - uri='{endpoint}/images?format=json&prefix={prefix}'.format( - endpoint=self.endpoint, prefix=self.object - ), + uri=f'{self.endpoint}/images?format=json&prefix={self.object}', complete_qs=True, json=[ { @@ -1475,9 +1361,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='HEAD', - uri='{endpoint}/images/{object}'.format( - endpoint=self.endpoint, object=self.object - ), + uri=f'{self.endpoint}/images/{self.object}', headers={ 'X-Timestamp': '1429036140.50253', 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', 
@@ -1495,9 +1379,7 @@ class TestObjectUploads(BaseTestObject): ), dict( method='DELETE', - uri='{endpoint}/images/{object}'.format( - endpoint=self.endpoint, object=self.object - ), + uri=f'{self.endpoint}/images/{self.object}', ), ] ) @@ -1536,56 +1418,32 @@ class TestObjectUploads(BaseTestObject): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=404, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000000'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000000', status_code=201, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000001'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000001', status_code=201, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000002'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000002', status_code=201, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000003'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000003', status_code=501, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, ), ] @@ -1619,69 +1477,41 @@ class TestObjectUploads(BaseTestObject): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', 
status_code=404, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000000'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000000', headers={'etag': 'etag0'}, status_code=201, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000001'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000001', headers={'etag': 'etag1'}, status_code=201, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000002'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000002', headers={'etag': 'etag2'}, status_code=201, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000003'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000003', status_code=501, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}/000003'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}/000003', status_code=201, headers={'etag': 'etag3'}, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, validate=dict( params={'multipart-manifest', 'put'}, @@ -1711,37 +1541,27 @@ class TestObjectUploads(BaseTestObject): 'header mismatch in manifest call', ) - base_object = '/{container}/{object}'.format( - container=self.container, object=self.object - ) + base_object = f'/{self.container}/{self.object}' self.assertEqual( [ { - 'path': "{base_object}/000000".format( - base_object=base_object - ), + 'path': f"{base_object}/000000", 'size_bytes': 
25, 'etag': 'etag0', }, { - 'path': "{base_object}/000001".format( - base_object=base_object - ), + 'path': f"{base_object}/000001", 'size_bytes': 25, 'etag': 'etag1', }, { - 'path': "{base_object}/000002".format( - base_object=base_object - ), + 'path': f"{base_object}/000002", 'size_bytes': 25, 'etag': 'etag2', }, { - 'path': "{base_object}/000003".format( - base_object=base_object - ), + 'path': f"{base_object}/000003", 'size_bytes': len(self.object) - 75, 'etag': 'etag3', }, @@ -1762,20 +1582,12 @@ class TestObjectUploads(BaseTestObject): ), dict( method='HEAD', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=200, ), dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, validate=dict(headers={}), ), @@ -1796,11 +1608,7 @@ class TestObjectUploads(BaseTestObject): [ dict( method='PUT', - uri='{endpoint}/{container}/{object}'.format( - endpoint=self.endpoint, - container=self.container, - object=self.object, - ), + uri=f'{self.endpoint}/{self.container}/{self.object}', status_code=201, validate=dict( headers={}, diff --git a/openstack/tests/unit/cloud/test_operator.py b/openstack/tests/unit/cloud/test_operator.py index 1487d3208..e92ddab83 100644 --- a/openstack/tests/unit/cloud/test_operator.py +++ b/openstack/tests/unit/cloud/test_operator.py @@ -91,8 +91,8 @@ class TestOperatorCloud(base.TestCase): self.cloud.config.config['region_name'] = 'testregion' with testtools.ExpectedException( exceptions.SDKException, - "Error getting image endpoint on testcloud:testregion:" - " No service", + "Error getting image endpoint on testcloud:testregion: " + "No service", ): self.cloud.get_session_endpoint("image") diff --git 
a/openstack/tests/unit/cloud/test_port.py b/openstack/tests/unit/cloud/test_port.py index d184ca440..da82cb2d0 100644 --- a/openstack/tests/unit/cloud/test_port.py +++ b/openstack/tests/unit/cloud/test_port.py @@ -507,7 +507,7 @@ class TestPort(base.TestCase): 'network', 'public', append=['v2.0', 'ports'], - qs_elements=['name=%s' % port_name], + qs_elements=[f'name={port_name}'], ), json={'ports': [port1, port2]}, ), diff --git a/openstack/tests/unit/cloud/test_project.py b/openstack/tests/unit/cloud/test_project.py index cbd7d2971..8afa40b67 100644 --- a/openstack/tests/unit/cloud/test_project.py +++ b/openstack/tests/unit/cloud/test_project.py @@ -179,7 +179,7 @@ class TestProject(base.TestCase): method='GET', uri=self.get_mock_url( resource=( - 'projects?domain_id=%s' % project_data.domain_id + f'projects?domain_id={project_data.domain_id}' ) ), status_code=200, @@ -204,7 +204,7 @@ class TestProject(base.TestCase): method='GET', uri=self.get_mock_url( resource=( - 'projects?domain_id=%s' % project_data.domain_id + f'projects?domain_id={project_data.domain_id}' ) ), status_code=200, @@ -250,7 +250,7 @@ class TestProject(base.TestCase): method='GET', uri=self.get_mock_url( resource=( - 'projects?domain_id=%s' % project_data.domain_id + f'projects?domain_id={project_data.domain_id}' ) ), status_code=200, diff --git a/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py b/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py index 1a88497d2..3cdffd4e5 100644 --- a/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py +++ b/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py @@ -104,7 +104,7 @@ class TestQosBandwidthLimitRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -157,7 +157,7 @@ class TestQosBandwidthLimitRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 
'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': []}, ), @@ -216,7 +216,7 @@ class TestQosBandwidthLimitRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -288,7 +288,7 @@ class TestQosBandwidthLimitRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -516,7 +516,7 @@ class TestQosBandwidthLimitRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -590,7 +590,7 @@ class TestQosBandwidthLimitRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), diff --git a/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py b/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py index 30af9b7cc..c3e1fe94e 100644 --- a/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py +++ b/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py @@ -87,7 +87,7 @@ class TestQosDscpMarkingRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -140,7 +140,7 @@ class TestQosDscpMarkingRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': []}, ), @@ -199,7 +199,7 @@ class 
TestQosDscpMarkingRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -361,7 +361,7 @@ class TestQosDscpMarkingRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -435,7 +435,7 @@ class TestQosDscpMarkingRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), diff --git a/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py b/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py index ac04d8d5b..5ef881371 100644 --- a/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py +++ b/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py @@ -88,7 +88,7 @@ class TestQosMinimumBandwidthRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -141,7 +141,7 @@ class TestQosMinimumBandwidthRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': []}, ), @@ -200,7 +200,7 @@ class TestQosMinimumBandwidthRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -361,7 +361,7 @@ class TestQosMinimumBandwidthRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + 
qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -435,7 +435,7 @@ class TestQosMinimumBandwidthRule(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), diff --git a/openstack/tests/unit/cloud/test_qos_policy.py b/openstack/tests/unit/cloud/test_qos_policy.py index bec3e3bee..534633e61 100644 --- a/openstack/tests/unit/cloud/test_qos_policy.py +++ b/openstack/tests/unit/cloud/test_qos_policy.py @@ -86,7 +86,7 @@ class TestQosPolicy(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -225,7 +225,7 @@ class TestQosPolicy(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [self.mock_policy]}, ), @@ -323,7 +323,7 @@ class TestQosPolicy(base.TestCase): 'network', 'public', append=['v2.0', 'qos', 'policies'], - qs_elements=['name=%s' % self.policy_name], + qs_elements=[f'name={self.policy_name}'], ), json={'policies': [policy1, policy2]}, ), diff --git a/openstack/tests/unit/cloud/test_router.py b/openstack/tests/unit/cloud/test_router.py index ab67ac385..0d3064561 100644 --- a/openstack/tests/unit/cloud/test_router.py +++ b/openstack/tests/unit/cloud/test_router.py @@ -100,7 +100,7 @@ class TestRouter(base.TestCase): 'network', 'public', append=['v2.0', 'routers'], - qs_elements=['name=%s' % self.router_name], + qs_elements=[f'name={self.router_name}'], ), json={'routers': [self.mock_router_rep]}, ), @@ -450,7 +450,7 @@ class TestRouter(base.TestCase): 'network', 'public', append=['v2.0', 'routers'], - qs_elements=['name=%s' % self.router_name], + qs_elements=[f'name={self.router_name}'], ), 
json={'routers': [self.mock_router_rep]}, ), @@ -486,7 +486,7 @@ class TestRouter(base.TestCase): 'network', 'public', append=['v2.0', 'routers'], - qs_elements=['name=%s' % self.router_name], + qs_elements=[f'name={self.router_name}'], ), json={'routers': []}, ), @@ -576,7 +576,7 @@ class TestRouter(base.TestCase): 'network', 'public', append=['v2.0', 'ports'], - qs_elements=["device_id=%s" % self.router_id], + qs_elements=[f"device_id={self.router_id}"], ), json={'ports': (internal_ports + external_ports)}, ) diff --git a/openstack/tests/unit/cloud/test_security_groups.py b/openstack/tests/unit/cloud/test_security_groups.py index 9ccb8a593..745d59300 100644 --- a/openstack/tests/unit/cloud/test_security_groups.py +++ b/openstack/tests/unit/cloud/test_security_groups.py @@ -74,7 +74,7 @@ class TestSecurityGroups(base.TestCase): 'network', 'public', append=['v2.0', 'security-groups'], - qs_elements=["project_id=%s" % project_id], + qs_elements=[f"project_id={project_id}"], ), json={'security_groups': [neutron_grp_dict]}, ) @@ -88,9 +88,7 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups?project_id=42'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups?project_id=42', json={'security_groups': []}, ), ] @@ -126,7 +124,7 @@ class TestSecurityGroups(base.TestCase): uri=self.get_mock_url( 'network', 'public', - append=['v2.0', 'security-groups', '%s' % sg_id], + append=['v2.0', 'security-groups', f'{sg_id}'], ), status_code=200, json={}, @@ -144,16 +142,12 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': nova_return}, ), dict( method='DELETE', - uri='{endpoint}/os-security-groups/2'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups/2', ), ] ) @@ 
-184,9 +178,7 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': nova_return}, ), ] @@ -240,8 +232,8 @@ class TestSecurityGroups(base.TestCase): project_id = "861808a93da0484ea1767967c4df8a23" group_name = self.getUniqueString() group_desc = ( - 'security group from' - ' test_create_security_group_neutron_specific_tenant' + 'security group from ' + 'test_create_security_group_neutron_specific_tenant' ) new_group = fakes.make_fake_neutron_security_group( id='2', @@ -331,9 +323,7 @@ class TestSecurityGroups(base.TestCase): [ dict( method='POST', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_group': new_group}, validate=dict( json={ @@ -385,7 +375,7 @@ class TestSecurityGroups(base.TestCase): uri=self.get_mock_url( 'network', 'public', - append=['v2.0', 'security-groups', '%s' % sg_id], + append=['v2.0', 'security-groups', f'{sg_id}'], ), json={'security_group': update_return}, validate=dict( @@ -418,16 +408,12 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': nova_return}, ), dict( method='PUT', - uri='{endpoint}/os-security-groups/2'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups/2', json={'security_group': update_return}, ), ] @@ -586,16 +572,12 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': nova_return}, ), dict( method='POST', - 
uri='{endpoint}/os-security-group-rules'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules', json={'security_group_rule': new_rule}, validate=dict( json={ @@ -642,16 +624,12 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': nova_return}, ), dict( method='POST', - uri='{endpoint}/os-security-group-rules'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules', json={'security_group_rule': new_rule}, validate=dict( json={ @@ -700,7 +678,7 @@ class TestSecurityGroups(base.TestCase): append=[ 'v2.0', 'security-group-rules', - '%s' % rule_id, + f'{rule_id}', ], ), json={}, @@ -717,9 +695,7 @@ class TestSecurityGroups(base.TestCase): [ dict( method='DELETE', - uri='{endpoint}/os-security-group-rules/xyz'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules/xyz', ), ] ) @@ -760,9 +736,7 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': [nova_grp_dict]}, ), ] @@ -779,9 +753,7 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': [nova_grp_dict]}, ), ] @@ -842,16 +814,15 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT, - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': [nova_grp_dict]}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', - 
uri='%s/servers/%s/action' - % (fakes.COMPUTE_ENDPOINT, '1234'), + uri='{}/servers/{}/action'.format( + fakes.COMPUTE_ENDPOINT, '1234' + ), validate=dict( json={'addSecurityGroup': {'name': 'nova-sec-group'}} ), @@ -894,8 +865,9 @@ class TestSecurityGroups(base.TestCase): ), dict( method='POST', - uri='%s/servers/%s/action' - % (fakes.COMPUTE_ENDPOINT, '1234'), + uri='{}/servers/{}/action'.format( + fakes.COMPUTE_ENDPOINT, '1234' + ), validate=dict( json={ 'addSecurityGroup': {'name': 'neutron-sec-group'} @@ -921,16 +893,15 @@ class TestSecurityGroups(base.TestCase): [ dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': [nova_grp_dict]}, ), self.get_nova_discovery_mock_dict(), dict( method='POST', - uri='%s/servers/%s/action' - % (fakes.COMPUTE_ENDPOINT, '1234'), + uri='{}/servers/{}/action'.format( + fakes.COMPUTE_ENDPOINT, '1234' + ), validate=dict( json={ 'removeSecurityGroup': {'name': 'nova-sec-group'} @@ -974,8 +945,9 @@ class TestSecurityGroups(base.TestCase): ), dict( method='POST', - uri='%s/servers/%s/action' - % (fakes.COMPUTE_ENDPOINT, '1234'), + uri='{}/servers/{}/action'.format( + fakes.COMPUTE_ENDPOINT, '1234' + ), validate=dict(json=validate), ), ] @@ -1000,16 +972,12 @@ class TestSecurityGroups(base.TestCase): self.get_nova_discovery_mock_dict(), dict( method='GET', - uri='{endpoint}/servers/detail'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/detail', json={'servers': [fake_server]}, ), dict( method='GET', - uri='{endpoint}/os-security-groups'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', json={'security_groups': [nova_grp_dict]}, ), ] @@ -1064,9 +1032,7 @@ class TestSecurityGroups(base.TestCase): self.get_nova_discovery_mock_dict(), dict( method='GET', - uri='{endpoint}/servers/detail'.format( - 
endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/detail', json={'servers': [fake_server]}, ), ] diff --git a/openstack/tests/unit/cloud/test_server_console.py b/openstack/tests/unit/cloud/test_server_console.py index 872fe27d8..4d1bef820 100644 --- a/openstack/tests/unit/cloud/test_server_console.py +++ b/openstack/tests/unit/cloud/test_server_console.py @@ -33,9 +33,7 @@ class TestServerConsole(base.TestCase): self.get_nova_discovery_mock_dict(), dict( method='POST', - uri='{endpoint}/servers/{id}/action'.format( - endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', json={"output": self.output}, validate=dict(json={'os-getConsoleOutput': {'length': 5}}), ), @@ -53,16 +51,12 @@ class TestServerConsole(base.TestCase): self.get_nova_discovery_mock_dict(), dict( method='GET', - uri='{endpoint}/servers/detail'.format( - endpoint=fakes.COMPUTE_ENDPOINT - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/detail', json={"servers": [self.server]}, ), dict( method='POST', - uri='{endpoint}/servers/{id}/action'.format( - endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', json={"output": self.output}, validate=dict(json={'os-getConsoleOutput': {}}), ), @@ -81,9 +75,7 @@ class TestServerConsole(base.TestCase): self.get_nova_discovery_mock_dict(), dict( method='POST', - uri='{endpoint}/servers/{id}/action'.format( - endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id - ), + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', status_code=400, validate=dict(json={'os-getConsoleOutput': {}}), ), diff --git a/openstack/tests/unit/cloud/test_stack.py b/openstack/tests/unit/cloud/test_stack.py index 91b7acce1..265b199d6 100644 --- a/openstack/tests/unit/cloud/test_stack.py +++ b/openstack/tests/unit/cloud/test_stack.py @@ -45,9 +45,7 @@ class TestStack(base.TestCase): [ dict( method='GET', - 
uri='{endpoint}/stacks'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', json={"stacks": fake_stacks}, ), ] @@ -88,9 +86,7 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', status_code=404, ) ] @@ -110,9 +106,7 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', json={"stacks": fake_stacks}, ), ] @@ -134,9 +128,7 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', json={"stacks": fake_stacks}, ), ] @@ -151,9 +143,7 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', status_code=404, ) ] @@ -167,36 +157,20 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks/{name}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501 ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', json={"stack": self.stack}, 
), dict( method='DELETE', - uri='{endpoint}/stacks/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', ), ] ) @@ -209,9 +183,7 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks/stack_name?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, resolve=resolve - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/stack_name?{resolve}', status_code=404, ), ] @@ -225,36 +197,20 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501 ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', json={"stack": self.stack}, ), dict( method='DELETE', - uri='{endpoint}/stacks/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', status_code=400, reason="ouch", ), @@ -279,29 +235,15 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks/{name}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}', status_code=302, headers=dict( - 
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501 ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', json={"stack": self.stack}, ), dict( @@ -316,17 +258,11 @@ class TestStack(base.TestCase): ), dict( method='DELETE', - uri='{endpoint}/stacks/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', ), dict( method='GET', - uri='{endpoint}/stacks/{name}/events?{qs}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - qs=marker_qs, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?{marker_qs}', complete_qs=True, json={ "events": [ @@ -341,11 +277,7 @@ class TestStack(base.TestCase): ), dict( method='GET', - uri='{endpoint}/stacks/{name}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}', status_code=404, ), ] @@ -369,29 +301,15 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ) + 
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501 ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', json={"stack": self.stack}, ), dict( @@ -406,17 +324,11 @@ class TestStack(base.TestCase): ), dict( method='DELETE', - uri='{endpoint}/stacks/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', ), dict( method='GET', - uri='{endpoint}/stacks/{id}/events?{qs}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - qs=marker_qs, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}/events?{marker_qs}', complete_qs=True, json={ "events": [ @@ -430,11 +342,7 @@ class TestStack(base.TestCase): ), dict( method='GET', - uri='{endpoint}/stacks/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}', status_code=404, ), ] @@ -457,29 +365,15 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501 ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - 
id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', json={"stack": self.stack}, ), dict( @@ -494,17 +388,11 @@ class TestStack(base.TestCase): ), dict( method='DELETE', - uri='{endpoint}/stacks/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', ), dict( method='GET', - uri='{endpoint}/stacks/{id}/events?{qs}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - qs=marker_qs, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}/events?{marker_qs}', complete_qs=True, json={ "events": [ @@ -518,27 +406,15 @@ class TestStack(base.TestCase): ), dict( method='GET', - uri='{endpoint}/stacks/{id}?resolve_outputs=False'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?resolve_outputs=False', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501 - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501 ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - resolve=resolve, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', json={"stack": failed_stack}, ), ] @@ -557,9 +433,7 @@ class TestStack(base.TestCase): [ dict( method='POST', - uri='{endpoint}/stacks'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', json={"stack": self.stack}, validate=dict( json={ @@ -574,26 +448,15 @@ class TestStack(base.TestCase): ), dict( method='GET', - 
uri='{endpoint}/stacks/{name}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', json={"stack": self.stack}, ), ] @@ -616,9 +479,7 @@ class TestStack(base.TestCase): [ dict( method='POST', - uri='{endpoint}/stacks'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', json={"stack": self.stack}, validate=dict( json={ @@ -633,10 +494,7 @@ class TestStack(base.TestCase): ), dict( method='GET', - uri='{endpoint}/stacks/{name}/events?sort_dir=asc'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?sort_dir=asc', json={ "events": [ fakes.make_fake_stack_event( @@ -650,26 +508,15 @@ class TestStack(base.TestCase): ), dict( method='GET', - uri='{endpoint}/stacks/{name}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ), + 
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', json={"stack": self.stack}, ), ] @@ -692,10 +539,7 @@ class TestStack(base.TestCase): [ dict( method='PUT', - uri='{endpoint}/stacks/{name}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', validate=dict( json={ 'disable_rollback': False, @@ -709,26 +553,15 @@ class TestStack(base.TestCase): ), dict( method='GET', - uri='{endpoint}/stacks/{name}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', json={"stack": self.stack}, ), ] @@ -768,10 +601,7 @@ class TestStack(base.TestCase): ), dict( method='PUT', - uri='{endpoint}/stacks/{name}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', validate=dict( json={ 'disable_rollback': False, @@ -785,11 +615,7 @@ class TestStack(base.TestCase): ), dict( method='GET', - uri='{endpoint}/stacks/{name}/events?{qs}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - qs=marker_qs, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?{marker_qs}', json={ "events": [ fakes.make_fake_stack_event( @@ -803,26 +629,15 @@ class TestStack(base.TestCase): ), dict( method='GET', - uri='{endpoint}/stacks/{name}'.format( - 
endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', json={"stack": self.stack}, ), ] @@ -841,26 +656,15 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks/{name}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' ), ), dict( method='GET', - uri='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', json={"stack": self.stack}, ), ] @@ -881,26 +685,15 @@ class TestStack(base.TestCase): [ dict( method='GET', - uri='{endpoint}/stacks/{name}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', status_code=302, headers=dict( - location='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ) + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' ), ), dict( method='GET', - 
uri='{endpoint}/stacks/{name}/{id}'.format( - endpoint=fakes.ORCHESTRATION_ENDPOINT, - id=self.stack_id, - name=self.stack_name, - ), + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', json={"stack": in_progress}, ), ] diff --git a/openstack/tests/unit/cloud/test_subnet.py b/openstack/tests/unit/cloud/test_subnet.py index 0005a1969..11bf50579 100644 --- a/openstack/tests/unit/cloud/test_subnet.py +++ b/openstack/tests/unit/cloud/test_subnet.py @@ -88,7 +88,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'subnets'], - qs_elements=['name=%s' % self.subnet_name], + qs_elements=[f'name={self.subnet_name}'], ), json={'subnets': [self.mock_subnet_rep]}, ), @@ -143,7 +143,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % self.network_name], + qs_elements=[f'name={self.network_name}'], ), json={'networks': [self.mock_network_rep]}, ), @@ -198,7 +198,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % self.network_name], + qs_elements=[f'name={self.network_name}'], ), json={'networks': [self.mock_network_rep]}, ), @@ -246,7 +246,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % self.network_name], + qs_elements=[f'name={self.network_name}'], ), json={'networks': [self.mock_network_rep]}, ), @@ -284,7 +284,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % self.network_name], + qs_elements=[f'name={self.network_name}'], ), json={'networks': [self.mock_network_rep]}, ), @@ -345,7 +345,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % self.network_name], + qs_elements=[f'name={self.network_name}'], ), json={'networks': [self.mock_network_rep]}, ), @@ -468,7 +468,7 @@ class TestSubnet(base.TestCase): 
'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % self.network_name], + qs_elements=[f'name={self.network_name}'], ), json={'networks': [net1, net2]}, ), @@ -513,7 +513,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % self.network_name], + qs_elements=[f'name={self.network_name}'], ), json={'networks': [self.mock_network_rep]}, ), @@ -585,7 +585,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'networks'], - qs_elements=['name=%s' % self.network_name], + qs_elements=[f'name={self.network_name}'], ), json={'networks': [self.mock_network_rep]}, ), @@ -659,7 +659,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'subnets'], - qs_elements=['name=%s' % self.subnet_name], + qs_elements=[f'name={self.subnet_name}'], ), json={'subnets': [self.mock_subnet_rep]}, ), @@ -724,7 +724,7 @@ class TestSubnet(base.TestCase): 'network', 'public', append=['v2.0', 'subnets'], - qs_elements=['name=%s' % self.subnet_name], + qs_elements=[f'name={self.subnet_name}'], ), json={'subnets': [subnet1, subnet2]}, ), diff --git a/openstack/tests/unit/cloud/test_update_server.py b/openstack/tests/unit/cloud/test_update_server.py index 275c0f65a..214a3e4e4 100644 --- a/openstack/tests/unit/cloud/test_update_server.py +++ b/openstack/tests/unit/cloud/test_update_server.py @@ -57,7 +57,7 @@ class TestUpdateServer(base.TestCase): 'compute', 'public', append=['servers', 'detail'], - qs_elements=['name=%s' % self.server_name], + qs_elements=[f'name={self.server_name}'], ), json={'servers': [self.fake_server]}, ), @@ -108,7 +108,7 @@ class TestUpdateServer(base.TestCase): 'compute', 'public', append=['servers', 'detail'], - qs_elements=['name=%s' % self.server_name], + qs_elements=[f'name={self.server_name}'], ), json={'servers': [self.fake_server]}, ), diff --git a/openstack/tests/unit/cloud/test_users.py b/openstack/tests/unit/cloud/test_users.py index 
de5a12b27..042916d10 100644 --- a/openstack/tests/unit/cloud/test_users.py +++ b/openstack/tests/unit/cloud/test_users.py @@ -84,8 +84,8 @@ class TestUsers(base.TestCase): ) with testtools.ExpectedException( exceptions.SDKException, - "User or project creation requires an explicit" - " domain_id argument.", + "User or project creation requires an explicit " + "domain_id argument.", ): self.cloud.create_user( name=user_data.name, @@ -105,7 +105,7 @@ class TestUsers(base.TestCase): method='GET', uri=self._get_keystone_mock_url( resource='users', - qs_elements=['name=%s' % user_data.name], + qs_elements=[f'name={user_data.name}'], ), status_code=200, json=self._get_user_list(user_data), diff --git a/openstack/tests/unit/cloud/test_volume.py b/openstack/tests/unit/cloud/test_volume.py index 8c41006b8..44ba20831 100644 --- a/openstack/tests/unit/cloud/test_volume.py +++ b/openstack/tests/unit/cloud/test_volume.py @@ -235,8 +235,9 @@ class TestVolume(base.TestCase): with testtools.ExpectedException( exceptions.SDKException, - "Volume %s is not available. Status is '%s'" - % (volume['id'], volume['status']), + "Volume {} is not available. 
Status is '{}'".format( + volume['id'], volume['status'] + ), ): self.cloud.attach_volume(server, volume) self.assertEqual(0, len(self.adapter.request_history)) @@ -251,8 +252,9 @@ class TestVolume(base.TestCase): with testtools.ExpectedException( exceptions.SDKException, - "Volume %s already attached to server %s on device %s" - % (volume['id'], server['id'], device_id), + "Volume {} already attached to server {} on device {}".format( + volume['id'], server['id'], device_id + ), ): self.cloud.attach_volume(server, volume) self.assertEqual(0, len(self.adapter.request_history)) diff --git a/openstack/tests/unit/clustering/v1/test_cluster.py b/openstack/tests/unit/clustering/v1/test_cluster.py index a359150c6..4a5da101c 100644 --- a/openstack/tests/unit/clustering/v1/test_cluster.py +++ b/openstack/tests/unit/clustering/v1/test_cluster.py @@ -121,7 +121,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.scale_in(sess, 3)) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'scale_in': {'count': 3}} sess.post.assert_called_once_with(url, json=body) @@ -133,7 +133,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.scale_out(sess, 3)) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'scale_out': {'count': 3}} sess.post.assert_called_once_with(url, json=body) @@ -145,7 +145,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.resize(sess, foo='bar', zoo=5)) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'resize': {'foo': 'bar', 'zoo': 5}} sess.post.assert_called_once_with(url, json=body) @@ -157,7 +157,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.add_nodes(sess, ['node-33'])) - 
url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'add_nodes': {'nodes': ['node-33']}} sess.post.assert_called_once_with(url, json=body) @@ -169,7 +169,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.del_nodes(sess, ['node-11'])) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'del_nodes': {'nodes': ['node-11']}} sess.post.assert_called_once_with(url, json=body) @@ -184,7 +184,7 @@ class TestCluster(base.TestCase): 'destroy_after_deletion': True, } self.assertEqual('', sot.del_nodes(sess, ['node-11'], **params)) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = { 'del_nodes': { 'nodes': ['node-11'], @@ -201,7 +201,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.replace_nodes(sess, {'node-22': 'node-44'})) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'replace_nodes': {'nodes': {'node-22': 'node-44'}}} sess.post.assert_called_once_with(url, json=body) @@ -217,7 +217,7 @@ class TestCluster(base.TestCase): } self.assertEqual('', sot.policy_attach(sess, 'POLICY', **params)) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = { 'policy_attach': { 'policy_id': 'POLICY', @@ -235,7 +235,7 @@ class TestCluster(base.TestCase): sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.policy_detach(sess, 'POLICY')) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'policy_detach': {'policy_id': 'POLICY'}} sess.post.assert_called_once_with(url, json=body) @@ -249,7 +249,7 @@ class TestCluster(base.TestCase): params = {'enabled': False} self.assertEqual('', sot.policy_update(sess, 'POLICY', **params)) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'policy_update': {'policy_id': 
'POLICY', 'enabled': False}} sess.post.assert_called_once_with(url, json=body) @@ -261,7 +261,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.check(sess)) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'check': {}} sess.post.assert_called_once_with(url, json=body) @@ -273,7 +273,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.recover(sess)) - url = 'clusters/%s/actions' % sot.id + url = f'clusters/{sot.id}/actions' body = {'recover': {}} sess.post.assert_called_once_with(url, json=body) @@ -285,7 +285,7 @@ class TestCluster(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.op(sess, 'dance', style='tango')) - url = 'clusters/%s/ops' % sot.id + url = f'clusters/{sot.id}/ops' body = {'dance': {'style': 'tango'}} sess.post.assert_called_once_with(url, json=body) @@ -302,6 +302,6 @@ class TestCluster(base.TestCase): res = sot.force_delete(sess) self.assertEqual(fake_action_id, res.id) - url = 'clusters/%s' % sot.id + url = f'clusters/{sot.id}' body = {'force': True} sess.delete.assert_called_once_with(url, json=body) diff --git a/openstack/tests/unit/clustering/v1/test_node.py b/openstack/tests/unit/clustering/v1/test_node.py index 28bdaf814..0627efe81 100644 --- a/openstack/tests/unit/clustering/v1/test_node.py +++ b/openstack/tests/unit/clustering/v1/test_node.py @@ -75,7 +75,7 @@ class TestNode(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.check(sess)) - url = 'nodes/%s/actions' % sot.id + url = f'nodes/{sot.id}/actions' body = {'check': {}} sess.post.assert_called_once_with(url, json=body) @@ -87,7 +87,7 @@ class TestNode(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.recover(sess)) - url = 'nodes/%s/actions' % sot.id + url 
= f'nodes/{sot.id}/actions' body = {'recover': {}} sess.post.assert_called_once_with(url, json=body) @@ -99,7 +99,7 @@ class TestNode(base.TestCase): sess = mock.Mock() sess.post = mock.Mock(return_value=resp) self.assertEqual('', sot.op(sess, 'dance', style='tango')) - url = 'nodes/%s/ops' % sot.id + url = f'nodes/{sot.id}/ops' sess.post.assert_called_once_with( url, json={'dance': {'style': 'tango'}} ) @@ -150,7 +150,7 @@ class TestNode(base.TestCase): res = sot.force_delete(sess) self.assertEqual(fake_action_id, res.id) - url = 'nodes/%s' % sot.id + url = f'nodes/{sot.id}' body = {'force': True} sess.delete.assert_called_once_with(url, json=body) diff --git a/openstack/tests/unit/clustering/v1/test_profile_type.py b/openstack/tests/unit/clustering/v1/test_profile_type.py index d2f084147..b61030c9e 100644 --- a/openstack/tests/unit/clustering/v1/test_profile_type.py +++ b/openstack/tests/unit/clustering/v1/test_profile_type.py @@ -53,5 +53,5 @@ class TestProfileType(base.TestCase): sess = mock.Mock() sess.get = mock.Mock(return_value=resp) self.assertEqual('', sot.type_ops(sess)) - url = 'profile-types/%s/ops' % sot.id + url = f'profile-types/{sot.id}/ops' sess.get.assert_called_once_with(url) diff --git a/openstack/tests/unit/config/test_from_conf.py b/openstack/tests/unit/config/test_from_conf.py index a6e82c12a..032d72c42 100644 --- a/openstack/tests/unit/config/test_from_conf.py +++ b/openstack/tests/unit/config/test_from_conf.py @@ -285,8 +285,8 @@ class TestFromConf(base.TestCase): exceptions.ServiceDisabledException, getattr, adap, 'get' ) self.assertIn( - "Service '%s' is disabled because its configuration " - "could not be loaded." 
% service_type, + f"Service '{service_type}' is disabled because its configuration " + "could not be loaded.", ex.message, ) self.assertIn(expected_reason, ex.message) diff --git a/openstack/tests/unit/database/v1/test_instance.py b/openstack/tests/unit/database/v1/test_instance.py index bb2df451d..6a0d6d879 100644 --- a/openstack/tests/unit/database/v1/test_instance.py +++ b/openstack/tests/unit/database/v1/test_instance.py @@ -67,7 +67,7 @@ class TestInstance(base.TestCase): self.assertEqual(response.body['user'], sot.enable_root_user(sess)) - url = "instances/%s/root" % IDENTIFIER + url = f"instances/{IDENTIFIER}/root" sess.post.assert_called_with( url, ) @@ -82,7 +82,7 @@ class TestInstance(base.TestCase): self.assertTrue(sot.is_root_enabled(sess)) - url = "instances/%s/root" % IDENTIFIER + url = f"instances/{IDENTIFIER}/root" sess.get.assert_called_with( url, ) @@ -96,7 +96,7 @@ class TestInstance(base.TestCase): self.assertIsNone(sot.restart(sess)) - url = "instances/%s/action" % IDENTIFIER + url = f"instances/{IDENTIFIER}/action" body = {'restart': None} sess.post.assert_called_with(url, json=body) @@ -110,7 +110,7 @@ class TestInstance(base.TestCase): self.assertIsNone(sot.resize(sess, flavor)) - url = "instances/%s/action" % IDENTIFIER + url = f"instances/{IDENTIFIER}/action" body = {'resize': {'flavorRef': flavor}} sess.post.assert_called_with(url, json=body) @@ -124,6 +124,6 @@ class TestInstance(base.TestCase): self.assertIsNone(sot.resize_volume(sess, size)) - url = "instances/%s/action" % IDENTIFIER + url = f"instances/{IDENTIFIER}/action" body = {'resize': {'volume': size}} sess.post.assert_called_with(url, json=body) diff --git a/openstack/tests/unit/image/v2/test_image.py b/openstack/tests/unit/image/v2/test_image.py index f402c7215..9638c7771 100644 --- a/openstack/tests/unit/image/v2/test_image.py +++ b/openstack/tests/unit/image/v2/test_image.py @@ -257,7 +257,7 @@ class TestImage(base.TestCase): sot.add_tag(self.sess, tag) 
self.sess.put.assert_called_with( - 'images/IDENTIFIER/tags/%s' % tag, + f'images/IDENTIFIER/tags/{tag}', ) def test_remove_tag(self): @@ -266,7 +266,7 @@ class TestImage(base.TestCase): sot.remove_tag(self.sess, tag) self.sess.delete.assert_called_with( - 'images/IDENTIFIER/tags/%s' % tag, + f'images/IDENTIFIER/tags/{tag}', ) def test_import_image(self): diff --git a/openstack/tests/unit/key_manager/v1/test_container.py b/openstack/tests/unit/key_manager/v1/test_container.py index bb43d6f5b..82d9b2376 100644 --- a/openstack/tests/unit/key_manager/v1/test_container.py +++ b/openstack/tests/unit/key_manager/v1/test_container.py @@ -15,7 +15,7 @@ from openstack.tests.unit import base ID_VAL = "123" -IDENTIFIER = 'http://localhost/containers/%s' % ID_VAL +IDENTIFIER = f'http://localhost/containers/{ID_VAL}' EXAMPLE = { 'container_ref': IDENTIFIER, 'created': '2015-03-09T12:14:57.233772', diff --git a/openstack/tests/unit/key_manager/v1/test_order.py b/openstack/tests/unit/key_manager/v1/test_order.py index bdb1198f0..a7b93c5d6 100644 --- a/openstack/tests/unit/key_manager/v1/test_order.py +++ b/openstack/tests/unit/key_manager/v1/test_order.py @@ -16,13 +16,13 @@ from openstack.tests.unit import base ID_VAL = "123" SECRET_ID = "5" -IDENTIFIER = 'http://localhost/orders/%s' % ID_VAL +IDENTIFIER = f'http://localhost/orders/{ID_VAL}' EXAMPLE = { 'created': '1', 'creator_id': '2', 'meta': {'key': '3'}, 'order_ref': IDENTIFIER, - 'secret_ref': 'http://localhost/secrets/%s' % SECRET_ID, + 'secret_ref': f'http://localhost/secrets/{SECRET_ID}', 'status': '6', 'sub_status': '7', 'sub_status_message': '8', diff --git a/openstack/tests/unit/key_manager/v1/test_secret.py b/openstack/tests/unit/key_manager/v1/test_secret.py index b0493fa18..6a9aa8317 100644 --- a/openstack/tests/unit/key_manager/v1/test_secret.py +++ b/openstack/tests/unit/key_manager/v1/test_secret.py @@ -16,7 +16,7 @@ from openstack.key_manager.v1 import secret from openstack.tests.unit import base ID_VAL = 
"123" -IDENTIFIER = 'http://localhost:9311/v1/secrets/%s' % ID_VAL +IDENTIFIER = f'http://localhost:9311/v1/secrets/{ID_VAL}' EXAMPLE = { 'algorithm': '1', 'bit_length': '2', diff --git a/openstack/tests/unit/message/v2/test_queue.py b/openstack/tests/unit/message/v2/test_queue.py index ca837d5ee..03932d99f 100644 --- a/openstack/tests/unit/message/v2/test_queue.py +++ b/openstack/tests/unit/message/v2/test_queue.py @@ -70,7 +70,7 @@ class TestQueue(base.TestCase): sot._translate_response = mock.Mock() res = sot.create(sess) - url = 'queues/%s' % FAKE1['name'] + url = 'queues/{}'.format(FAKE1['name']) headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', @@ -89,7 +89,7 @@ class TestQueue(base.TestCase): sot._translate_response = mock.Mock() res = sot.create(sess) - url = 'queues/%s' % FAKE2['name'] + url = 'queues/{}'.format(FAKE2['name']) headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 'OLD_PROJECT_ID', @@ -110,7 +110,7 @@ class TestQueue(base.TestCase): sot._translate_response = mock.Mock() res = sot.fetch(sess) - url = 'queues/%s' % FAKE1['name'] + url = 'queues/{}'.format(FAKE1['name']) headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', @@ -129,7 +129,7 @@ class TestQueue(base.TestCase): sot._translate_response = mock.Mock() res = sot.fetch(sess) - url = 'queues/%s' % FAKE2['name'] + url = 'queues/{}'.format(FAKE2['name']) headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 'OLD_PROJECT_ID', @@ -150,7 +150,7 @@ class TestQueue(base.TestCase): sot._translate_response = mock.Mock() sot.delete(sess) - url = 'queues/%s' % FAKE1['name'] + url = 'queues/{}'.format(FAKE1['name']) headers = { 'Client-ID': 'NEW_CLIENT_ID', 'X-PROJECT-ID': 'NEW_PROJECT_ID', @@ -168,7 +168,7 @@ class TestQueue(base.TestCase): sot._translate_response = mock.Mock() sot.delete(sess) - url = 'queues/%s' % FAKE2['name'] + url = 'queues/{}'.format(FAKE2['name']) headers = { 'Client-ID': 'OLD_CLIENT_ID', 'X-PROJECT-ID': 
'OLD_PROJECT_ID', diff --git a/openstack/tests/unit/network/v2/test_bgp_speaker.py b/openstack/tests/unit/network/v2/test_bgp_speaker.py index 848aa590d..45cf617e0 100644 --- a/openstack/tests/unit/network/v2/test_bgp_speaker.py +++ b/openstack/tests/unit/network/v2/test_bgp_speaker.py @@ -169,7 +169,7 @@ class TestBgpSpeaker(base.TestCase): self.assertIsNone(sot.add_bgp_speaker_to_dragent(sess, agent_id)) body = {'bgp_speaker_id': sot.id} - url = 'agents/%s/bgp-drinstances' % agent_id + url = f'agents/{agent_id}/bgp-drinstances' sess.post.assert_called_with(url, json=body) def test_remove_bgp_speaker_from_dragent(self): diff --git a/openstack/tests/unit/object_store/v1/test_container.py b/openstack/tests/unit/object_store/v1/test_container.py index d67e72dc3..7908ff7fe 100644 --- a/openstack/tests/unit/object_store/v1/test_container.py +++ b/openstack/tests/unit/object_store/v1/test_container.py @@ -21,9 +21,7 @@ class TestContainer(base.TestCase): super().setUp() self.container = self.getUniqueString() self.endpoint = self.cloud.object_store.get_endpoint() + '/' - self.container_endpoint = '{endpoint}{container}'.format( - endpoint=self.endpoint, container=self.container - ) + self.container_endpoint = f'{self.endpoint}{self.container}' self.body = { "count": 2, diff --git a/openstack/tests/unit/object_store/v1/test_proxy.py b/openstack/tests/unit/object_store/v1/test_proxy.py index 14f6535d5..55cc5fe28 100644 --- a/openstack/tests/unit/object_store/v1/test_proxy.py +++ b/openstack/tests/unit/object_store/v1/test_proxy.py @@ -45,9 +45,7 @@ class TestObjectStoreProxy(test_proxy_base.TestProxyBase): self.proxy = self.cloud.object_store self.container = self.getUniqueString() self.endpoint = self.cloud.object_store.get_endpoint() + '/' - self.container_endpoint = '{endpoint}{container}'.format( - endpoint=self.endpoint, container=self.container - ) + self.container_endpoint = f'{self.endpoint}{self.container}' def test_account_metadata_get(self): self.verify_head( 
@@ -132,13 +130,13 @@ class TestObjectStoreProxy(test_proxy_base.TestProxyBase): def test_object_get(self): with requests_mock.Mocker() as m: - m.get("%scontainer/object" % self.endpoint, text="data") + m.get(f"{self.endpoint}container/object", text="data") res = self.proxy.get_object("object", container="container") self.assertIsNone(res.data) def test_object_get_write_file(self): with requests_mock.Mocker() as m: - m.get("%scontainer/object" % self.endpoint, text="data") + m.get(f"{self.endpoint}container/object", text="data") with tempfile.NamedTemporaryFile() as f: self.proxy.get_object( "object", container="container", outfile=f.name @@ -148,7 +146,7 @@ class TestObjectStoreProxy(test_proxy_base.TestProxyBase): def test_object_get_remember_content(self): with requests_mock.Mocker() as m: - m.get("%scontainer/object" % self.endpoint, text="data") + m.get(f"{self.endpoint}container/object", text="data") res = self.proxy.get_object( "object", container="container", remember_content=True ) @@ -657,8 +655,8 @@ class TestTempURLUnicodePathAndKey(TestTempURL): url = '/v1/\u00e4/c/\u00f3' key = 'k\u00e9y' expected_url = ( - '%s?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' - ) % url + f'{url}?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' + ) expected_body = '\n'.join( [ 'GET', @@ -672,8 +670,8 @@ class TestTempURLUnicodePathBytesKey(TestTempURL): url = '/v1/\u00e4/c/\u00f3' key = 'k\u00e9y'.encode() expected_url = ( - '%s?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' - ) % url + f'{url}?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' + ) expected_body = '\n'.join( [ 'GET', diff --git a/openstack/tests/unit/orchestration/v1/test_stack.py b/openstack/tests/unit/orchestration/v1/test_stack.py index 9019887dc..5b9c72e90 100644 --- a/openstack/tests/unit/orchestration/v1/test_stack.py +++ b/openstack/tests/unit/orchestration/v1/test_stack.py @@ -237,7 +237,7 @@ class TestStack(base.TestCase): ex = 
self.assertRaises(exceptions.NotFoundException, sot.fetch, sess) self.assertEqual('oops', str(ex)) ex = self.assertRaises(exceptions.NotFoundException, sot.fetch, sess) - self.assertEqual('No stack found for %s' % FAKE_ID, str(ex)) + self.assertEqual(f'No stack found for {FAKE_ID}', str(ex)) def test_abandon(self): sess = mock.Mock() @@ -333,7 +333,7 @@ class TestStack(base.TestCase): mock_response.headers = {} mock_response.json.return_value = {} sess.post = mock.Mock(return_value=mock_response) - url = "stacks/%s/actions" % FAKE_ID + url = f"stacks/{FAKE_ID}/actions" body = {"suspend": None} sot = stack.Stack(**FAKE) @@ -350,7 +350,7 @@ class TestStack(base.TestCase): mock_response.headers = {} mock_response.json.return_value = {} sess.post = mock.Mock(return_value=mock_response) - url = "stacks/%s/actions" % FAKE_ID + url = f"stacks/{FAKE_ID}/actions" body = {"resume": None} diff --git a/openstack/tests/unit/shared_file_system/v2/test_share.py b/openstack/tests/unit/shared_file_system/v2/test_share.py index f45bc9c6c..61f98fdce 100644 --- a/openstack/tests/unit/shared_file_system/v2/test_share.py +++ b/openstack/tests/unit/shared_file_system/v2/test_share.py @@ -224,7 +224,7 @@ class TestShareActions(TestShares): self.assertIsNone(sot.unmanage(self.sess)) - url = 'shares/%s/action' % IDENTIFIER + url = f'shares/{IDENTIFIER}/action' body = {'unmanage': None} self.sess.post.assert_called_with( diff --git a/openstack/tests/unit/test_connection.py b/openstack/tests/unit/test_connection.py index d1ac99c7a..f5cf7ce1f 100644 --- a/openstack/tests/unit/test_connection.py +++ b/openstack/tests/unit/test_connection.py @@ -31,71 +31,65 @@ CONFIG_PASSWORD = "TopSecret" CONFIG_PROJECT = "TheGrandPrizeGame" CONFIG_CACERT = "TrustMe" -CLOUD_CONFIG = """ +CLOUD_CONFIG = f""" clouds: sample-cloud: region_name: RegionOne auth: - auth_url: {auth_url} - username: {username} - password: {password} - project_name: {project} + auth_url: {CONFIG_AUTH_URL} + username: {CONFIG_USERNAME} 
+ password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} insecure-cloud: auth: - auth_url: {auth_url} - username: {username} - password: {password} - project_name: {project} - cacert: {cacert} + auth_url: {CONFIG_AUTH_URL} + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} + cacert: {CONFIG_CACERT} verify: False insecure-cloud-alternative-format: auth: - auth_url: {auth_url} - username: {username} - password: {password} - project_name: {project} + auth_url: {CONFIG_AUTH_URL} + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} insecure: True cacert-cloud: auth: - auth_url: {auth_url} - username: {username} - password: {password} - project_name: {project} - cacert: {cacert} + auth_url: {CONFIG_AUTH_URL} + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} + cacert: {CONFIG_CACERT} profiled-cloud: profile: dummy auth: - username: {username} - password: {password} - project_name: {project} - cacert: {cacert} -""".format( - auth_url=CONFIG_AUTH_URL, - username=CONFIG_USERNAME, - password=CONFIG_PASSWORD, - project=CONFIG_PROJECT, - cacert=CONFIG_CACERT, -) + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} + cacert: {CONFIG_CACERT} +""" -VENDOR_CONFIG = """ +VENDOR_CONFIG = f""" {{ "name": "dummy", "profile": {{ "auth": {{ - "auth_url": "{auth_url}" + "auth_url": "{CONFIG_AUTH_URL}" }}, "vendor_hook": "openstack.tests.unit.test_connection:vendor_hook" }} }} -""".format(auth_url=CONFIG_AUTH_URL) +""" -PUBLIC_CLOUDS_YAML = """ +PUBLIC_CLOUDS_YAML = f""" public-clouds: dummy: auth: - auth_url: {auth_url} + auth_url: {CONFIG_AUTH_URL} vendor_hook: openstack.tests.unit.test_connection:vendor_hook -""".format(auth_url=CONFIG_AUTH_URL) +""" class _TestConnectionBase(base.TestCase): diff --git a/openstack/tests/unit/test_proxy.py b/openstack/tests/unit/test_proxy.py index 78470afb6..9cc02d60b 100644 --- 
a/openstack/tests/unit/test_proxy.py +++ b/openstack/tests/unit/test_proxy.py @@ -638,7 +638,7 @@ class TestProxyCache(base.TestCase): self.sot.service_type = 'srv' def _get_key(self, id): - return "srv.fake.fake/%s.{'microversion': None, 'params': {}}" % id + return f"srv.fake.fake/{id}.{{'microversion': None, 'params': {{}}}}" def test_get_not_in_cache(self): self.cloud._cache_expirations['srv.fake'] = 5 diff --git a/openstack/tests/unit/test_stats.py b/openstack/tests/unit/test_stats.py index a9871fab4..999d2602a 100644 --- a/openstack/tests/unit/test_stats.py +++ b/openstack/tests/unit/test_stats.py @@ -163,7 +163,7 @@ class TestStats(base.TestCase): return True time.sleep(0.1) - raise Exception("Key %s not found in reported stats" % key) + raise Exception(f"Key {key} not found in reported stats") def assert_prometheus_stat(self, name, value, labels=None): sample_value = self._registry.get_sample_value(name, labels) diff --git a/openstack/utils.py b/openstack/utils.py index 7e54ef4bf..7ccc4c7bc 100644 --- a/openstack/utils.py +++ b/openstack/utils.py @@ -59,8 +59,8 @@ def iterate_timeout(timeout, message, wait=2): wait = float(wait) except ValueError: raise exceptions.SDKException( - "Wait value must be an int or float value. {wait} given" - " instead".format(wait=wait) + f"Wait value must be an int or float value. 
" + f"{wait} given instead" ) start = time.time() @@ -172,17 +172,15 @@ def supports_microversion(adapter, microversion, raise_exception=False): supports = discover.version_match(required, candidate) if raise_exception and not supports: raise exceptions.SDKException( - 'Required microversion {ver} is higher than currently ' - 'selected {curr}'.format( - ver=microversion, curr=adapter.default_microversion - ) + f'Required microversion {microversion} is higher than ' + f'currently selected {adapter.default_microversion}' ) return supports return True if raise_exception: raise exceptions.SDKException( - 'Required microversion {ver} is not supported ' - 'by the server side'.format(ver=microversion) + f'Required microversion {microversion} is not supported ' + f'by the server side' ) return False diff --git a/openstack/workflow/v2/workflow.py b/openstack/workflow/v2/workflow.py index fc21f0322..8b6df87b0 100644 --- a/openstack/workflow/v2/workflow.py +++ b/openstack/workflow/v2/workflow.py @@ -58,7 +58,7 @@ class Workflow(resource.Resource): "data": self.definition, } - scope = "?scope=%s" % self.scope + scope = f"?scope={self.scope}" uri = request.url + scope request.headers.update(headers) diff --git a/pyproject.toml b/pyproject.toml index 3ab847ac0..7113b0c03 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,3 +4,6 @@ line-length = 79 [tool.ruff.format] quote-style = "preserve" docstring-code-format = true + +[tool.ruff.lint] +select = ["E4", "E7", "E9", "F", "U"] diff --git a/tools/nova_version.py b/tools/nova_version.py index ad8ea0cbc..e5593fbc9 100644 --- a/tools/nova_version.py +++ b/tools/nova_version.py @@ -28,7 +28,7 @@ for cloud in openstack.config.OpenStackConfig().get_all_clouds(): print(endpoint) r = c.get(endpoint).json() except Exception: - print("Error with %s" % cloud.name) + print(f"Error with {cloud.name}") continue for version in r['versions']: if version['status'] == 'CURRENT': diff --git a/tools/print-services.py b/tools/print-services.py 
index 98602070d..c478fc997 100644 --- a/tools/print-services.py +++ b/tools/print-services.py @@ -47,9 +47,7 @@ def make_names(): dc = desc_class.__name__ services.append( - "{st} = {dm}.{dc}(service_type='{service_type}')".format( - st=st, dm=dm, dc=dc, service_type=service_type - ), + f"{st} = {dm}.{dc}(service_type='{service_type}')", ) # Register the descriptor class with every known alias. Don't