pre-commit: Migrate pyupgrade to ruff-format

openstack/tests/unit/cloud/test_stack.py needs some manual fixes, but this
is otherwise auto-generated.
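
For context: ruff re-implements pyupgrade's checks natively as its UP rule
family, and some of the resulting rewrites (notably percent-formatting and
str.format() calls to f-strings) are marked unsafe by ruff, which is
presumably why '--unsafe-fixes' is added to the ruff hook below. The
dominant pattern in this change, excerpted from one of the hunks:

    # before
    msg = "Failed to get boot device for node {node}".format(node=self.id)
    # after
    msg = f"Failed to get boot device for node {self.id}"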

Change-Id: If0d202ece232181c16ee4990eb428e9ad6e91cd5
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
Stephen Finucane 2024-09-12 17:49:32 +01:00
parent 33b5bfac57
commit 399dfcc0e5
125 changed files with 966 additions and 1837 deletions

View File

@ -18,16 +18,11 @@ repos:
rev: v1.1.2
hooks:
- id: doc8
- repo: https://github.com/asottile/pyupgrade
rev: v3.19.0
hooks:
- id: pyupgrade
args: ['--py38-plus']
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.7.3
hooks:
- id: ruff
args: ['--fix']
args: ['--fix', '--unsafe-fixes']
- id: ruff-format
- repo: https://opendev.org/openstack/hacking
rev: 7.0.0
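
Note: the '--py38-plus' argument dropped above has no direct replacement in
the hook arguments; the equivalent lives in ruff's own configuration, which
is outside this diff. A minimal sketch, assuming the settings go in
pyproject.toml:

    [tool.ruff]
    target-version = "py38"

    [tool.ruff.lint]
    # "UP" selects the pyupgrade-derived rule family
    select = ["UP"]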

View File

@ -46,7 +46,7 @@ def create_keypair(conn):
raise e
with open(PRIVATE_KEYPAIR_FILE, 'w') as f:
f.write("%s" % keypair.private_key)
f.write(str(keypair.private_key))
os.chmod(PRIVATE_KEYPAIR_FILE, 0o400)
@ -71,8 +71,4 @@ def create_server(conn):
server = conn.compute.wait_for_server(server)
print(
"ssh -i {key} root@{ip}".format(
key=PRIVATE_KEYPAIR_FILE, ip=server.access_ipv4
)
)
print(f"ssh -i {PRIVATE_KEYPAIR_FILE} root@{server.access_ipv4}")

View File

@ -20,8 +20,9 @@ import pbr.version
def show_version(args):
print(
"OpenstackSDK Version %s"
% pbr.version.VersionInfo('openstacksdk').version_string_with_vcs()
"OpenstackSDK Version {}".format(
pbr.version.VersionInfo('openstacksdk').version_string_with_vcs()
)
)

View File

@ -64,7 +64,7 @@ class Deployable(resource.Resource):
call = getattr(session, method.lower())
except AttributeError:
raise exceptions.ResourceFailure(
"Invalid commit method: %s" % method
f"Invalid commit method: {method}"
)
request.url = request.url + "/program"

View File

@ -67,9 +67,7 @@ def populate_directory(
# Strictly speaking, user data is binary, but in many cases
# it's actually a text (cloud-init, ignition, etc).
flag = 't' if isinstance(user_data, str) else 'b'
with open(
os.path.join(subdir, 'user_data'), 'w%s' % flag
) as fp:
with open(os.path.join(subdir, 'user_data'), f'w{flag}') as fp:
fp.write(user_data)
yield d
@ -147,15 +145,14 @@ def pack(path: str) -> str:
raise RuntimeError(
'Error generating the configdrive. Make sure the '
'"genisoimage", "mkisofs" or "xorrisofs" tool is installed. '
'Error: %s' % error
f'Error: {error}'
)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise RuntimeError(
'Error generating the configdrive.'
'Stdout: "%(stdout)s". Stderr: "%(stderr)s"'
% {'stdout': stdout.decode(), 'stderr': stderr.decode()}
f'Stdout: "{stdout.decode()}". Stderr: "{stderr.decode()}"'
)
tmpfile.seek(0)
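
A detail that recurs throughout this change: Python concatenates adjacent
string literals at compile time and the f prefix applies per literal, so
only the segments that actually contain placeholders are converted. A
minimal illustration:

    error = 'boom'  # stand-in value
    msg = (
        'Error generating the configdrive. '  # plain segment, unchanged
        f'Error: {error}'  # placeholder segment, gains the f prefix
    )
    assert msg == 'Error generating the configdrive. Error: boom'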

View File

@ -63,9 +63,7 @@ class Proxy(proxy.Proxy):
kwargs['fields'] = _common.fields_type(fields, resource_type)
return res.fetch(
self,
error_message="No {resource_type} found for {value}".format(
resource_type=resource_type.__name__, value=value
),
error_message=f"No {resource_type.__name__} found for {value}",
**kwargs,
)
@ -560,9 +558,8 @@ class Proxy(proxy.Proxy):
try:
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for nodes %(nodes)s to reach "
"target state '%(state)s'"
% {'nodes': log_nodes, 'state': expected_state},
f"Timeout waiting for nodes {log_nodes} to reach "
f"target state '{expected_state}'",
):
nodes = [self.get_node(n) for n in remaining]
remaining = []

View File

@ -93,14 +93,13 @@ class Allocation(_common.Resource):
return self
for count in utils.iterate_timeout(
timeout, "Timeout waiting for the allocation %s" % self.id
timeout, f"Timeout waiting for the allocation {self.id}"
):
self.fetch(session)
if self.state == 'error' and not ignore_error:
raise exceptions.ResourceFailure(
"Allocation %(allocation)s failed: %(error)s"
% {'allocation': self.id, 'error': self.last_error}
f"Allocation {self.id} failed: {self.last_error}"
)
elif self.state != 'allocating':
return self

View File

@ -188,8 +188,6 @@ class Driver(resource.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed call to method {method} on driver {driver_name}".format(
method=method, driver_name=self.name
)
msg = f"Failed call to method {method} on driver {self.name}"
exceptions.raise_from_response(response, error_message=msg)
return response

View File

@ -324,9 +324,8 @@ class Node(_common.Resource):
microversion = _common.STATE_VERSIONS[expected_provision_state]
except KeyError:
raise ValueError(
"Node's provision_state must be one of %s for creation, "
"got %s"
% (
"Node's provision_state must be one of {} for creation, "
"got {}".format(
', '.join(_common.STATE_VERSIONS),
expected_provision_state,
)
@ -334,7 +333,7 @@ class Node(_common.Resource):
else:
error_message = (
"Cannot create a node with initial provision "
"state %s" % expected_provision_state
f"state {expected_provision_state}"
)
# Nodes cannot be created as available using new API versions
maximum = (
@ -546,8 +545,8 @@ class Node(_common.Resource):
expected_state = _common.EXPECTED_STATES[target]
except KeyError:
raise ValueError(
'For target %s the expected state is not '
'known, cannot wait for it' % target
f'For target {target} the expected state is not '
'known, cannot wait for it'
)
request = self._prepare_request(requires_id=True)
@ -561,8 +560,8 @@ class Node(_common.Resource):
)
msg = (
"Failed to set provision state for bare metal node {node} "
"to {target}".format(node=self.id, target=target)
f"Failed to set provision state for bare metal node {self.id} "
f"to {target}"
)
exceptions.raise_from_response(response, error_message=msg)
@ -588,9 +587,8 @@ class Node(_common.Resource):
"""
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for node %(node)s to reach "
"power state '%(state)s'"
% {'node': self.id, 'state': expected_state},
f"Timeout waiting for node {self.id} to reach "
f"power state '{expected_state}'",
):
self.fetch(session)
if self.power_state == expected_state:
@ -629,9 +627,8 @@ class Node(_common.Resource):
"""
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for node %(node)s to reach "
"target state '%(state)s'"
% {'node': self.id, 'state': expected_state},
f"Timeout waiting for node {self.id} to reach "
f"target state '{expected_state}'",
):
self.fetch(session)
if self._check_state_reached(
@ -677,7 +674,7 @@ class Node(_common.Resource):
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for the lock to be released on node %s" % self.id,
f"Timeout waiting for the lock to be released on node {self.id}",
):
self.fetch(session)
if self.reservation is None:
@ -719,13 +716,8 @@ class Node(_common.Resource):
or self.provision_state == 'error'
):
raise exceptions.ResourceFailure(
"Node %(node)s reached failure state \"%(state)s\"; "
"the last error is %(error)s"
% {
'node': self.id,
'state': self.provision_state,
'error': self.last_error,
}
f"Node {self.id} reached failure state \"{self.provision_state}\"; "
f"the last error is {self.last_error}"
)
# Special case: a failure state for "manage" transition can be
# "enroll"
@ -735,10 +727,9 @@ class Node(_common.Resource):
and self.last_error
):
raise exceptions.ResourceFailure(
"Node %(node)s could not reach state manageable: "
f"Node {self.id} could not reach state manageable: "
"failed to verify management credentials; "
"the last error is %(error)s"
% {'node': self.id, 'error': self.last_error}
f"the last error is {self.last_error}"
)
def inject_nmi(self, session):
@ -789,8 +780,8 @@ class Node(_common.Resource):
expected = _common.EXPECTED_POWER_STATES[target]
except KeyError:
raise ValueError(
"Cannot use target power state %s with wait, "
"the expected state is not known" % target
f"Cannot use target power state {target} with wait, "
"the expected state is not known"
)
session = self._get_session(session)
@ -816,8 +807,8 @@ class Node(_common.Resource):
)
msg = (
"Failed to set power state for bare metal node {node} "
"to {target}".format(node=self.id, target=target)
f"Failed to set power state for bare metal node {self.id} "
f"to {target}"
)
exceptions.raise_from_response(response, error_message=msg)
@ -893,9 +884,7 @@ class Node(_common.Resource):
retriable_status_codes=retriable_status_codes,
)
msg = "Failed to attach VIF {vif} to bare metal node {node}".format(
node=self.id, vif=vif_id
)
msg = f"Failed to attach VIF {vif_id} to bare metal node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
def detach_vif(self, session, vif_id, ignore_missing=True):
@ -940,9 +929,7 @@ class Node(_common.Resource):
)
return False
msg = "Failed to detach VIF {vif} from bare metal node {node}".format(
node=self.id, vif=vif_id
)
msg = f"Failed to detach VIF {vif_id} from bare metal node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
return True
@ -973,9 +960,7 @@ class Node(_common.Resource):
request.url, headers=request.headers, microversion=version
)
msg = "Failed to list VIFs attached to bare metal node {node}".format(
node=self.id
)
msg = f"Failed to list VIFs attached to bare metal node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
return [vif['id'] for vif in response.json()['vifs']]
@ -1015,8 +1000,8 @@ class Node(_common.Resource):
if failed:
raise exceptions.ValidationException(
'Validation failed for required interfaces of node {node}:'
' {failures}'.format(
'Validation failed for required interfaces of node '
'{node}: {failures}'.format(
node=self.id, failures=', '.join(failed)
)
)
@ -1058,9 +1043,7 @@ class Node(_common.Resource):
headers=request.headers,
microversion=version,
)
msg = "Failed to change maintenance mode for node {node}".format(
node=self.id
)
msg = f"Failed to change maintenance mode for node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
def get_boot_device(self, session):
@ -1081,9 +1064,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to get boot device for node {node}".format(
node=self.id,
)
msg = f"Failed to get boot device for node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
return response.json()
@ -1138,9 +1119,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to get supported boot devices for node {node}".format(
node=self.id,
)
msg = f"Failed to get supported boot devices for node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
return response.json()
@ -1164,8 +1143,8 @@ class Node(_common.Resource):
request.url = utils.urljoin(request.url, 'states', 'boot_mode')
if target not in ('uefi', 'bios'):
raise ValueError(
"Unrecognized boot mode %s. "
"Boot mode should be one of 'uefi' or 'bios'." % target
f"Unrecognized boot mode {target}. "
"Boot mode should be one of 'uefi' or 'bios'."
)
body = {'target': target}
@ -1200,8 +1179,8 @@ class Node(_common.Resource):
request.url = utils.urljoin(request.url, 'states', 'secure_boot')
if not isinstance(target, bool):
raise ValueError(
"Invalid target %s. It should be True or False "
"corresponding to secure boot state 'on' or 'off'" % target
f"Invalid target {target}. It should be True or False "
"corresponding to secure boot state 'on' or 'off'"
)
body = {'target': target}
@ -1213,9 +1192,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to change secure boot state for {node}".format(
node=self.id
)
msg = f"Failed to change secure boot state for {self.id}"
exceptions.raise_from_response(response, error_message=msg)
def add_trait(self, session, trait):
@ -1237,9 +1214,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to add trait {trait} for node {node}".format(
trait=trait, node=self.id
)
msg = f"Failed to add trait {trait} for node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
self.traits = list(set(self.traits or ()) | {trait})
@ -1342,10 +1317,8 @@ class Node(_common.Resource):
)
msg = (
"Failed to call vendor_passthru for node {node}, verb {verb}"
" and method {method}".format(
node=self.id, verb=verb, method=method
)
f"Failed to call vendor_passthru for node {self.id}, verb {verb} "
f"and method {method}"
)
exceptions.raise_from_response(response, error_message=msg)
@ -1369,9 +1342,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to list vendor_passthru methods for node {node}".format(
node=self.id
)
msg = f"Failed to list vendor_passthru methods for node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
return response.json()
@ -1394,9 +1365,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to get console for node {node}".format(
node=self.id,
)
msg = f"Failed to get console for node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
return response.json()
@ -1414,8 +1383,8 @@ class Node(_common.Resource):
request.url = utils.urljoin(request.url, 'states', 'console')
if not isinstance(enabled, bool):
raise ValueError(
"Invalid enabled %s. It should be True or False "
"corresponding to console enabled or disabled" % enabled
f"Invalid enabled {enabled}. It should be True or False "
"corresponding to console enabled or disabled"
)
body = {'enabled': enabled}
@ -1427,9 +1396,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to change console mode for {node}".format(
node=self.id,
)
msg = f"Failed to change console mode for {self.id}"
exceptions.raise_from_response(response, error_message=msg)
def get_node_inventory(self, session, node_id):
@ -1457,9 +1424,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to get inventory for node {node}".format(
node=self.id,
)
msg = f"Failed to get inventory for node {node_id}"
exceptions.raise_from_response(response, error_message=msg)
return response.json()
@ -1487,9 +1452,7 @@ class Node(_common.Resource):
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
)
msg = "Failed to list firmware components for node {node}".format(
node=self.id
)
msg = f"Failed to list firmware components for node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
return response.json()

View File

@ -102,9 +102,7 @@ class Introspection(resource.Resource):
response = session.get(
request.url, headers=request.headers, microversion=version
)
msg = "Failed to fetch introspection data for node {id}".format(
id=self.id
)
msg = f"Failed to fetch introspection data for node {self.id}"
exceptions.raise_from_response(response, error_message=msg)
return response.json()
@ -127,7 +125,7 @@ class Introspection(resource.Resource):
return self
for count in utils.iterate_timeout(
timeout, "Timeout waiting for introspection on node %s" % self.id
timeout, f"Timeout waiting for introspection on node {self.id}"
):
self.fetch(session)
if self._check_state(ignore_error):
@ -142,8 +140,7 @@ class Introspection(resource.Resource):
def _check_state(self, ignore_error):
if self.state == 'error' and not ignore_error:
raise exceptions.ResourceFailure(
"Introspection of node %(node)s failed: %(error)s"
% {'node': self.id, 'error': self.error}
f"Introspection of node {self.id} failed: {self.error}"
)
else:
return self.is_finished

View File

@ -38,8 +38,8 @@ class BaseBlockStorageProxy(proxy.Proxy, metaclass=abc.ABCMeta):
volume_obj = self.get_volume(volume)
if not volume_obj:
raise exceptions.SDKException(
"Volume {volume} given to create_image could"
" not be found".format(volume=volume)
f"Volume {volume} given to create_image could "
f"not be found"
)
volume_id = volume_obj['id']
data = self.post(

View File

@ -142,7 +142,7 @@ class Backup(resource.Resource):
else:
# Just for safety of the implementation (since PUT removed)
raise exceptions.ResourceFailure(
"Invalid create method: %s" % self.create_method
f"Invalid create method: {self.create_method}"
)
has_body = (

View File

@ -158,7 +158,7 @@ class Backup(resource.Resource):
else:
# Just for safety of the implementation (since PUT removed)
raise exceptions.ResourceFailure(
"Invalid create method: %s" % self.create_method
f"Invalid create method: {self.create_method}"
)
has_body = (

View File

@ -35,7 +35,7 @@ def _normalize_port_list(nics):
except KeyError:
raise TypeError(
"Either 'address' or 'mac' must be provided "
"for port %s" % row
f"for port {row}"
)
ports.append(dict(row, address=address))
return ports
@ -126,10 +126,9 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin):
if node.provision_state == 'available':
if node.instance_id:
raise exceptions.SDKException(
"Refusing to inspect available machine %(node)s "
f"Refusing to inspect available machine {node.id} "
"which is associated with an instance "
"(instance_uuid %(inst)s)"
% {'node': node.id, 'inst': node.instance_id}
f"(instance_uuid {node.instance_id})"
)
return_to_available = True
@ -142,10 +141,9 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin):
if node.provision_state not in ('manageable', 'inspect failed'):
raise exceptions.SDKException(
"Machine %(node)s must be in 'manageable', 'inspect failed' "
f"Machine {node.id} must be in 'manageable', 'inspect failed' "
"or 'available' provision state to start inspection, the "
"current state is %(state)s"
% {'node': node.id, 'state': node.provision_state}
f"current state is {node.provision_state}"
)
node = self.baremetal.set_node_provision_state(
@ -229,7 +227,7 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin):
if provision_state not in ('enroll', 'manageable', 'available'):
raise ValueError(
'Initial provision state must be enroll, '
'manageable or available, got %s' % provision_state
f'manageable or available, got {provision_state}'
)
# Available is tricky: it cannot be directly requested on newer API
@ -306,8 +304,8 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin):
invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed']
if machine['provision_state'] in invalid_states:
raise exceptions.SDKException(
"Error unregistering node '%s' due to current provision "
"state '%s'" % (uuid, machine['provision_state'])
"Error unregistering node '{}' due to current provision "
"state '{}'".format(uuid, machine['provision_state'])
)
# NOTE(TheJulia) There is a high possibility of a lock being present
@ -318,8 +316,8 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin):
self.baremetal.wait_for_node_reservation(machine, timeout)
except exceptions.SDKException as e:
raise exceptions.SDKException(
"Error unregistering node '%s': Exception occurred while"
" waiting to be able to proceed: %s" % (machine['uuid'], e)
"Error unregistering node '{}': Exception occurred while "
"waiting to be able to proceed: {}".format(machine['uuid'], e)
)
for nic in _normalize_port_list(nics):
@ -382,7 +380,7 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin):
machine = self.get_machine(name_or_id)
if not machine:
raise exceptions.SDKException(
"Machine update failed to find Machine: %s. " % name_or_id
f"Machine update failed to find Machine: {name_or_id}. "
)
new_config = dict(machine._to_munch(), **attrs)
@ -394,8 +392,7 @@ class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin):
except Exception as e:
raise exceptions.SDKException(
"Machine update failed - Error generating JSON patch object "
"for submission to the API. Machine: %s Error: %s"
% (name_or_id, e)
f"for submission to the API. Machine: {name_or_id} Error: {e}"
)
if not patch:

View File

@ -169,7 +169,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin):
volume = self.get_volume(name_or_id)
if not volume:
raise exceptions.SDKException("Volume %s not found." % name_or_id)
raise exceptions.SDKException(f"Volume {name_or_id} not found.")
volume = self.block_storage.update_volume(volume, **kwargs)
@ -193,9 +193,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin):
if not volume:
raise exceptions.SDKException(
"Volume {name_or_id} does not exist".format(
name_or_id=name_or_id
)
f"Volume {name_or_id} does not exist"
)
self.block_storage.set_volume_bootable_status(volume, bootable)
@ -371,14 +369,16 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin):
dev = self.get_volume_attach_device(volume, server['id'])
if dev:
raise exceptions.SDKException(
"Volume %s already attached to server %s on device %s"
% (volume['id'], server['id'], dev)
"Volume {} already attached to server {} on device {}".format(
volume['id'], server['id'], dev
)
)
if volume['status'] != 'available':
raise exceptions.SDKException(
"Volume %s is not available. Status is '%s'"
% (volume['id'], volume['status'])
"Volume {} is not available. Status is '{}'".format(
volume['id'], volume['status']
)
)
payload = {}
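
Conversely, some call sites above stay on str.format(). Before Python 3.12
an f-string expression may not reuse the enclosing quote character (or
contain a backslash), so with a py38 target the fixer is presumably being
conservative wherever the replacement expressions carry their own quotes. A
minimal illustration of the 3.8-era constraint:

    volume = {'id': 'v1'}
    # fine on 3.8+: inner quotes differ from the outer ones
    ok = f"Volume {volume['id']} is not available"
    # SyntaxError before 3.12: the outer quote reappears in the expression
    # bad = f'Volume {volume['id']} is not available'
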
@ -766,7 +766,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin):
volume_type = self.get_volume_type(name_or_id)
if not volume_type:
raise exceptions.SDKException(
"VolumeType not found: %s" % name_or_id
f"VolumeType not found: {name_or_id}"
)
return self.block_storage.get_type_access(volume_type)
@ -786,7 +786,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin):
volume_type = self.get_volume_type(name_or_id)
if not volume_type:
raise exceptions.SDKException(
"VolumeType not found: %s" % name_or_id
f"VolumeType not found: {name_or_id}"
)
self.block_storage.add_type_access(volume_type, project_id)
@ -804,7 +804,7 @@ class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin):
volume_type = self.get_volume_type(name_or_id)
if not volume_type:
raise exceptions.SDKException(
"VolumeType not found: %s" % name_or_id
f"VolumeType not found: {name_or_id}"
)
self.block_storage.remove_type_access(volume_type, project_id)

View File

@ -124,7 +124,7 @@ class CoeCloudMixin(openstackcloud._OpenStackCloudMixin):
cluster = self.get_coe_cluster(name_or_id)
if not cluster:
raise exceptions.SDKException(
"COE cluster %s not found." % name_or_id
f"COE cluster {name_or_id} not found."
)
cluster = self.container_infrastructure_management.update_cluster(
@ -283,7 +283,7 @@ class CoeCloudMixin(openstackcloud._OpenStackCloudMixin):
cluster_template = self.get_cluster_template(name_or_id)
if not cluster_template:
raise exceptions.SDKException(
"Cluster template %s not found." % name_or_id
f"Cluster template {name_or_id} not found."
)
cluster_template = (

View File

@ -111,9 +111,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
):
return flavor
raise exceptions.SDKException(
"Could not find a flavor with {ram} and '{include}'".format(
ram=ram, include=include
)
f"Could not find a flavor with {ram} and '{include}'"
)
def search_keypairs(self, name_or_id=None, filters=None):
@ -622,8 +620,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
server_obj = self.get_server(server, bare=True)
if not server_obj:
raise exceptions.SDKException(
"Server {server} could not be found and therefore"
" could not be snapshotted.".format(server=server)
f"Server {server} could not be found and therefore "
f"could not be snapshotted."
)
server = server_obj
image = self.compute.create_server_image(
@ -853,8 +851,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
kwargs['nics'] = [kwargs['nics']]
else:
raise exceptions.SDKException(
'nics parameter to create_server takes a list of dicts.'
' Got: {nics}'.format(nics=kwargs['nics'])
'nics parameter to create_server takes a list of dicts. '
'Got: {nics}'.format(nics=kwargs['nics'])
)
if network and ('nics' not in kwargs or not kwargs['nics']):
@ -902,8 +900,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
fixed_ip = nic.pop(ip_key, None)
if fixed_ip and net.get('fixed_ip'):
raise exceptions.SDKException(
"Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip"
" may be given"
"Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip "
"may be given"
)
if fixed_ip:
net['fixed_ip'] = fixed_ip
@ -917,8 +915,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
net['tag'] = nic.pop('tag')
if nic:
raise exceptions.SDKException(
"Additional unsupported keys given for server network"
" creation: {keys}".format(keys=nic.keys())
f"Additional unsupported keys given for server network "
f"creation: {nic.keys()}"
)
networks.append(net)
if networks:
@ -1220,23 +1218,21 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
)
self.log.debug(
'Server %(server)s reached ACTIVE state without'
' being allocated an IP address.'
' Deleting server.',
{'server': server['id']},
f'Server {server["id"]} reached ACTIVE state without '
f'being allocated an IP address. Deleting server.',
)
try:
self._delete_server(server=server, wait=wait, timeout=timeout)
except Exception as e:
raise exceptions.SDKException(
'Server reached ACTIVE state without being'
' allocated an IP address AND then could not'
' be deleted: {}'.format(e),
f'Server reached ACTIVE state without being '
f'allocated an IP address AND then could not '
f'be deleted: {e}',
extra_data=dict(server=server),
)
raise exceptions.SDKException(
'Server reached ACTIVE state without being'
' allocated an IP address.',
'Server reached ACTIVE state without being '
'allocated an IP address.',
extra_data=dict(server=server),
)
return None
@ -1378,9 +1374,9 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
deleted = self.delete_floating_ip(ip['id'], retry=delete_ip_retry)
if not deleted:
raise exceptions.SDKException(
"Tried to delete floating ip {floating_ip}"
" associated with server {id} but there was"
" an error deleting it. Not deleting server.".format(
"Tried to delete floating ip {floating_ip} "
"associated with server {id} but there was "
"an error deleting it. Not deleting server.".format(
floating_ip=ip['floating_ip_address'], id=server['id']
)
)
@ -1725,7 +1721,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
aggregate = self.get_aggregate(name_or_id)
if not aggregate:
raise exceptions.SDKException(
"Host aggregate %s not found." % name_or_id
f"Host aggregate {name_or_id} not found."
)
return self.compute.set_aggregate_metadata(aggregate, metadata)
@ -1742,7 +1738,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
aggregate = self.get_aggregate(name_or_id)
if not aggregate:
raise exceptions.SDKException(
"Host aggregate %s not found." % name_or_id
f"Host aggregate {name_or_id} not found."
)
return self.compute.add_host_to_aggregate(aggregate, host_name)
@ -1759,7 +1755,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
aggregate = self.get_aggregate(name_or_id)
if not aggregate:
raise exceptions.SDKException(
"Host aggregate %s not found." % name_or_id
f"Host aggregate {name_or_id} not found."
)
return self.compute.remove_host_from_aggregate(aggregate, host_name)
@ -1823,9 +1819,8 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
# implementation detail - and the error message is actually
# less informative.
raise exceptions.SDKException(
"Date given, {date}, is invalid. Please pass in a date"
" string in ISO 8601 format -"
" YYYY-MM-DDTHH:MM:SS".format(date=date)
f"Date given, {date}, is invalid. Please pass in a date "
f"string in ISO 8601 format (YYYY-MM-DDTHH:MM:SS)"
)
if isinstance(start, str):
@ -1844,7 +1839,7 @@ class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin):
if not isinstance(userdata, bytes):
# If the userdata passed in is bytes, just send it unmodified
if not isinstance(userdata, str):
raise TypeError("%s can't be encoded" % type(userdata))
raise TypeError(f"{type(userdata)} can't be encoded")
# If it's not bytes, make it bytes
userdata = userdata.encode('utf-8', 'strict')

View File

@ -78,8 +78,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin):
zone_type = zone_type.upper()
if zone_type not in ('PRIMARY', 'SECONDARY'):
raise exceptions.SDKException(
"Invalid type %s, valid choices are PRIMARY or SECONDARY"
% zone_type
f"Invalid type {zone_type}, valid choices are PRIMARY or SECONDARY"
)
zone = {
@ -119,7 +118,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin):
"""
zone = self.get_zone(name_or_id)
if not zone:
raise exceptions.SDKException("Zone %s not found." % name_or_id)
raise exceptions.SDKException(f"Zone {name_or_id} not found.")
return self.dns.update_zone(zone['id'], **kwargs)
@ -156,7 +155,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin):
else:
zone_obj = self.get_zone(zone)
if zone_obj is None:
raise exceptions.SDKException("Zone %s not found." % zone)
raise exceptions.SDKException(f"Zone {zone} not found.")
return list(self.dns.recordsets(zone_obj))
def get_recordset(self, zone, name_or_id):
@ -175,7 +174,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin):
else:
zone_obj = self.get_zone(zone)
if not zone_obj:
raise exceptions.SDKException("Zone %s not found." % zone)
raise exceptions.SDKException(f"Zone {zone} not found.")
return self.dns.find_recordset(
zone=zone_obj, name_or_id=name_or_id, ignore_missing=True
)
@ -206,7 +205,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin):
else:
zone_obj = self.get_zone(zone)
if not zone_obj:
raise exceptions.SDKException("Zone %s not found." % zone)
raise exceptions.SDKException(f"Zone {zone} not found.")
# We capitalize the type in case the user sends in lowercase
recordset_type = recordset_type.upper()
@ -239,9 +238,7 @@ class DnsCloudMixin(openstackcloud._OpenStackCloudMixin):
rs = self.get_recordset(zone, name_or_id)
if not rs:
raise exceptions.SDKException(
"Recordset %s not found." % name_or_id
)
raise exceptions.SDKException(f"Recordset {name_or_id} not found.")
rs = self.dns.update_recordset(recordset=rs, **kwargs)

View File

@ -167,6 +167,8 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin):
domain_id=domain_id,
ignore_missing=False,
)
if not project:
raise exceptions.SDKException(f"Project {name_or_id} not found.")
if enabled is not None:
kwargs.update({'enabled': enabled})
project = self.identity.update_project(project, **kwargs)
@ -218,11 +220,7 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin):
self.identity.delete_project(project)
return True
except exceptions.SDKException:
self.log.exception(
"Error in deleting project {project}".format(
project=name_or_id
)
)
self.log.exception(f"Error in deleting project {name_or_id}")
return False
@_utils.valid_kwargs('domain_id', 'name')
@ -577,9 +575,7 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin):
service = self.get_service(name_or_id=service_name_or_id)
if service is None:
raise exceptions.SDKException(
"service {service} not found".format(
service=service_name_or_id
)
f"service {service_name_or_id} not found"
)
endpoints_args = []
@ -786,7 +782,7 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin):
return True
except exceptions.SDKException:
self.log.exception("Failed to delete domain %s" % domain_id)
self.log.exception(f"Failed to delete domain {domain_id}")
raise
def list_domains(self, **filters):
@ -928,8 +924,8 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin):
dom = self.get_domain(domain)
if not dom:
raise exceptions.SDKException(
"Creating group {group} failed: Invalid domain "
"{domain}".format(group=name, domain=domain)
f"Creating group {name} failed: Invalid domain "
f"{domain}"
)
group_ref['domain_id'] = dom['id']
@ -1124,11 +1120,11 @@ class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin):
for k in ['role', 'group', 'user']:
if k in filters:
filters['%s_id' % k] = filters.pop(k)
filters[f'{k}_id'] = filters.pop(k)
for k in ['domain', 'project']:
if k in filters:
filters['scope_%s_id' % k] = filters.pop(k)
filters[f'scope_{k}_id'] = filters.pop(k)
if 'system' in filters:
system_scope = filters.pop('system')
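
The two loops above just rewrite friendly filter keys into the *_id form
the API expects; for illustration:

    filters = {'user': 'u1', 'project': 'p1', 'other': 'x'}
    for k in ['role', 'group', 'user']:
        if k in filters:
            filters[f'{k}_id'] = filters.pop(k)
    for k in ['domain', 'project']:
        if k in filters:
            filters[f'scope_{k}_id'] = filters.pop(k)
    assert filters == {'other': 'x', 'user_id': 'u1', 'scope_project_id': 'p1'}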

View File

@ -113,16 +113,17 @@ class ImageCloudMixin(openstackcloud._OpenStackCloudMixin):
"""
if output_path is None and output_file is None:
raise exceptions.SDKException(
'No output specified, an output path or file object'
' is necessary to write the image data to'
'No output specified, an output path or file object '
'is necessary to write the image data to'
)
elif output_path is not None and output_file is not None:
raise exceptions.SDKException(
'Both an output path and file object were provided,'
' however only one can be used at once'
'Both an output path and file object were provided, '
'however only one can be used at once'
)
image = self.image.find_image(name_or_id, ignore_missing=False)
return self.image.download_image(
image, output=output_file or output_path, chunk_size=chunk_size
)

View File

@ -579,7 +579,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
network = self.get_network(name_or_id)
if not network:
raise exceptions.SDKException("Network %s not found." % name_or_id)
raise exceptions.SDKException(f"Network {name_or_id} not found.")
network = self.network.update_network(network, **kwargs)
@ -1356,7 +1356,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not curr_policy:
raise exceptions.SDKException(
"QoS policy %s not found." % name_or_id
f"QoS policy {name_or_id} not found."
)
return self.network.update_qos_policy(curr_policy, **kwargs)
@ -1426,9 +1426,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
# Translate None from search interface to empty {} for kwargs below
@ -1460,9 +1458,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
return self.network.get_qos_bandwidth_limit_rule(rule_id, policy)
@ -1498,9 +1494,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
if kwargs.get("direction") is not None:
@ -1544,9 +1538,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
if kwargs.get("direction") is not None:
@ -1594,9 +1586,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
try:
@ -1657,9 +1647,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
# Translate None from search interface to empty {} for kwargs below
@ -1686,9 +1674,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
return self.network.get_qos_dscp_marking_rule(rule_id, policy)
@ -1718,9 +1704,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
return self.network.create_qos_dscp_marking_rule(
@ -1752,9 +1736,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
if not kwargs:
@ -1792,9 +1774,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
try:
@ -1859,9 +1839,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
# Translate None from search interface to empty {} for kwargs below
@ -1891,9 +1869,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
return self.network.get_qos_minimum_bandwidth_rule(rule_id, policy)
@ -1927,9 +1903,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
kwargs['min_kbps'] = min_kbps
@ -1963,9 +1937,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
if not kwargs:
@ -2005,9 +1977,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
)
if not policy:
raise exceptions.NotFoundException(
"QoS policy {name_or_id} not Found.".format(
name_or_id=policy_name_or_id
)
f"QoS policy {policy_name_or_id} not Found."
)
try:
@ -2235,7 +2205,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
curr_router = self.get_router(name_or_id)
if not curr_router:
raise exceptions.SDKException("Router %s not found." % name_or_id)
raise exceptions.SDKException(f"Router {name_or_id} not found.")
return self.network.update_router(curr_router, **router)
@ -2348,7 +2318,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
network = self.get_network(network_name_or_id, filters)
if not network:
raise exceptions.SDKException(
"Network %s not found." % network_name_or_id
f"Network {network_name_or_id} not found."
)
if disable_gateway_ip and gateway_ip:
@ -2378,7 +2348,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
subnetpool = self.get_subnetpool(subnetpool_name_or_id)
if not subnetpool:
raise exceptions.SDKException(
"Subnetpool %s not found." % subnetpool_name_or_id
f"Subnetpool {subnetpool_name_or_id} not found."
)
# Be friendly on ip_version and allow strings
@ -2523,7 +2493,7 @@ class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin):
curr_subnet = self.get_subnet(name_or_id)
if not curr_subnet:
raise exceptions.SDKException("Subnet %s not found." % name_or_id)
raise exceptions.SDKException(f"Subnet {name_or_id} not found.")
return self.network.update_subnet(curr_subnet, **subnet)

View File

@ -174,11 +174,11 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
if nat_source:
raise exceptions.SDKException(
'Multiple networks were found matching '
'{nat_net} which is the network configured '
f'{self._nat_source} which is the network configured '
'to be the NAT source. Please check your '
'cloud resources. It is probably a good idea '
'to configure this network by ID rather than '
'by name.'.format(nat_net=self._nat_source)
'by name.'
)
external_ipv4_floating_networks.append(network)
nat_source = network
@ -192,11 +192,11 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
if nat_destination:
raise exceptions.SDKException(
'Multiple networks were found matching '
'{nat_net} which is the network configured '
f'{self._nat_destination} which is the network configured '
'to be the NAT destination. Please check your '
'cloud resources. It is probably a good idea '
'to configure this network by ID rather than '
'by name.'.format(nat_net=self._nat_destination)
'by name.'
)
nat_destination = network
elif self._nat_destination is None:
@ -230,12 +230,12 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
if default_network:
raise exceptions.SDKException(
'Multiple networks were found matching '
'{default_net} which is the network '
f'{self._default_network} which is the network '
'configured to be the default interface '
'network. Please check your cloud resources. '
'It is probably a good idea '
'to configure this network by ID rather than '
'by name.'.format(default_net=self._default_network)
'by name.'
)
default_network = network
@ -243,58 +243,50 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
for net_name in self._external_ipv4_names:
if net_name not in [net['name'] for net in external_ipv4_networks]:
raise exceptions.SDKException(
"Networks: {network} was provided for external IPv4 "
"access and those networks could not be found".format(
network=net_name
)
f"Networks: {net_name} was provided for external IPv4 "
"access and those networks could not be found"
)
for net_name in self._internal_ipv4_names:
if net_name not in [net['name'] for net in internal_ipv4_networks]:
raise exceptions.SDKException(
"Networks: {network} was provided for internal IPv4 "
"access and those networks could not be found".format(
network=net_name
)
f"Networks: {net_name} was provided for internal IPv4 "
"access and those networks could not be found"
)
for net_name in self._external_ipv6_names:
if net_name not in [net['name'] for net in external_ipv6_networks]:
raise exceptions.SDKException(
"Networks: {network} was provided for external IPv6 "
"access and those networks could not be found".format(
network=net_name
)
f"Networks: {net_name} was provided for external IPv6 "
"access and those networks could not be found"
)
for net_name in self._internal_ipv6_names:
if net_name not in [net['name'] for net in internal_ipv6_networks]:
raise exceptions.SDKException(
"Networks: {network} was provided for internal IPv6 "
"access and those networks could not be found".format(
network=net_name
)
f"Networks: {net_name} was provided for internal IPv6 "
"access and those networks could not be found"
)
if self._nat_destination and not nat_destination:
raise exceptions.SDKException(
'Network {network} was configured to be the '
f'Network {self._nat_destination} was configured to be the '
'destination for inbound NAT but it could not be '
'found'.format(network=self._nat_destination)
'found'
)
if self._nat_source and not nat_source:
raise exceptions.SDKException(
'Network {network} was configured to be the '
f'Network {self._nat_source} was configured to be the '
'source for inbound NAT but it could not be '
'found'.format(network=self._nat_source)
'found'
)
if self._default_network and not default_network:
raise exceptions.SDKException(
'Network {network} was configured to be the '
f'Network {self._default_network} was configured to be the '
'default network interface but it could not be '
'found'.format(network=self._default_network)
'found'
)
self._external_ipv4_networks = external_ipv4_networks
@ -812,7 +804,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
except exceptions.NotFoundException:
raise exceptions.NotFoundException(
"unable to find network for floating ips with ID "
"{}".format(network_name_or_id)
f"{network_name_or_id}"
)
network_id = network['id']
else:
@ -879,8 +871,8 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
)
else:
raise exceptions.SDKException(
"Attempted to create FIP on port {port} "
"but something went wrong".format(port=port)
f"Attempted to create FIP on port {port} "
"but something went wrong"
)
return fip
@ -970,9 +962,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
try:
proxy._json_response(
self.compute.delete(f'/os-floating-ips/{floating_ip_id}'),
error_message='Unable to delete floating IP {fip_id}'.format(
fip_id=floating_ip_id
),
error_message=f'Unable to delete floating IP {floating_ip_id}',
)
except exceptions.NotFoundException:
return False
@ -1123,8 +1113,8 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
raise exceptions.SDKException(
f"unable to find floating IP {floating_ip_id}"
)
error_message = "Error attaching IP {ip} to instance {id}".format(
ip=floating_ip_id, id=server_id
error_message = (
f"Error attaching IP {floating_ip_id} to instance {server_id}"
)
body = {'address': f_ip['floating_ip_address']}
if fixed_address:
@ -1174,9 +1164,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
self.network.update_ip(floating_ip_id, port_id=None)
except exceptions.SDKException:
raise exceptions.SDKException(
"Error detaching IP {ip} from server {server_id}".format(
ip=floating_ip_id, server_id=server_id
)
f"Error detaching IP {floating_ip_id} from server {server_id}"
)
return True
@ -1187,8 +1175,8 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
raise exceptions.SDKException(
f"unable to find floating IP {floating_ip_id}"
)
error_message = "Error detaching IP {ip} from instance {id}".format(
ip=floating_ip_id, id=server_id
error_message = (
f"Error detaching IP {floating_ip_id} from instance {server_id}"
)
return proxy._json_response(
self.compute.post(
@ -1533,27 +1521,25 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
)
if not nat_network:
raise exceptions.SDKException(
'NAT Destination {nat_destination} was configured'
' but not found on the cloud. Please check your'
' config and your cloud and try again.'.format(
nat_destination=nat_destination
)
f'NAT Destination {nat_destination} was '
f'configured but not found on the cloud. Please '
f'check your config and your cloud and try again.'
)
else:
nat_network = self.get_nat_destination()
if not nat_network:
raise exceptions.SDKException(
'Multiple ports were found for server {server}'
' but none of the networks are a valid NAT'
' destination, so it is impossible to add a'
' floating IP. If you have a network that is a valid'
' destination for NAT and we could not find it,'
' please file a bug. But also configure the'
' nat_destination property of the networks list in'
' your clouds.yaml file. If you do not have a'
' clouds.yaml file, please make one - your setup'
' is complicated.'.format(server=server['id'])
f'Multiple ports were found for server {server["id"]} '
f'but none of the networks are a valid NAT '
f'destination, so it is impossible to add a '
f'floating IP. If you have a network that is a valid '
f'destination for NAT and we could not find it, '
f'please file a bug. But also configure the '
f'nat_destination property of the networks list in '
f'your clouds.yaml file. If you do not have a '
f'clouds.yaml file, please make one - your setup '
f'is complicated.'
)
maybe_ports = []
@ -1562,11 +1548,9 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
maybe_ports.append(maybe_port)
if not maybe_ports:
raise exceptions.SDKException(
'No port on server {server} was found matching'
' your NAT destination network {dest}. Please '
' check your config'.format(
server=server['id'], dest=nat_network['name']
)
f'No port on server {server["id"]} was found matching '
f'your NAT destination network {nat_network["name"]}. '
f'Please check your config'
)
ports = maybe_ports
@ -1914,7 +1898,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
if group is None:
raise exceptions.SDKException(
"Security group %s not found." % name_or_id
f"Security group {name_or_id} not found."
)
if self._use_neutron_secgroups():
@ -2006,7 +1990,7 @@ class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin):
secgroup = self.get_security_group(secgroup_name_or_id)
if not secgroup:
raise exceptions.SDKException(
"Security group %s not found." % secgroup_name_or_id
f"Security group {secgroup_name_or_id} not found."
)
if self._use_neutron_secgroups():

View File

@ -114,11 +114,9 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin):
return False
except exceptions.ConflictException:
raise exceptions.SDKException(
'Attempt to delete container {container} failed. The'
' container is not empty. Please delete the objects'
' inside it before deleting the container'.format(
container=name
)
f'Attempt to delete container {name} failed. The '
f'container is not empty. Please delete the objects '
f'inside it before deleting the container'
)
def update_container(self, name, headers):
@ -142,8 +140,8 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin):
"""
if access not in OBJECT_CONTAINER_ACLS:
raise exceptions.SDKException(
"Invalid container access specified: %s. Must be one of %s"
% (access, list(OBJECT_CONTAINER_ACLS.keys()))
f"Invalid container access specified: {access}. "
f"Must be one of {list(OBJECT_CONTAINER_ACLS.keys())}"
)
return self.object_store.set_container_metadata(
name, read_ACL=OBJECT_CONTAINER_ACLS[access], refresh=refresh
@ -159,7 +157,7 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin):
"""
container = self.get_container(name, skip_cache=True)
if not container:
raise exceptions.SDKException("Container not found: %s" % name)
raise exceptions.SDKException(f"Container not found: {name}")
acl = container.read_ACL or ''
for key, value in OBJECT_CONTAINER_ACLS.items():
# Convert to string for the comparison because swiftclient
@ -168,7 +166,7 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin):
if str(acl) == str(value):
return key
raise exceptions.SDKException(
"Could not determine container access for ACL: %s." % acl
f"Could not determine container access for ACL: {acl}."
)
def get_object_capabilities(self):
@ -423,13 +421,9 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin):
def _get_object_endpoint(self, container, obj=None, query_string=None):
endpoint = urllib.parse.quote(container)
if obj:
endpoint = '{endpoint}/{object}'.format(
endpoint=endpoint, object=urllib.parse.quote(obj)
)
endpoint = f'{endpoint}/{urllib.parse.quote(obj)}'
if query_string:
endpoint = '{endpoint}?{query_string}'.format(
endpoint=endpoint, query_string=query_string
)
endpoint = f'{endpoint}?{query_string}'
return endpoint
def stream_object(
@ -517,9 +511,7 @@ class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin):
keystoneauth1.exceptions.RetriableConnectionFailure,
exceptions.HttpException,
) as e:
error_text = "Exception processing async task: {}".format(
str(e)
)
error_text = f"Exception processing async task: {str(e)}"
if raise_on_error:
self.log.exception(error_text)
raise

View File

@ -159,21 +159,21 @@ def _get_entity(cloud, resource, name_or_id, filters, **kwargs):
# If a uuid is passed short-circuit it calling the
# get_<resource_name>_by_id method
if getattr(cloud, 'use_direct_get', False) and _is_uuid_like(name_or_id):
get_resource = getattr(cloud, 'get_%s_by_id' % resource, None)
get_resource = getattr(cloud, f'get_{resource}_by_id', None)
if get_resource:
return get_resource(name_or_id)
search = (
resource
if callable(resource)
else getattr(cloud, 'search_%ss' % resource, None)
else getattr(cloud, f'search_{resource}s', None)
)
if search:
entities = search(name_or_id, filters, **kwargs)
if entities:
if len(entities) > 1:
raise exceptions.SDKException(
"Multiple matches found for %s" % name_or_id
f"Multiple matches found for {name_or_id}"
)
return entities[0]
return None
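
For readers unfamiliar with this helper: the f-strings synthesise method
names on the cloud object, so a resource string resolves to optional
get_<resource>_by_id and search_<resource>s helpers. A sketch of the lookup
pattern with a hypothetical cloud class:

    class Cloud:
        use_direct_get = True

        def get_server_by_id(self, id_):  # found via f'get_{resource}_by_id'
            return {'id': id_}

    cloud, resource = Cloud(), 'server'
    getter = getattr(cloud, f'get_{resource}_by_id', None)
    assert getter is not None and getter('123')['id'] == '123'
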
@ -213,8 +213,8 @@ def valid_kwargs(*valid_args):
for k in kwargs:
if k not in argspec.args[1:] and k not in valid_args:
raise TypeError(
"{f}() got an unexpected keyword argument "
"'{arg}'".format(f=inspect.stack()[1][3], arg=k)
f"{inspect.stack()[1][3]}() got an unexpected keyword argument "
f"'{k}'"
)
return func(*args, **kwargs)
@ -270,9 +270,7 @@ def safe_dict_min(key, data):
except ValueError:
raise exceptions.SDKException(
"Search for minimum value failed. "
"Value for {key} is not an integer: {value}".format(
key=key, value=d[key]
)
f"Value for {key} is not an integer: {d[key]}"
)
if (min_value is None) or (val < min_value):
min_value = val
@ -303,9 +301,7 @@ def safe_dict_max(key, data):
except ValueError:
raise exceptions.SDKException(
"Search for maximum value failed. "
"Value for {key} is not an integer: {value}".format(
key=key, value=d[key]
)
f"Value for {key} is not an integer: {d[key]}"
)
if (max_value is None) or (val > max_value):
max_value = val
@ -423,7 +419,7 @@ def generate_patches_from_kwargs(operation, **kwargs):
"""
patches = []
for k, v in kwargs.items():
patch = {'op': operation, 'value': v, 'path': '/%s' % k}
patch = {'op': operation, 'value': v, 'path': f'/{k}'}
patches.append(patch)
return sorted(patches)

View File

@ -29,9 +29,7 @@ class OpenStackCloudUnavailableFeature(OpenStackCloudException):
class OpenStackCloudCreateException(OpenStackCloudException):
def __init__(self, resource, resource_id, extra_data=None, **kwargs):
super().__init__(
message="Error creating {resource}: {resource_id}".format(
resource=resource, resource_id=resource_id
),
message=f"Error creating {resource}: {resource_id}",
extra_data=extra_data,
**kwargs,
)

View File

@ -245,7 +245,7 @@ def find_best_address(addresses, public=False, cloud_public=True):
for address in addresses:
try:
for count in utils.iterate_timeout(
5, "Timeout waiting for %s" % address, wait=0.1
5, f"Timeout waiting for {address}", wait=0.1
):
# Return the first one that is reachable
try:
@ -275,10 +275,10 @@ def find_best_address(addresses, public=False, cloud_public=True):
if do_check:
log = _log.setup_logging('openstack')
log.debug(
"The cloud returned multiple addresses %s, and we could not "
f"The cloud returned multiple addresses {addresses}, and we could not "
"connect to port 22 on either. That might be what you wanted, "
"but we have no clue what's going on, so we picked the first one "
"%s" % (addresses, addresses[0])
f"{addresses[0]}"
)
return addresses[0]
@ -379,7 +379,7 @@ def get_groups_from_server(cloud, server, server_vars):
if extra_group:
groups.append(extra_group)
groups.append('instance-%s' % server['id'])
groups.append('instance-{}'.format(server['id']))
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
@ -439,11 +439,11 @@ def _get_supplemental_addresses(cloud, server):
if fixed_net is None:
log = _log.setup_logging('openstack')
log.debug(
"The cloud returned floating ip %(fip)s attached"
" to server %(server)s but the fixed ip associated"
" with the floating ip in the neutron listing"
" does not exist in the nova listing. Something"
" is exceptionally broken.",
"The cloud returned floating ip %(fip)s attached "
"to server %(server)s but the fixed ip associated "
"with the floating ip in the neutron listing "
"does not exist in the nova listing. Something "
"is exceptionally broken.",
dict(fip=fip['id'], server=server['id']),
)
else:

View File

@ -540,13 +540,8 @@ class _OpenStackCloudMixin(_services_mixin.ServicesMixin):
raise
except Exception as e:
raise exceptions.SDKException(
"Error getting {service} endpoint on {cloud}:{region}: "
"{error}".format(
service=service_key,
cloud=self.name,
region=self.config.get_region_name(service_key),
error=str(e),
)
f"Error getting {service_key} endpoint on {self.name}:{self.config.get_region_name(service_key)}: "
f"{str(e)}"
)
return endpoint
@ -611,15 +606,14 @@ class _OpenStackCloudMixin(_services_mixin.ServicesMixin):
(service_name, resource_name) = resource_type.split('.')
if not hasattr(self, service_name):
raise exceptions.SDKException(
"service %s is not existing/enabled" % service_name
f"service {service_name} is not existing/enabled"
)
service_proxy = getattr(self, service_name)
try:
resource_type = service_proxy._resource_registry[resource_name]
except KeyError:
raise exceptions.SDKException(
"Resource %s is not known in service %s"
% (resource_name, service_name)
f"Resource {resource_name} is not known in service {service_name}"
)
if name_or_id:
@ -745,6 +739,6 @@ def cleanup_task(graph, service, fn):
fn()
except Exception:
log = _log.setup_logging('openstack.project_cleanup')
log.exception('Error in the %s cleanup function' % service)
log.exception(f'Error in the {service} cleanup function')
finally:
graph.node_done(service)

View File

@ -894,7 +894,7 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
"""
action = CONSOLE_TYPE_ACTION_MAPPING.get(console_type)
if not action:
raise ValueError("Unsupported console type %s" % console_type)
raise ValueError(f"Unsupported console type {console_type}")
body = {action: {'type': console_type}}
resp = self._action(session, body)
return resp.json().get('console')
@ -967,12 +967,12 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
body['host'] = host
if not force:
raise ValueError(
"Live migration on this cloud implies 'force'"
" if the 'host' option has been given and it is not"
" possible to disable. It is recommended to not use 'host'"
" at all on this cloud as it is inherently unsafe, but if"
" it is unavoidable, please supply 'force=True' so that it"
" is clear you understand the risks."
"Live migration on this cloud implies 'force' "
"if the 'host' option has been given and it is not "
"possible to disable. It is recommended to not use 'host' "
"at all on this cloud as it is inherently unsafe, but if "
"it is unavoidable, please supply 'force=True' so that it "
"is clear you understand the risks."
)
self._action(
session,
@ -994,8 +994,8 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
}
if block_migration == 'auto':
raise ValueError(
"Live migration on this cloud does not support 'auto' as"
" a parameter to block_migration, but only True and False."
"Live migration on this cloud does not support 'auto' as "
"a parameter to block_migration, but only True and False."
)
body['block_migration'] = block_migration or False
body['disk_over_commit'] = disk_over_commit or False
@ -1003,12 +1003,12 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
body['host'] = host
if not force:
raise ValueError(
"Live migration on this cloud implies 'force'"
" if the 'host' option has been given and it is not"
" possible to disable. It is recommended to not use 'host'"
" at all on this cloud as it is inherently unsafe, but if"
" it is unavoidable, please supply 'force=True' so that it"
" is clear you understand the risks."
"Live migration on this cloud implies 'force' "
"if the 'host' option has been given and it is not "
"possible to disable. It is recommended to not use 'host' "
"at all on this cloud as it is inherently unsafe, but if "
"it is unavoidable, please supply 'force=True' so that it "
"is clear you understand the risks."
)
self._action(
session,

View File

@ -122,7 +122,7 @@ class ServerGroup(resource.Resource):
)
else:
raise exceptions.ResourceFailure(
"Invalid create method: %s" % self.create_method
f"Invalid create method: {self.create_method}"
)
has_body = (

View File

@ -190,10 +190,8 @@ def from_conf(conf, session=None, service_types=None, **kwargs):
_disable_service(
config_dict,
st,
reason="No section for project '{project}' (service type "
"'{service_type}') was present in the config.".format(
project=project_name, service_type=st
),
reason=f"No section for project '{project_name}' (service type "
f"'{st}') was present in the config.",
)
continue
opt_dict: ty.Dict[str, str] = {}
@ -212,16 +210,10 @@ def from_conf(conf, session=None, service_types=None, **kwargs):
# *that* blow up.
reason = (
"Encountered an exception attempting to process config "
"for project '{project}' (service type "
"'{service_type}'): {exception}".format(
project=project_name, service_type=st, exception=e
)
)
_logger.warning(
"Disabling service '{service_type}': {reason}".format(
service_type=st, reason=reason
)
f"for project '{project_name}' (service type "
f"'{st}'): {e}"
)
_logger.warning(f"Disabling service '{st}': {reason}")
_disable_service(config_dict, st, reason=reason)
continue
# Load them into config_dict under keys prefixed by ${service_type}_
@ -699,9 +691,8 @@ class CloudRegion:
# cert verification
if not verify:
self.log.debug(
'Turning off SSL warnings for %(full_name)s since '
'verify=False',
{'full_name': self.full_name},
f"Turning off SSL warnings for {self.full_name} "
f"since verify=False"
)
requestsexceptions.squelch_warnings(insecure_requests=not verify)
self._keystone_session = self._session_constructor(
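
Unlike the floating-ip hunk earlier, this rewrite does trade the logger's lazy `%(full_name)s` placeholder for an eager f-string: the message is now built on every call, even when debug logging is disabled. Harmless for a one-off message like this, but it is the one conversion pattern in the migration that slightly changes runtime behavior. A sketch of the difference (names are illustrative):

    import logging

    log = logging.getLogger('openstack')
    full_name = 'mycloud:RegionOne'
    # Before: rendered only if the record is emitted.
    log.debug('Turning off SSL warnings for %(full_name)s',
              {'full_name': full_name})
    # After: the string is built unconditionally.
    log.debug(f'Turning off SSL warnings for {full_name}')
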
@ -765,13 +756,10 @@ class CloudRegion:
and implied_microversion != default_microversion
):
raise exceptions.ConfigException(
"default_microversion of {default_microversion} was given"
" for {service_type}, but api_version looks like a"
" microversion as well. Please set api_version to just the"
" desired major version, or omit default_microversion".format(
default_microversion=default_microversion,
service_type=service_type,
)
f"default_microversion of {default_microversion} was given "
f"for {service_type}, but api_version looks like a "
f"microversion as well. Please set api_version to just the "
f"desired major version, or omit default_microversion"
)
if implied_microversion:
default_microversion = implied_microversion
@ -896,10 +884,10 @@ class CloudRegion:
):
if self.get_default_microversion(service_type):
raise exceptions.ConfigException(
"A default microversion for service {service_type} of"
" {default_microversion} was requested, but the cloud"
" only supports a minimum of {min_microversion} and"
" a maximum of {max_microversion}.".format(
"A default microversion for service {service_type} of "
"{default_microversion} was requested, but the cloud "
"only supports a minimum of {min_microversion} and "
"a maximum of {max_microversion}.".format(
service_type=service_type,
default_microversion=default_microversion,
min_microversion=discover.version_to_string(
@ -912,17 +900,17 @@ class CloudRegion:
)
else:
raise exceptions.ConfigException(
"A default microversion for service {service_type} of"
" {default_microversion} was requested, but the cloud"
" only supports a minimum of {min_microversion} and"
" a maximum of {max_microversion}. The default"
" microversion was set because a microversion"
" formatted version string, '{api_version}', was"
" passed for the api_version of the service. If it"
" was not intended to set a default microversion"
" please remove anything other than an integer major"
" version from the version setting for"
" the service.".format(
"A default microversion for service {service_type} of "
"{default_microversion} was requested, but the cloud "
"only supports a minimum of {min_microversion} and "
"a maximum of {max_microversion}. The default "
"microversion was set because a microversion "
"formatted version string, '{api_version}', was "
"passed for the api_version of the service. If it "
"was not intended to set a default microversion "
"please remove anything other than an integer major "
"version from the version setting for "
"the service.".format(
service_type=service_type,
api_version=self.get_api_version(service_type),
default_microversion=default_microversion,

View File

@ -138,10 +138,10 @@ def _fix_argv(argv):
overlap.extend(old)
if overlap:
raise exceptions.ConfigException(
"The following options were given: '{options}' which contain"
" duplicates except that one has _ and one has -. There is"
" no sane way for us to know what you're doing. Remove the"
" duplicate option and try again".format(options=','.join(overlap))
"The following options were given: '{options}' which contain "
"duplicates except that one has _ and one has -. There is "
"no sane way for us to know what you're doing. Remove the "
"duplicate option and try again".format(options=','.join(overlap))
)
@ -264,12 +264,11 @@ class OpenStackConfig:
self.envvar_key = self._get_envvar('OS_CLOUD_NAME', 'envvars')
if self.envvar_key in self.cloud_config['clouds']:
raise exceptions.ConfigException(
'"{0}" defines a cloud named "{1}", but'
' OS_CLOUD_NAME is also set to "{1}". Please rename'
' either your environment based cloud, or one of your'
' file-based clouds.'.format(
self.config_filename, self.envvar_key
)
f'{self.config_filename!r} defines a cloud named '
f'{self.envvar_key!r}, but OS_CLOUD_NAME is also set to '
f'{self.envvar_key!r}. '
'Please rename either your environment-based cloud, '
'or one of your file-based clouds.'
)
self.default_cloud = self._get_envvar('OS_CLOUD')
@ -501,7 +500,7 @@ class OpenStackConfig:
region
):
raise exceptions.ConfigException(
'Invalid region entry at: %s' % region
f'Invalid region entry at: {region}'
)
if 'values' not in region:
region['values'] = {}
@ -564,9 +563,9 @@ class OpenStackConfig:
return region
raise exceptions.ConfigException(
'Region {region_name} is not a valid region name for cloud'
' {cloud}. Valid choices are {region_list}. Please note that'
' region names are case sensitive.'.format(
'Region {region_name} is not a valid region name for cloud '
'{cloud}. Valid choices are {region_list}. Please note that '
'region names are case sensitive.'.format(
region_name=region_name,
region_list=','.join([r['name'] for r in regions]),
cloud=cloud,
@ -638,10 +637,8 @@ class OpenStackConfig:
)
elif status == 'shutdown':
raise exceptions.ConfigException(
"{profile_name} references a cloud that no longer"
" exists: {message}".format(
profile_name=profile_name, message=message
)
f"{profile_name} references a cloud that no longer "
f"exists: {message}"
)
_auth_update(cloud, profile_data)
else:
@ -665,8 +662,8 @@ class OpenStackConfig:
for net in networks:
if value and net[key]:
raise exceptions.ConfigException(
"Duplicate network entries for {key}: {net1} and {net2}."
" Only one network can be flagged with {key}".format(
"Duplicate network entries for {key}: {net1} and {net2}. "
"Only one network can be flagged with {key}".format(
key=key, net1=value['name'], net2=net['name']
)
)
@ -705,9 +702,9 @@ class OpenStackConfig:
external = key.startswith('external')
if key in cloud and 'networks' in cloud:
raise exceptions.ConfigException(
"Both {key} and networks were specified in the config."
" Please remove {key} from the config and use the network"
" list to configure network behavior.".format(key=key)
f"Both {key} and networks were specified in the config. "
f"Please remove {key} from the config and use the network "
f"list to configure network behavior."
)
if key in cloud:
warnings.warn(
@ -906,8 +903,8 @@ class OpenStackConfig:
options, _args = parser.parse_known_args(argv)
plugin_names = loading.get_available_plugin_names()
raise exceptions.ConfigException(
"An invalid auth-type was specified: {auth_type}."
" Valid choices are: {plugin_names}.".format(
"An invalid auth-type was specified: {auth_type}. "
"Valid choices are: {plugin_names}.".format(
auth_type=options.os_auth_type,
plugin_names=",".join(plugin_names),
)

View File

@ -58,12 +58,8 @@ def get_profile(profile_name):
response = requests.get(well_known_url)
if not response.ok:
raise exceptions.ConfigException(
"{profile_name} is a remote profile that could not be fetched:"
" {status_code} {reason}".format(
profile_name=profile_name,
status_code=response.status_code,
reason=response.reason,
)
f"{profile_name} is a remote profile that could not be fetched: "
f"{response.status_code} {response.reason}"
)
vendor_defaults[profile_name] = None
return

View File

@ -69,13 +69,9 @@ class HttpException(SDKException, _rex.HTTPError):
# to be None once we're not mocking Session everywhere.
if not message:
if response is not None:
message = "{name}: {code}".format(
name=self.__class__.__name__, code=response.status_code
)
message = f"{self.__class__.__name__}: {response.status_code}"
else:
message = "{name}: Unknown error".format(
name=self.__class__.__name__
)
message = f"{self.__class__.__name__}: Unknown error"
# Call directly rather than via super to control parameters
SDKException.__init__(self, message=message)
@ -102,18 +98,13 @@ class HttpException(SDKException, _rex.HTTPError):
if not self.url or self.message == 'Error':
return self.message
if self.url:
remote_error = "{source} Error for url: {url}".format(
source=self.source, url=self.url
)
remote_error = f"{self.source} Error for url: {self.url}"
if self.details:
remote_error += ', '
if self.details:
remote_error += str(self.details)
return "{message}: {remote_error}".format(
message=super().__str__(),
remote_error=remote_error,
)
return f"{super().__str__()}: {remote_error}"
class BadRequestException(HttpException):
@ -146,11 +137,7 @@ class MethodNotSupported(SDKException):
except AttributeError:
name = resource.__class__.__name__
message = 'The {} method is not supported for {}.{}'.format(
method,
resource.__module__,
name,
)
message = f'The {method} method is not supported for {resource.__module__}.{name}'
super().__init__(message=message)

View File

@ -28,6 +28,4 @@ class BoolStr(Formatter):
elif "false" == expr:
return False
else:
raise ValueError(
"Unable to deserialize boolean string: %s" % value
)
raise ValueError(f"Unable to deserialize boolean string: {value}")

View File

@ -85,9 +85,7 @@ class DownloadMixin:
return resp
except Exception as e:
raise exceptions.SDKException(
"Unable to download image: %s" % e
)
raise exceptions.SDKException(f"Unable to download image: {e}")
# if we are returning the response object, ensure that it
# has the content-md5 header so that the caller doesn't
# need to jump through the same hoops through which we

View File

@ -464,7 +464,7 @@ class Proxy(proxy.Proxy):
if 'queued' != image.status:
raise exceptions.SDKException(
'Image stage is only possible for images in the queued state. '
'Current state is {status}'.format(status=image.status)
f'Current state is {image.status}'
)
if filename:
@ -694,9 +694,9 @@ class Proxy(proxy.Proxy):
):
if not self._connection.has_service('object-store'):
raise exceptions.SDKException(
"The cloud {cloud} is configured to use tasks for image "
f"The cloud {self._connection.config.name} is configured to use tasks for image "
"upload, but no object-store service is available. "
"Aborting.".format(cloud=self._connection.config.name)
"Aborting."
)
properties = image_kwargs.get('properties', {})
@ -759,9 +759,7 @@ class Proxy(proxy.Proxy):
except exceptions.ResourceFailure as e:
glance_task = self.get_task(glance_task)
raise exceptions.SDKException(
"Image creation failed: {message}".format(
message=e.message
),
f"Image creation failed: {e.message}",
extra_data=glance_task,
)
finally:
@ -1839,9 +1837,7 @@ class Proxy(proxy.Proxy):
return task
name = f"{task.__class__.__name__}:{task.id}"
msg = "Timeout waiting for {name} to transition to {status}".format(
name=name, status=status
)
msg = f"Timeout waiting for {name} to transition to {status}"
for count in utils.iterate_timeout(
timeout=wait, message=msg, wait=interval
@ -1850,9 +1846,7 @@ class Proxy(proxy.Proxy):
if not task:
raise exceptions.ResourceFailure(
"{name} went away while waiting for {status}".format(
name=name, status=status
)
f"{name} went away while waiting for {status}"
)
new_status = task.status
@ -1863,12 +1857,10 @@ class Proxy(proxy.Proxy):
if task.message == _IMAGE_ERROR_396:
task_args = {'input': task.input, 'type': task.type}
task = self.create_task(**task_args)
self.log.debug('Got error 396. Recreating task %s' % task)
self.log.debug(f'Got error 396. Recreating task {task}')
else:
raise exceptions.ResourceFailure(
"{name} transitioned to failure state {status}".format(
name=name, status=new_status
)
f"{name} transitioned to failure state {new_status}"
)
self.log.debug(

View File

@ -24,7 +24,7 @@ class HREFToUUID(format.Formatter):
# Only try to proceed if we have an actual URI.
# Just check that we have a scheme, netloc, and path.
if not all(parts[:3]):
raise ValueError("Unable to convert %s to an ID" % value)
raise ValueError(f"Unable to convert {value} to an ID")
# The UUID will be the last portion of the URI.
return parts.path.split("/")[-1]

View File

@ -155,7 +155,7 @@ class Message(resource.Resource):
# parameter when deleting a message that has been claimed, we
# rebuild the request URI if claim_id is not None.
if self.claim_id:
request.url += '?claim_id=%s' % self.claim_id
request.url += f'?claim_id={self.claim_id}'
response = session.delete(request.url, headers=headers)
self._translate_response(response, has_body=False)

View File

@ -5404,8 +5404,7 @@ class Proxy(proxy.Proxy):
resource.tags
except AttributeError:
raise exceptions.InvalidRequest(
'%s resource does not support tag'
% resource.__class__.__name__
f'{resource.__class__.__name__} resource does not support tag'
)
def get_tags(self, resource):
@ -7104,7 +7103,7 @@ class Proxy(proxy.Proxy):
for port in self.ports(
project_id=project_id, network_id=net.id
):
self.log.debug('Looking at port %s' % port)
self.log.debug(f'Looking at port {port}')
if port.device_owner in [
'network:router_interface',
'network:router_interface_distributed',
@ -7127,7 +7126,7 @@ class Proxy(proxy.Proxy):
if network_has_ports_allocated:
# If some ports are on net - we cannot delete it
continue
self.log.debug('Network %s should be deleted' % net)
self.log.debug(f'Network {net} should be deleted')
# __Check__ if we need to drop network according to filters
network_must_be_deleted = self._service_cleanup_del_res(
self.delete_network,
@ -7167,7 +7166,7 @@ class Proxy(proxy.Proxy):
router=port.device_id, port_id=port.id
)
except exceptions.SDKException:
self.log.error('Cannot delete object %s' % obj)
self.log.error(f'Cannot delete object {obj}')
# router disconnected, drop it
self._service_cleanup_del_res(
self.delete_router,

View File

@ -430,9 +430,7 @@ class Proxy(proxy.Proxy):
metadata[self._connection._OBJECT_SHA256_KEY] = sha256
container_name = self._get_container_name(container=container)
endpoint = '{container}/{name}'.format(
container=container_name, name=name
)
endpoint = f'{container_name}/{name}'
if data is not None:
self.log.debug(
@ -582,9 +580,7 @@ class Proxy(proxy.Proxy):
metadata = self.get_object_metadata(name, container).metadata
except exceptions.NotFoundException:
self._connection.log.debug(
"swift stale check, no object: {container}/{name}".format(
container=container, name=name
)
f"swift stale check, no object: {container}/{name}"
)
return True
@ -608,7 +604,7 @@ class Proxy(proxy.Proxy):
if not up_to_date:
self._connection.log.debug(
"swift checksum mismatch: "
" %(filename)s!=%(container)s/%(name)s",
"%(filename)s!=%(container)s/%(name)s",
{'filename': filename, 'container': container, 'name': name},
)
return True
@ -758,9 +754,7 @@ class Proxy(proxy.Proxy):
offset,
segment_size if segment_size < remaining else remaining,
)
name = '{endpoint}/{index:0>6}'.format(
endpoint=endpoint, index=index
)
name = f'{endpoint}/{index:0>6}'
segments[name] = segment
return segments
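
The `{index:0>6}` format spec survives the conversion unchanged: f-strings use the same format-spec mini-language as `str.format()`, so segment names stay zero-padded to six digits. For instance:

    endpoint = 'container/object'
    index = 42
    assert f'{endpoint}/{index:0>6}' == 'container/object/000042'
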
@ -878,8 +872,8 @@ class Proxy(proxy.Proxy):
temp_url_key = self.get_temp_url_key(container)
if not temp_url_key:
raise exceptions.SDKException(
'temp_url_key was not given, nor was a temporary url key'
' found for the account or the container.'
'temp_url_key was not given, nor was a temporary url key '
'found for the account or the container.'
)
return temp_url_key
@ -933,13 +927,7 @@ class Proxy(proxy.Proxy):
endpoint = parse.urlparse(self.get_endpoint())
path = '/'.join([endpoint.path, res.name, object_prefix])
data = '{}\n{}\n{}\n{}\n{}'.format(
path,
redirect_url,
max_file_size,
max_upload_count,
expires,
)
data = f'{path}\n{redirect_url}\n{max_file_size}\n{max_upload_count}\n{expires}'
sig = hmac.new(temp_url_key, data.encode(), sha1).hexdigest()
return (expires, sig)
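
For readers unfamiliar with Swift FormPost: the signature above is an HMAC-SHA1 over the newline-joined form fields (path, redirect URL, max file size, max upload count, expiry). A minimal standalone sketch with made-up values:

    import hmac
    from hashlib import sha1

    temp_url_key = b'secret'
    path = '/v1/AUTH_demo/container/uploads/'
    data = f'{path}\nhttps://example.com/done\n1048576\n10\n1700000000'
    sig = hmac.new(temp_url_key, data.encode(), sha1).hexdigest()
    print(sig)
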
@ -1067,7 +1055,7 @@ class Proxy(proxy.Proxy):
ip_range = ip_range.decode('utf-8')
except UnicodeDecodeError:
raise ValueError('ip_range must be representable as UTF-8')
hmac_parts.insert(0, "ip=%s" % ip_range)
hmac_parts.insert(0, f"ip={ip_range}")
hmac_body = '\n'.join(hmac_parts)
@ -1084,11 +1072,7 @@ class Proxy(proxy.Proxy):
else:
exp = str(expiration)
temp_url = '{path}?temp_url_sig={sig}&temp_url_expires={exp}'.format(
path=path_for_body,
sig=sig,
exp=exp,
)
temp_url = f'{path_for_body}?temp_url_sig={sig}&temp_url_expires={exp}'
if ip_range:
temp_url += f'&temp_url_ip_range={ip_range}'

View File

@ -58,6 +58,6 @@ def parse(env_str):
for param in env:
if param not in SECTIONS:
raise ValueError('environment has wrong section "%s"' % param)
raise ValueError(f'environment has wrong section "{param}"')
return env

View File

@ -50,7 +50,7 @@ def poll_for_events(
"""Continuously poll events and logs for performed action on stack."""
def stop_check_action(a):
stop_status = ('%s_FAILED' % action, '%s_COMPLETE' % action)
stop_status = (f'{action}_FAILED', f'{action}_COMPLETE')
return a in stop_status
def stop_check_no_action(a):

View File

@ -50,13 +50,13 @@ def get_template_contents(
return {}, None
else:
raise exceptions.SDKException(
'Must provide one of template_file,'
' template_url or template_object'
'Must provide one of template_file, template_url or '
'template_object'
)
if not tpl:
raise exceptions.SDKException(
'Could not fetch template from %s' % template_url
f'Could not fetch template from {template_url}'
)
try:
@ -65,8 +65,7 @@ def get_template_contents(
template = template_format.parse(tpl)
except ValueError as e:
raise exceptions.SDKException(
'Error parsing template %(url)s %(error)s'
% {'url': template_url, 'error': e}
f'Error parsing template {template_url} {e}'
)
tmpl_base_url = utils.base_url_for_url(template_url)

View File

@ -40,7 +40,7 @@ def read_url_content(url):
# TODO(mordred) Use requests
content = request.urlopen(url).read()
except error.URLError:
raise exceptions.SDKException('Could not fetch contents for %s' % url)
raise exceptions.SDKException(f'Could not fetch contents for {url}')
if content:
try:

View File

@ -573,7 +573,7 @@ class Proxy(proxy.Proxy):
)
except Exception as e:
raise exceptions.SDKException(
"Error in processing template files: %s" % str(e)
f"Error in processing template files: {str(e)}"
)
def _get_cleanup_dependencies(self):

View File

@ -133,16 +133,11 @@ class Stack(resource.Resource):
# we need to use other endpoint for update preview.
base_path = None
if self.name and self.id:
base_path = '/stacks/{stack_name}/{stack_id}'.format(
stack_name=self.name,
stack_id=self.id,
)
base_path = f'/stacks/{self.name}/{self.id}'
elif self.name or self.id:
# We have only one of name/id. Do not try to build a stacks/NAME/ID
# path
base_path = '/stacks/{stack_identity}'.format(
stack_identity=self.name or self.id
)
base_path = f'/stacks/{self.name or self.id}'
request = self._prepare_request(
prepend_key=False, requires_id=False, base_path=base_path
)
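
Replacement fields take arbitrary expressions, so `{self.name or self.id}` folds the old conditional formatting into the literal itself. A tiny example with hypothetical values:

    name, id_ = None, '39bd7a67'
    assert f'/stacks/{name or id_}' == '/stacks/39bd7a67'
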
@ -248,9 +243,7 @@ class Stack(resource.Resource):
self._translate_response(response, **kwargs)
if self and self.status in ['DELETE_COMPLETE', 'ADOPT_COMPLETE']:
raise exceptions.NotFoundException(
"No stack found for %s" % self.id
)
raise exceptions.NotFoundException(f"No stack found for {self.id}")
return self
@classmethod

View File

@ -51,13 +51,12 @@ def _check_resource(strict=False):
and actual is not None
and not isinstance(actual, resource.Resource)
):
raise ValueError("A %s must be passed" % expected.__name__)
raise ValueError(f"A {expected.__name__} must be passed")
elif isinstance(actual, resource.Resource) and not isinstance(
actual, expected
):
raise ValueError(
"Expected %s but received %s"
% (expected.__name__, actual.__class__.__name__)
f"Expected {expected.__name__} but received {actual.__class__.__name__}"
)
return method(self, expected, actual, *args, **kwargs)
@ -340,16 +339,14 @@ class Proxy(adapter.Adapter):
with self._statsd_client.pipeline() as pipe:
if response is not None:
duration = int(response.elapsed.total_seconds() * 1000)
metric_name = '{}.{}'.format(
key, str(response.status_code)
)
metric_name = f'{key}.{response.status_code}'
pipe.timing(metric_name, duration)
pipe.incr(metric_name)
if duration > 1000:
pipe.incr('%s.over_1000' % key)
pipe.incr(f'{key}.over_1000')
elif exc is not None:
pipe.incr('%s.failed' % key)
pipe.incr('%s.attempted' % key)
pipe.incr(f'{key}.failed')
pipe.incr(f'{key}.attempted')
except Exception:
# We do not want errors in metric reporting ever break client
self.log.exception("Exception reporting metrics")
@ -362,8 +359,8 @@ class Proxy(adapter.Adapter):
if response is not None and not method:
method = response.request.method
parsed_url = urlparse(url)
endpoint = "{}://{}{}".format(
parsed_url.scheme, parsed_url.netloc, parsed_url.path
endpoint = (
f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}"
)
if response is not None:
labels = dict(
@ -713,9 +710,7 @@ class Proxy(adapter.Adapter):
requires_id=requires_id,
base_path=base_path,
skip_cache=skip_cache,
error_message="No {resource_type} found for {value}".format(
resource_type=resource_type.__name__, value=value
),
error_message=f"No {resource_type.__name__} found for {value}",
)
def _list(
@ -875,8 +870,8 @@ class Proxy(adapter.Adapter):
# There are filters set, but we can't get required
# attribute, so skip the resource
self.log.debug(
'Requested cleanup attribute %s is not '
'available on the resource' % k
f'Requested cleanup attribute {k} is not '
'available on the resource'
)
part_cond.append(False)
except Exception:

View File

@ -221,8 +221,9 @@ class _BaseComponent(abc.ABC):
if value and deprecated:
warnings.warn(
"The field %r has been deprecated. %s"
% (self.name, deprecation_reason or "Avoid usage."),
"The field {!r} has been deprecated. {}".format(
self.name, deprecation_reason or "Avoid usage."
),
os_warnings.RemovedFieldWarning,
)
return value
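
The `{!r}` conversion used here is the `str.format()` spelling of `%r`, and f-strings accept the same `!r` suffix; this hunk likely keeps `.format()` because the fallback string `"Avoid usage."` would collide with the surrounding quotes, the same pre-3.12 limitation noted earlier. All three forms render identically:

    name = 'parent_id'
    assert '%r' % name == '{!r}'.format(name) == f'{name!r}' == "'parent_id'"
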
@ -386,8 +387,9 @@ class QueryParameters:
else:
if not allow_unknown_params:
raise exceptions.InvalidResourceQuery(
message="Invalid query params: %s"
% ",".join(invalid_keys),
message="Invalid query params: {}".format(
",".join(invalid_keys)
),
extra_data=invalid_keys,
)
else:
@ -620,9 +622,7 @@ class Resource(dict):
]
args = ", ".join(pairs)
return "{}.{}({})".format(
self.__module__, self.__class__.__name__, args
)
return f"{self.__module__}.{self.__class__.__name__}({args})"
def __eq__(self, comparand):
"""Return True if another resource has the same contents"""
@ -687,9 +687,8 @@ class Resource(dict):
for attr, component in self._attributes_iterator(tuple([Body])):
if component.name == name:
warnings.warn(
"Access to '%s[%s]' is deprecated. "
"Use '%s.%s' attribute instead"
% (self.__class__, name, self.__class__, attr),
f"Access to '{self.__class__}[{name}]' is deprecated. "
f"Use '{self.__class__}.{attr}' attribute instead",
os_warnings.LegacyAPIWarning,
)
return getattr(self, attr)
@ -710,13 +709,9 @@ class Resource(dict):
self._unknown_attrs_in_body[name] = value
return
raise KeyError(
"{name} is not found. {module}.{cls} objects do not support"
" setting arbitrary keys through the"
" dict interface.".format(
module=self.__module__,
cls=self.__class__.__name__,
name=name,
)
f"{name} is not found. "
f"{self.__module__}.{self.__class__.__name__} objects do not "
f"support setting arbitrary keys through the dict interface."
)
def _attributes(
@ -1340,9 +1335,9 @@ class Resource(dict):
if isinstance(session, adapter.Adapter):
return session
raise ValueError(
"The session argument to Resource methods requires either an"
" instance of an openstack.proxy.Proxy object or at the very least"
" a raw keystoneauth1.adapter.Adapter."
"The session argument to Resource methods requires either an "
"instance of an openstack.proxy.Proxy object or at the very least "
"a raw keystoneauth1.adapter.Adapter."
)
@classmethod
@ -1373,7 +1368,7 @@ class Resource(dict):
'delete',
'patch',
}:
raise ValueError('Invalid action: %s' % action)
raise ValueError(f'Invalid action: {action}')
if session.default_microversion:
return session.default_microversion
@ -1414,9 +1409,9 @@ class Resource(dict):
if actual is None:
message = (
"API version %s is required, but the default "
f"API version {expected} is required, but the default "
"version will be used."
) % expected
)
_raise(message)
actual_n = discover.normalize_version_number(actual)
@ -1424,9 +1419,9 @@ class Resource(dict):
expected_n = discover.normalize_version_number(expected)
if actual_n < expected_n:
message = (
"API version %(expected)s is required, but %(actual)s "
f"API version {expected} is required, but {actual} "
"will be used."
) % {'expected': expected, 'actual': actual}
)
_raise(message)
if maximum is not None:
maximum_n = discover.normalize_version_number(maximum)
@ -1514,7 +1509,7 @@ class Resource(dict):
)
else:
raise exceptions.ResourceFailure(
"Invalid create method: %s" % self.create_method
f"Invalid create method: {self.create_method}"
)
has_body = (
@ -1576,7 +1571,7 @@ class Resource(dict):
and isinstance(data, list)
and all([isinstance(x, dict) for x in data])
):
raise ValueError('Invalid data passed: %s' % data)
raise ValueError(f'Invalid data passed: {data}')
session = cls._get_session(session)
if microversion is None:
@ -1592,7 +1587,7 @@ class Resource(dict):
method = session.post
else:
raise exceptions.ResourceFailure(
"Invalid create method: %s" % cls.create_method
f"Invalid create method: {cls.create_method}"
)
_body: ty.List[ty.Any] = []
@ -1831,7 +1826,7 @@ class Resource(dict):
call = getattr(session, method.lower())
except AttributeError:
raise exceptions.ResourceFailure(
"Invalid commit method: %s" % method
f"Invalid commit method: {method}"
)
response = call(
@ -1858,7 +1853,7 @@ class Resource(dict):
parts = path.lstrip('/').split('/', 1)
field = parts[0]
except (KeyError, IndexError):
raise ValueError("Malformed or missing path in %s" % item)
raise ValueError(f"Malformed or missing path in {item}")
try:
component = getattr(self.__class__, field)
@ -1870,7 +1865,7 @@ class Resource(dict):
if len(parts) > 1:
new_path = f'/{server_field}/{parts[1]}'
else:
new_path = '/%s' % server_field
new_path = f'/{server_field}'
converted.append(dict(item, path=new_path))
return converted
@ -2435,9 +2430,7 @@ def wait_for_status(
failures = [f.lower() for f in failures]
name = f"{resource.__class__.__name__}:{resource.id}"
msg = "Timeout waiting for {name} to transition to {status}".format(
name=name, status=status
)
msg = f"Timeout waiting for {name} to transition to {status}"
for count in utils.iterate_timeout(
timeout=wait, message=msg, wait=interval
@ -2445,9 +2438,7 @@ def wait_for_status(
resource = resource.fetch(session, skip_cache=True)
if not resource:
raise exceptions.ResourceFailure(
"{name} went away while waiting for {status}".format(
name=name, status=status
)
f"{name} went away while waiting for {status}"
)
new_status = getattr(resource, attribute)
@ -2456,9 +2447,7 @@ def wait_for_status(
return resource
elif normalized_status in failures:
raise exceptions.ResourceFailure(
"{name} transitioned to failure state {status}".format(
name=name, status=new_status
)
f"{name} transitioned to failure state {new_status}"
)
LOG.debug(
@ -2494,9 +2483,7 @@ def wait_for_delete(session, resource, interval, wait, callback=None):
orig_resource = resource
for count in utils.iterate_timeout(
timeout=wait,
message="Timeout waiting for {res}:{id} to delete".format(
res=resource.__class__.__name__, id=resource.id
),
message=f"Timeout waiting for {resource.__class__.__name__}:{resource.id} to delete",
wait=interval,
):
try:

View File

@ -224,9 +224,7 @@ class ServiceDescription:
if not data and instance._strict_proxies:
raise exceptions.ServiceDiscoveryException(
"Failed to create a working proxy for service "
"{service_type}: No endpoint data found.".format(
service_type=self.service_type
)
f"{self.service_type}: No endpoint data found."
)
# If we've gotten here with a proxy object it means we have
@ -279,8 +277,8 @@ class ServiceDescription:
)
else:
version_kwargs['min_version'] = str(supported_versions[0])
version_kwargs['max_version'] = '{version}.latest'.format(
version=str(supported_versions[-1])
version_kwargs['max_version'] = (
f'{supported_versions[-1]}.latest'
)
temp_adapter = config.get_session_client(
@ -291,21 +289,15 @@ class ServiceDescription:
region_name = instance.config.get_region_name(self.service_type)
if version_kwargs:
raise exceptions.NotSupported(
"The {service_type} service for {cloud}:{region_name}"
" exists but does not have any supported versions.".format(
service_type=self.service_type,
cloud=instance.name,
region_name=region_name,
)
f"The {self.service_type} service for "
f"{instance.name}:{region_name} exists but does not have "
f"any supported versions."
)
else:
raise exceptions.NotSupported(
"The {service_type} service for {cloud}:{region_name}"
" exists but no version was discoverable.".format(
service_type=self.service_type,
cloud=instance.name,
region_name=region_name,
)
f"The {self.service_type} service for "
f"{instance.name}:{region_name} exists but no version "
f"was discoverable."
)
proxy_class = self.supported_versions.get(str(found_version[0]))
if proxy_class:
@ -322,11 +314,9 @@ class ServiceDescription:
# service catalog that also doesn't have any useful
# version discovery?
warnings.warn(
"Service {service_type} has no discoverable version. "
f"Service {self.service_type} has no discoverable version. "
"The resulting Proxy object will only have direct "
"passthrough REST capabilities.".format(
service_type=self.service_type
),
"passthrough REST capabilities.",
category=os_warnings.UnsupportedServiceVersion,
)
return temp_adapter

View File

@ -1094,7 +1094,7 @@ class Proxy(proxy.Proxy):
keys_failed_to_delete.append(key)
if keys_failed_to_delete:
raise exceptions.SDKException(
"Some keys failed to be deleted %s" % keys_failed_to_delete
f"Some keys failed to be deleted {keys_failed_to_delete}"
)
def resource_locks(self, **query):

View File

@ -103,10 +103,7 @@ def generate_fake_resource(
base_attrs[name] = [uuid.uuid4().hex]
else:
# Everything else
msg = "Fake value for {}.{} can not be generated".format(
resource_type.__name__,
name,
)
msg = f"Fake value for {resource_type.__name__}.{name} can not be generated"
raise NotImplementedError(msg)
elif issubclass(target_type, list) and value.list_type is None:
# List of str
@ -130,10 +127,7 @@ def generate_fake_resource(
base_attrs[name] = dict()
else:
# Everything else
msg = "Fake value for {}.{} can not be generated".format(
resource_type.__name__,
name,
)
msg = f"Fake value for {resource_type.__name__}.{name} can not be generated"
raise NotImplementedError(msg)
if isinstance(value, resource.URI):

View File

@ -132,11 +132,7 @@ class TestCase(base.BaseTestCase):
if not whole[key] and part[key]:
missing_keys.append(key)
if missing_keys:
self.fail(
"Keys {} are in {} but not in {}".format(
missing_keys, part, whole
)
)
self.fail(f"Keys {missing_keys} are in {part} but not in {whole}")
wrong_values = [
(key, part[key], whole[key])
for key in part
@ -144,8 +140,10 @@ class TestCase(base.BaseTestCase):
]
if wrong_values:
self.fail(
"Mismatched values: %s"
% ", ".join(
"for %s got %s and %s" % tpl for tpl in wrong_values
"Mismatched values: {}".format(
", ".join(
"for {} got {} and {}".format(*tpl)
for tpl in wrong_values
)
)
)

View File

@ -30,9 +30,7 @@ FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddd'
CHOCOLATE_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8ddde'
STRAWBERRY_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddf'
COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1'
ORCHESTRATION_ENDPOINT = 'https://orchestration.example.com/v1/{p}'.format(
p=PROJECT_ID
)
ORCHESTRATION_ENDPOINT = f'https://orchestration.example.com/v1/{PROJECT_ID}'
NO_MD5 = '93b885adfe0da089cdf634904fd59f71'
NO_SHA256 = '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d'
FAKE_PUBLIC_KEY = (
@ -53,15 +51,11 @@ def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24):
'id': flavor_id,
'links': [
{
'href': '{endpoint}/flavors/{id}'.format(
endpoint=COMPUTE_ENDPOINT, id=flavor_id
),
'href': f'{COMPUTE_ENDPOINT}/flavors/{flavor_id}',
'rel': 'self',
},
{
'href': '{endpoint}/flavors/{id}'.format(
endpoint=COMPUTE_ENDPOINT, id=flavor_id
),
'href': f'{COMPUTE_ENDPOINT}/flavors/{flavor_id}',
'rel': 'bookmark',
},
],
@ -231,9 +225,7 @@ def make_fake_stack_event(
"rel": "resource",
},
{
"href": "{endpoint}/stacks/{name}/{id}".format(
endpoint=ORCHESTRATION_ENDPOINT, name=name, id=id
),
"href": f"{ORCHESTRATION_ENDPOINT}/stacks/{name}/{id}",
"rel": "stack",
},
],
@ -288,9 +280,7 @@ def make_fake_image(
'created_at': '2016-02-10T05:03:11Z',
'owner_specified.openstack.md5': md5 or NO_MD5,
'owner_specified.openstack.sha256': sha256 or NO_SHA256,
'owner_specified.openstack.object': 'images/{name}'.format(
name=image_name
),
'owner_specified.openstack.object': f'images/{image_name}',
'protected': False,
}

View File

@ -41,10 +41,10 @@ class TestBareMetalDriverDetails(base.BaseBaremetalTest):
self.assertEqual('fake-hardware', driver.name)
for iface in ('boot', 'deploy', 'management', 'power'):
self.assertIn(
'fake', getattr(driver, 'enabled_%s_interfaces' % iface)
'fake', getattr(driver, f'enabled_{iface}_interfaces')
)
self.assertEqual(
'fake', getattr(driver, 'default_%s_interface' % iface)
'fake', getattr(driver, f'default_{iface}_interface')
)
self.assertNotEqual([], driver.hosts)
@ -53,8 +53,8 @@ class TestBareMetalDriverDetails(base.BaseBaremetalTest):
driver = [d for d in drivers if d.name == 'fake-hardware'][0]
for iface in ('boot', 'deploy', 'management', 'power'):
self.assertIn(
'fake', getattr(driver, 'enabled_%s_interfaces' % iface)
'fake', getattr(driver, f'enabled_{iface}_interfaces')
)
self.assertEqual(
'fake', getattr(driver, 'default_%s_interface' % iface)
'fake', getattr(driver, f'default_{iface}_interface')
)

View File

@ -220,11 +220,7 @@ class BaseFunctionalTest(base.TestCase):
:returns: True if the service exists, otherwise False.
"""
if not self.conn.has_service(service_type):
self.skipTest(
'Service {service_type} not found in cloud'.format(
service_type=service_type
)
)
self.skipTest(f'Service {service_type} not found in cloud')
if not min_microversion:
return
@ -252,9 +248,9 @@ class BaseFunctionalTest(base.TestCase):
# Globally unique names can only rely on some form of uuid
# unix_t is also used to make it easier to determine orphans when running real
# functional tests on a real cloud
return (prefix if prefix else '') + "{time}-{uuid}".format(
time=int(time.time()), uuid=uuid.uuid4().hex
)
return (
prefix if prefix else ''
) + f"{int(time.time())}-{uuid.uuid4().hex}"
def create_temporary_project(self):
"""Create a new temporary project.

View File

@ -58,12 +58,12 @@ class TestClusterTemplate(base.BaseFunctionalTest):
'-N',
'',
'-f',
'%s/id_rsa_sdk' % self.ssh_directory,
f'{self.ssh_directory}/id_rsa_sdk',
]
)
# add keypair to nova
with open('%s/id_rsa_sdk.pub' % self.ssh_directory) as f:
with open(f'{self.ssh_directory}/id_rsa_sdk.pub') as f:
key_content = f.read()
self.user_cloud.create_keypair('testkey', key_content)

View File

@ -339,9 +339,7 @@ class TestCompute(base.BaseFunctionalTest):
# consistency!
for count in utils.iterate_timeout(
60,
'Timeout waiting for volume {volume_id} to detach'.format(
volume_id=volume_id
),
f'Timeout waiting for volume {volume_id} to detach',
):
volume = self.user_cloud.get_volume(volume_id)
if volume.status in (

View File

@ -35,9 +35,9 @@ class TestRecordset(base.BaseFunctionalTest):
'''Test DNS recordsets functionality'''
sub = ''.join(random.choice(string.ascii_lowercase) for _ in range(6))
zone = '%s.example2.net.' % sub
zone = f'{sub}.example2.net.'
email = 'test@example2.net'
name = 'www.%s' % zone
name = f'www.{zone}'
type_ = 'a'
description = 'Test recordset'
ttl = 3600
@ -96,9 +96,9 @@ class TestRecordset(base.BaseFunctionalTest):
'''Test DNS recordsets functionality'''
sub = ''.join(random.choice(string.ascii_lowercase) for _ in range(6))
zone = '%s.example2.net.' % sub
zone = f'{sub}.example2.net.'
email = 'test@example2.net'
name = 'www.%s' % zone
name = f'www.{zone}'
type_ = 'a'
description = 'Test recordset'
ttl = 3600

View File

@ -54,9 +54,7 @@ class TestZone(base.BaseFunctionalTest):
self.assertEqual(
current_ttl + 1,
updated_zone_ttl,
'Failed, updated TTL value is:{} instead of expected:{}'.format(
updated_zone_ttl, current_ttl + 1
),
f'Failed, updated TTL value is:{updated_zone_ttl} instead of expected:{current_ttl + 1}',
)
def test_create_rs(self):

View File

@ -310,7 +310,7 @@ class TestNodeSetProvisionState(base.TestCase):
result = self.node.set_provision_state(self.session, 'active')
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'active'},
headers=mock.ANY,
microversion=None,
@ -321,7 +321,7 @@ class TestNodeSetProvisionState(base.TestCase):
result = self.node.set_provision_state(self.session, 'manage')
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'manage'},
headers=mock.ANY,
microversion='1.4',
@ -334,7 +334,7 @@ class TestNodeSetProvisionState(base.TestCase):
)
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'active', 'configdrive': 'abcd'},
headers=mock.ANY,
microversion=None,
@ -348,7 +348,7 @@ class TestNodeSetProvisionState(base.TestCase):
)
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'active', 'configdrive': config_drive.decode()},
headers=mock.ANY,
microversion=None,
@ -361,7 +361,7 @@ class TestNodeSetProvisionState(base.TestCase):
)
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'rebuild', 'configdrive': 'abcd'},
headers=mock.ANY,
microversion='1.35',
@ -376,7 +376,7 @@ class TestNodeSetProvisionState(base.TestCase):
)
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': target, 'configdrive': {'user_data': 'abcd'}},
headers=mock.ANY,
microversion='1.56',
@ -391,7 +391,7 @@ class TestNodeSetProvisionState(base.TestCase):
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'active', 'deploy_steps': deploy_steps},
headers=mock.ANY,
microversion='1.69',
@ -406,7 +406,7 @@ class TestNodeSetProvisionState(base.TestCase):
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'rebuild', 'deploy_steps': deploy_steps},
headers=mock.ANY,
microversion='1.69',
@ -418,7 +418,7 @@ class TestNodeSetProvisionState(base.TestCase):
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'unhold'},
headers=mock.ANY,
microversion='1.85',
@ -433,7 +433,7 @@ class TestNodeSetProvisionState(base.TestCase):
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'service', 'service_steps': service_steps},
headers=mock.ANY,
microversion='1.87',
@ -448,7 +448,7 @@ class TestNodeSetProvisionState(base.TestCase):
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'clean', 'runbook': runbook},
headers=mock.ANY,
microversion='1.92',
@ -463,7 +463,7 @@ class TestNodeSetProvisionState(base.TestCase):
self.assertIs(result, self.node)
self.session.put.assert_called_once_with(
'nodes/%s/states/provision' % self.node.id,
f'nodes/{self.node.id}/states/provision',
json={'target': 'service', 'runbook': runbook},
headers=mock.ANY,
microversion='1.92',
@ -602,7 +602,7 @@ class TestNodeVif(base.TestCase):
def test_attach_vif(self):
self.assertIsNone(self.node.attach_vif(self.session, self.vif_id))
self.session.post.assert_called_once_with(
'nodes/%s/vifs' % self.node.id,
f'nodes/{self.node.id}/vifs',
json={'id': self.vif_id},
headers=mock.ANY,
microversion='1.67',
@ -616,7 +616,7 @@ class TestNodeVif(base.TestCase):
)
)
self.session.post.assert_called_once_with(
'nodes/%s/vifs' % self.node.id,
f'nodes/{self.node.id}/vifs',
json={'id': self.vif_id},
headers=mock.ANY,
microversion='1.67',
@ -630,7 +630,7 @@ class TestNodeVif(base.TestCase):
)
)
self.session.post.assert_called_once_with(
'nodes/%s/vifs' % self.node.id,
f'nodes/{self.node.id}/vifs',
json={'id': self.vif_id, 'port_uuid': self.vif_port_uuid},
headers=mock.ANY,
microversion='1.67',
@ -646,7 +646,7 @@ class TestNodeVif(base.TestCase):
)
)
self.session.post.assert_called_once_with(
'nodes/%s/vifs' % self.node.id,
f'nodes/{self.node.id}/vifs',
json={
'id': self.vif_id,
'portgroup_uuid': self.vif_portgroup_uuid,
@ -695,7 +695,7 @@ class TestNodeVif(base.TestCase):
res = self.node.list_vifs(self.session)
self.assertEqual(['1234', '5678'], res)
self.session.get.assert_called_once_with(
'nodes/%s/vifs' % self.node.id,
f'nodes/{self.node.id}/vifs',
headers=mock.ANY,
microversion='1.67',
)
@ -849,7 +849,7 @@ class TestNodeInjectNMI(base.TestCase):
def test_inject_nmi(self):
self.node.inject_nmi(self.session)
self.session.put.assert_called_once_with(
'nodes/%s/management/inject_nmi' % FAKE['uuid'],
'nodes/{}/management/inject_nmi'.format(FAKE['uuid']),
json={},
headers=mock.ANY,
microversion='1.29',
@ -878,7 +878,7 @@ class TestNodeSetPowerState(base.TestCase):
def test_power_on(self):
self.node.set_power_state(self.session, 'power on')
self.session.put.assert_called_once_with(
'nodes/%s/states/power' % FAKE['uuid'],
'nodes/{}/states/power'.format(FAKE['uuid']),
json={'target': 'power on'},
headers=mock.ANY,
microversion=None,
@ -888,7 +888,7 @@ class TestNodeSetPowerState(base.TestCase):
def test_soft_power_on(self):
self.node.set_power_state(self.session, 'soft power off')
self.session.put.assert_called_once_with(
'nodes/%s/states/power' % FAKE['uuid'],
'nodes/{}/states/power'.format(FAKE['uuid']),
json={'target': 'soft power off'},
headers=mock.ANY,
microversion='1.27',
@ -912,7 +912,7 @@ class TestNodeMaintenance(base.TestCase):
def test_set(self):
self.node.set_maintenance(self.session)
self.session.put.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json={'reason': None},
headers=mock.ANY,
microversion=mock.ANY,
@ -921,7 +921,7 @@ class TestNodeMaintenance(base.TestCase):
def test_set_with_reason(self):
self.node.set_maintenance(self.session, 'No work on Monday')
self.session.put.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json={'reason': 'No work on Monday'},
headers=mock.ANY,
microversion=mock.ANY,
@ -930,7 +930,7 @@ class TestNodeMaintenance(base.TestCase):
def test_unset(self):
self.node.unset_maintenance(self.session)
self.session.delete.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json=None,
headers=mock.ANY,
microversion=mock.ANY,
@ -940,7 +940,7 @@ class TestNodeMaintenance(base.TestCase):
self.node.is_maintenance = True
self.node.commit(self.session)
self.session.put.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json={'reason': None},
headers=mock.ANY,
microversion=mock.ANY,
@ -953,7 +953,7 @@ class TestNodeMaintenance(base.TestCase):
self.node.maintenance_reason = 'No work on Monday'
self.node.commit(self.session)
self.session.put.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json={'reason': 'No work on Monday'},
headers=mock.ANY,
microversion=mock.ANY,
@ -965,14 +965,14 @@ class TestNodeMaintenance(base.TestCase):
self.node.name = 'lazy-3000'
self.node.commit(self.session)
self.session.put.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json={'reason': None},
headers=mock.ANY,
microversion=mock.ANY,
)
self.session.patch.assert_called_once_with(
'nodes/%s' % self.node.id,
f'nodes/{self.node.id}',
json=[{'path': '/name', 'op': 'replace', 'value': 'lazy-3000'}],
headers=mock.ANY,
microversion=mock.ANY,
@ -984,14 +984,14 @@ class TestNodeMaintenance(base.TestCase):
self.node.name = 'lazy-3000'
self.node.commit(self.session)
self.session.put.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json={'reason': 'No work on Monday'},
headers=mock.ANY,
microversion=mock.ANY,
)
self.session.patch.assert_called_once_with(
'nodes/%s' % self.node.id,
f'nodes/{self.node.id}',
json=[{'path': '/name', 'op': 'replace', 'value': 'lazy-3000'}],
headers=mock.ANY,
microversion=mock.ANY,
@ -1009,7 +1009,7 @@ class TestNodeMaintenance(base.TestCase):
self.node.commit(self.session)
self.session.put.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json={'reason': 'No work on Monday'},
headers=mock.ANY,
microversion=mock.ANY,
@ -1020,7 +1020,7 @@ class TestNodeMaintenance(base.TestCase):
self.assertIsNone(self.node.maintenance_reason)
self.session.delete.assert_called_once_with(
'nodes/%s/maintenance' % self.node.id,
f'nodes/{self.node.id}/maintenance',
json=None,
headers=mock.ANY,
microversion=mock.ANY,
@ -1040,7 +1040,7 @@ class TestNodeBootDevice(base.TestCase):
def test_get_boot_device(self):
self.node.get_boot_device(self.session)
self.session.get.assert_called_once_with(
'nodes/%s/management/boot_device' % self.node.id,
f'nodes/{self.node.id}/management/boot_device',
headers=mock.ANY,
microversion=mock.ANY,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1049,7 +1049,7 @@ class TestNodeBootDevice(base.TestCase):
def test_set_boot_device(self):
self.node.set_boot_device(self.session, 'pxe', persistent=False)
self.session.put.assert_called_once_with(
'nodes/%s/management/boot_device' % self.node.id,
f'nodes/{self.node.id}/management/boot_device',
json={'boot_device': 'pxe', 'persistent': False},
headers=mock.ANY,
microversion=mock.ANY,
@ -1059,7 +1059,7 @@ class TestNodeBootDevice(base.TestCase):
def test_get_supported_boot_devices(self):
self.node.get_supported_boot_devices(self.session)
self.session.get.assert_called_once_with(
'nodes/%s/management/boot_device/supported' % self.node.id,
f'nodes/{self.node.id}/management/boot_device/supported',
headers=mock.ANY,
microversion=mock.ANY,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1080,7 +1080,7 @@ class TestNodeSetBootMode(base.TestCase):
def test_node_set_boot_mode(self):
self.node.set_boot_mode(self.session, 'uefi')
self.session.put.assert_called_once_with(
'nodes/%s/states/boot_mode' % self.node.id,
f'nodes/{self.node.id}/states/boot_mode',
json={'target': 'uefi'},
headers=mock.ANY,
microversion=mock.ANY,
@ -1107,7 +1107,7 @@ class TestNodeSetSecureBoot(base.TestCase):
def test_node_set_secure_boot(self):
self.node.set_secure_boot(self.session, True)
self.session.put.assert_called_once_with(
'nodes/%s/states/secure_boot' % self.node.id,
f'nodes/{self.node.id}/states/secure_boot',
json={'target': True},
headers=mock.ANY,
microversion=mock.ANY,
@ -1167,7 +1167,7 @@ class TestNodeTraits(base.TestCase):
traits = ['CUSTOM_FAKE', 'CUSTOM_REAL', 'CUSTOM_MISSING']
self.node.set_traits(self.session, traits)
self.session.put.assert_called_once_with(
'nodes/%s/traits' % self.node.id,
f'nodes/{self.node.id}/traits',
json={'traits': ['CUSTOM_FAKE', 'CUSTOM_REAL', 'CUSTOM_MISSING']},
headers=mock.ANY,
microversion='1.37',
@ -1264,7 +1264,7 @@ class TestNodePassthru:
def test_get_passthru(self):
self.node.call_vendor_passthru(self.session, "GET", "test_method")
self.session.get.assert_called_once_with(
'nodes/%s/vendor_passthru?method=test_method' % self.node.id,
f'nodes/{self.node.id}/vendor_passthru?method=test_method',
headers=mock.ANY,
microversion='1.37',
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1273,7 +1273,7 @@ class TestNodePassthru:
def test_post_passthru(self):
self.node.call_vendor_passthru(self.session, "POST", "test_method")
self.session.post.assert_called_once_with(
'nodes/%s/vendor_passthru?method=test_method' % self.node.id,
f'nodes/{self.node.id}/vendor_passthru?method=test_method',
headers=mock.ANY,
microversion='1.37',
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1282,7 +1282,7 @@ class TestNodePassthru:
def test_put_passthru(self):
self.node.call_vendor_passthru(self.session, "PUT", "test_method")
self.session.put.assert_called_once_with(
'nodes/%s/vendor_passthru?method=test_method' % self.node.id,
f'nodes/{self.node.id}/vendor_passthru?method=test_method',
headers=mock.ANY,
microversion='1.37',
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1291,7 +1291,7 @@ class TestNodePassthru:
def test_delete_passthru(self):
self.node.call_vendor_passthru(self.session, "DELETE", "test_method")
self.session.delete.assert_called_once_with(
'nodes/%s/vendor_passthru?method=test_method' % self.node.id,
f'nodes/{self.node.id}/vendor_passthru?method=test_method',
headers=mock.ANY,
microversion='1.37',
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1300,7 +1300,7 @@ class TestNodePassthru:
def test_list_passthru(self):
self.node.list_vendor_passthru(self.session)
self.session.get.assert_called_once_with(
'nodes/%s/vendor_passthru/methods' % self.node.id,
f'nodes/{self.node.id}/vendor_passthru/methods',
headers=mock.ANY,
microversion='1.37',
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1321,7 +1321,7 @@ class TestNodeConsole(base.TestCase):
def test_get_console(self):
self.node.get_console(self.session)
self.session.get.assert_called_once_with(
'nodes/%s/states/console' % self.node.id,
f'nodes/{self.node.id}/states/console',
headers=mock.ANY,
microversion=mock.ANY,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1330,7 +1330,7 @@ class TestNodeConsole(base.TestCase):
def test_set_console_mode(self):
self.node.set_console_mode(self.session, True)
self.session.put.assert_called_once_with(
'nodes/%s/states/console' % self.node.id,
f'nodes/{self.node.id}/states/console',
json={'enabled': True},
headers=mock.ANY,
microversion=mock.ANY,
@ -1382,7 +1382,7 @@ class TestNodeInventory(base.TestCase):
self.assertEqual(node_inventory, res)
self.session.get.assert_called_once_with(
'nodes/%s/inventory' % self.node.id,
f'nodes/{self.node.id}/inventory',
headers=mock.ANY,
microversion='1.81',
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
@ -1427,7 +1427,7 @@ class TestNodeFirmware(base.TestCase):
self.assertEqual(node_firmware, res)
self.session.get.assert_called_once_with(
'nodes/%s/firmware' % self.node.id,
f'nodes/{self.node.id}/firmware',
headers=mock.ANY,
microversion='1.86',
retriable_status_codes=_common.RETRIABLE_STATUS_CODES,

View File

@ -203,7 +203,7 @@ class TestCase(base.TestCase):
if append:
to_join.extend([urllib.parse.quote(i) for i in append])
if qs_elements is not None:
qs = '?%s' % '&'.join(qs_elements)
qs = '?{}'.format('&'.join(qs_elements))
return '{uri}{qs}'.format(uri='/'.join(to_join), qs=qs)
def mock_for_keystone_projects(
@ -811,17 +811,13 @@ class TestCase(base.TestCase):
# NOTE(notmorgan): make sure the delimiter is non-url-safe, in this
# case "|" is used so that the split can be a bit easier on
# maintainers of this code.
key = '{method}|{uri}|{params}'.format(
method=method, uri=uri, params=kw_params
)
key = f'{method}|{uri}|{kw_params}'
validate = to_mock.pop('validate', {})
valid_keys = {'json', 'headers', 'params', 'data'}
invalid_keys = set(validate.keys()) - valid_keys
if invalid_keys:
raise TypeError(
"Invalid values passed to validate: {keys}".format(
keys=invalid_keys
)
f"Invalid values passed to validate: {invalid_keys}"
)
headers = structures.CaseInsensitiveDict(
to_mock.pop('headers', {})
@ -841,11 +837,10 @@ class TestCase(base.TestCase):
'PROGRAMMING ERROR: key-word-params '
'should be part of the uri_key and cannot change, '
'it will affect the matcher in requests_mock. '
'%(old)r != %(new)r'
% {
'old': self._uri_registry[key]['kw_params'],
'new': kw_params,
}
'{old!r} != {new!r}'.format(
old=self._uri_registry[key]['kw_params'],
new=kw_params,
)
)
self._uri_registry[key]['response_list'].append(to_mock)
@ -900,9 +895,7 @@ class TestCase(base.TestCase):
'call': '{method} {url}'.format(
method=call['method'], url=call['url']
),
'history': '{method} {url}'.format(
method=history.method, url=history.url
),
'history': f'{history.method} {history.url}',
}
),
)

View File

@ -139,7 +139,7 @@ class TestBackup(base.TestCase):
self.assertEqual(sot, sot.restore(self.sess, 'vol', 'name'))
url = 'backups/%s/restore' % FAKE_ID
url = f'backups/{FAKE_ID}/restore'
body = {"restore": {"volume_id": "vol", "name": "name"}}
self.sess.post.assert_called_with(url, json=body)
@ -148,7 +148,7 @@ class TestBackup(base.TestCase):
self.assertEqual(sot, sot.restore(self.sess, name='name'))
url = 'backups/%s/restore' % FAKE_ID
url = f'backups/{FAKE_ID}/restore'
body = {"restore": {"name": "name"}}
self.sess.post.assert_called_with(url, json=body)
@ -157,7 +157,7 @@ class TestBackup(base.TestCase):
self.assertEqual(sot, sot.restore(self.sess, volume_id='vol'))
url = 'backups/%s/restore' % FAKE_ID
url = f'backups/{FAKE_ID}/restore'
body = {"restore": {"volume_id": "vol"}}
self.sess.post.assert_called_with(url, json=body)
@ -171,7 +171,7 @@ class TestBackup(base.TestCase):
self.assertIsNone(sot.force_delete(self.sess))
url = 'backups/%s/action' % FAKE_ID
url = f'backups/{FAKE_ID}/action'
body = {'os-force_delete': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -182,7 +182,7 @@ class TestBackup(base.TestCase):
self.assertIsNone(sot.reset(self.sess, 'new_status'))
url = 'backups/%s/action' % FAKE_ID
url = f'backups/{FAKE_ID}/action'
body = {'os-reset_status': {'status': 'new_status'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion


@ -97,7 +97,7 @@ class TestSnapshotActions(base.TestCase):
self.assertIsNone(sot.reset(self.sess, 'new_status'))
url = 'snapshots/%s/action' % FAKE_ID
url = f'snapshots/{FAKE_ID}/action'
body = {'os-reset_status': {'status': 'new_status'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion


@ -73,7 +73,7 @@ class TestType(base.TestCase):
)
self.sess.get.assert_called_with(
"types/%s/os-volume-type-access" % sot.id
f"types/{sot.id}/os-volume-type-access"
)
def test_add_private_access(self):
@ -81,7 +81,7 @@ class TestType(base.TestCase):
self.assertIsNone(sot.add_private_access(self.sess, "a"))
url = "types/%s/action" % sot.id
url = f"types/{sot.id}/action"
body = {"addProjectAccess": {"project": "a"}}
self.sess.post.assert_called_with(url, json=body)
@ -90,6 +90,6 @@ class TestType(base.TestCase):
self.assertIsNone(sot.remove_private_access(self.sess, "a"))
url = "types/%s/action" % sot.id
url = f"types/{sot.id}/action"
body = {"removeProjectAccess": {"project": "a"}}
self.sess.post.assert_called_with(url, json=body)


@ -153,7 +153,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.extend(self.sess, '20'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {"os-extend": {"new_size": "20"}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -164,7 +164,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_readonly(self.sess, True))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-update_readonly_flag': {'readonly': True}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -175,7 +175,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_readonly(self.sess, False))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-update_readonly_flag': {'readonly': False}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -186,7 +186,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_bootable_status(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-set_bootable': {'bootable': True}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -197,7 +197,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_bootable_status(self.sess, False))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-set_bootable': {'bootable': False}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -208,7 +208,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_image_metadata(self.sess, {'foo': 'bar'}))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-set_image_metadata': {'foo': 'bar'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -224,7 +224,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.delete_image_metadata(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body_a = {'os-unset_image_metadata': 'foo'}
body_b = {'os-unset_image_metadata': 'baz'}
self.sess.post.assert_has_calls(
@ -243,7 +243,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.delete_image_metadata_item(self.sess, 'foo'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-unset_image_metadata': 'foo'}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -254,7 +254,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.reset_status(self.sess, '1', '2', '3'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-reset_status': {
'status': '1',
@ -271,7 +271,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.reset_status(self.sess, status='1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-reset_status': {
'status': '1',
@ -286,7 +286,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.attach(self.sess, '1', '2'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-attach': {'mountpoint': '1', 'instance_uuid': '2'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -297,7 +297,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.detach(self.sess, '1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-detach': {'attachment_id': '1'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -308,7 +308,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.detach(self.sess, '1', force=True))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-force_detach': {'attachment_id': '1'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -319,7 +319,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.unmanage(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-unmanage': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -330,7 +330,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.retype(self.sess, '1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-retype': {'new_type': '1'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -341,7 +341,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.retype(self.sess, '1', migration_policy='2'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-retype': {'new_type': '1', 'migration_policy': '2'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -352,7 +352,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.migrate(self.sess, host='1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-migrate_volume': {'host': '1'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -367,7 +367,7 @@ class TestVolumeActions(TestVolume):
)
)
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-migrate_volume': {
'host': '1',
@ -384,7 +384,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.complete_migration(self.sess, new_volume_id='1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-migrate_volume_completion': {'new_volume': '1', 'error': False}
}
@ -399,7 +399,7 @@ class TestVolumeActions(TestVolume):
sot.complete_migration(self.sess, new_volume_id='1', error=True)
)
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-migrate_volume_completion': {'new_volume': '1', 'error': True}
}
@ -412,7 +412,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.force_delete(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-force_delete': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion


@ -180,7 +180,7 @@ class TestAttachment(base.TestCase):
sot.id = FAKE_ID
sot.complete(self.sess)
self.sess.post.assert_called_with(
'/attachments/%s/action' % FAKE_ID,
f'/attachments/{FAKE_ID}/action',
json={
'os-complete': '92dc3671-d0ab-4370-8058-c88a71661ec5',
},


@ -152,7 +152,7 @@ class TestBackup(base.TestCase):
self.assertEqual(sot, sot.restore(self.sess, 'vol', 'name'))
url = 'backups/%s/restore' % FAKE_ID
url = f'backups/{FAKE_ID}/restore'
body = {"restore": {"volume_id": "vol", "name": "name"}}
self.sess.post.assert_called_with(url, json=body)
@ -161,7 +161,7 @@ class TestBackup(base.TestCase):
self.assertEqual(sot, sot.restore(self.sess, name='name'))
url = 'backups/%s/restore' % FAKE_ID
url = f'backups/{FAKE_ID}/restore'
body = {"restore": {"name": "name"}}
self.sess.post.assert_called_with(url, json=body)
@ -170,7 +170,7 @@ class TestBackup(base.TestCase):
self.assertEqual(sot, sot.restore(self.sess, volume_id='vol'))
url = 'backups/%s/restore' % FAKE_ID
url = f'backups/{FAKE_ID}/restore'
body = {"restore": {"volume_id": "vol"}}
self.sess.post.assert_called_with(url, json=body)
@ -184,7 +184,7 @@ class TestBackup(base.TestCase):
self.assertIsNone(sot.force_delete(self.sess))
url = 'backups/%s/action' % FAKE_ID
url = f'backups/{FAKE_ID}/action'
body = {'os-force_delete': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -195,7 +195,7 @@ class TestBackup(base.TestCase):
self.assertIsNone(sot.reset(self.sess, 'new_status'))
url = 'backups/%s/action' % FAKE_ID
url = f'backups/{FAKE_ID}/action'
body = {'os-reset_status': {'status': 'new_status'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion


@ -87,7 +87,7 @@ class TestGroupAction(base.TestCase):
self.assertIsNone(sot.delete(self.sess))
url = 'groups/%s/action' % GROUP_ID
url = f'groups/{GROUP_ID}/action'
body = {'delete': {'delete-volumes': False}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -98,7 +98,7 @@ class TestGroupAction(base.TestCase):
self.assertIsNone(sot.reset(self.sess, 'new_status'))
url = 'groups/%s/action' % GROUP_ID
url = f'groups/{GROUP_ID}/action'
body = {'reset_status': {'status': 'new_status'}}
self.sess.post.assert_called_with(
url,


@ -105,7 +105,7 @@ class TestSnapshotActions(base.TestCase):
self.assertIsNone(sot.force_delete(self.sess))
url = 'snapshots/%s/action' % FAKE_ID
url = f'snapshots/{FAKE_ID}/action'
body = {'os-force_delete': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -116,7 +116,7 @@ class TestSnapshotActions(base.TestCase):
self.assertIsNone(sot.reset(self.sess, 'new_status'))
url = 'snapshots/%s/action' % FAKE_ID
url = f'snapshots/{FAKE_ID}/action'
body = {'os-reset_status': {'status': 'new_status'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -127,7 +127,7 @@ class TestSnapshotActions(base.TestCase):
self.assertIsNone(sot.set_status(self.sess, 'new_status'))
url = 'snapshots/%s/action' % FAKE_ID
url = f'snapshots/{FAKE_ID}/action'
body = {'os-update_snapshot_status': {'status': 'new_status'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -206,7 +206,7 @@ class TestSnapshotActions(base.TestCase):
self.assertIsNone(sot.unmanage(self.sess))
url = 'snapshots/%s/action' % FAKE_ID
url = f'snapshots/{FAKE_ID}/action'
body = {'os-unmanage': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion


@ -112,7 +112,7 @@ class TestTransfer(base.TestCase):
sot.accept(self.sess, auth_key=FAKE_AUTH_KEY)
self.sess.post.assert_called_with(
'volume-transfers/%s/accept' % FAKE_TRANSFER,
f'volume-transfers/{FAKE_TRANSFER}/accept',
json={
'accept': {
'auth_key': FAKE_AUTH_KEY,
@ -134,7 +134,7 @@ class TestTransfer(base.TestCase):
sot.accept(self.sess, auth_key=FAKE_AUTH_KEY)
self.sess.post.assert_called_with(
'os-volume-transfer/%s/accept' % FAKE_TRANSFER,
f'os-volume-transfer/{FAKE_TRANSFER}/accept',
json={
'accept': {
'auth_key': FAKE_AUTH_KEY,


@ -150,7 +150,7 @@ class TestType(base.TestCase):
)
self.sess.get.assert_called_with(
"types/%s/os-volume-type-access" % sot.id
f"types/{sot.id}/os-volume-type-access"
)
def test_add_private_access(self):
@ -158,7 +158,7 @@ class TestType(base.TestCase):
self.assertIsNone(sot.add_private_access(self.sess, "a"))
url = "types/%s/action" % sot.id
url = f"types/{sot.id}/action"
body = {"addProjectAccess": {"project": "a"}}
self.sess.post.assert_called_with(url, json=body)
@ -167,6 +167,6 @@ class TestType(base.TestCase):
self.assertIsNone(sot.remove_private_access(self.sess, "a"))
url = "types/%s/action" % sot.id
url = f"types/{sot.id}/action"
body = {"removeProjectAccess": {"project": "a"}}
self.sess.post.assert_called_with(url, json=body)


@ -159,7 +159,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.extend(self.sess, '20'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {"os-extend": {"new_size": "20"}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -170,7 +170,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_readonly(self.sess, True))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-update_readonly_flag': {'readonly': True}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -181,7 +181,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_readonly(self.sess, False))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-update_readonly_flag': {'readonly': False}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -192,7 +192,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_bootable_status(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-set_bootable': {'bootable': True}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -203,7 +203,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_bootable_status(self.sess, False))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-set_bootable': {'bootable': False}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -214,7 +214,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.set_image_metadata(self.sess, {'foo': 'bar'}))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-set_image_metadata': {'foo': 'bar'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -230,7 +230,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.delete_image_metadata(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body_a = {'os-unset_image_metadata': 'foo'}
body_b = {'os-unset_image_metadata': 'baz'}
self.sess.post.assert_has_calls(
@ -249,7 +249,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.delete_image_metadata_item(self.sess, 'foo'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-unset_image_metadata': 'foo'}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -260,7 +260,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.reset_status(self.sess, '1', '2', '3'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-reset_status': {
'status': '1',
@ -277,7 +277,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.reset_status(self.sess, status='1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-reset_status': {
'status': '1',
@ -309,7 +309,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.revert_to_snapshot(self.sess, '1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'revert': {'snapshot_id': '1'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -321,7 +321,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.attach(self.sess, '1', instance='2'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-attach': {'mountpoint': '1', 'instance_uuid': '2'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -332,7 +332,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.attach(self.sess, '1', host_name='2'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-attach': {'mountpoint': '1', 'host_name': '2'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -348,7 +348,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.detach(self.sess, '1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-detach': {'attachment_id': '1'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -361,7 +361,7 @@ class TestVolumeActions(TestVolume):
sot.detach(self.sess, '1', force=True, connector={'a': 'b'})
)
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-force_detach': {'attachment_id': '1', 'connector': {'a': 'b'}}
}
@ -374,7 +374,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.unmanage(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-unmanage': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -385,7 +385,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.retype(self.sess, '1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-retype': {'new_type': '1'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -396,7 +396,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.retype(self.sess, '1', migration_policy='2'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-retype': {'new_type': '1', 'migration_policy': '2'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -407,7 +407,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.migrate(self.sess, host='1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-migrate_volume': {'host': '1'}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -422,7 +422,7 @@ class TestVolumeActions(TestVolume):
)
)
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-migrate_volume': {
'host': '1',
@ -448,7 +448,7 @@ class TestVolumeActions(TestVolume):
)
)
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-migrate_volume': {
'cluster': '1',
@ -466,7 +466,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.complete_migration(self.sess, new_volume_id='1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-migrate_volume_completion': {'new_volume': '1', 'error': False}
}
@ -481,7 +481,7 @@ class TestVolumeActions(TestVolume):
sot.complete_migration(self.sess, new_volume_id='1', error=True)
)
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-migrate_volume_completion': {'new_volume': '1', 'error': True}
}
@ -494,7 +494,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.force_delete(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-force_delete': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -511,7 +511,7 @@ class TestVolumeActions(TestVolume):
self.assertDictEqual({'a': 'b'}, sot.upload_to_image(self.sess, '1'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-volume_upload_image': {'image_name': '1', 'force': False}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -543,7 +543,7 @@ class TestVolumeActions(TestVolume):
),
)
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {
'os-volume_upload_image': {
'image_name': '1',
@ -564,7 +564,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.reserve(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-reserve': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -575,7 +575,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.unreserve(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-unreserve': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -586,7 +586,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.begin_detaching(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-begin_detaching': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -597,7 +597,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.abort_detaching(self.sess))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-roll_detaching': None}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -615,7 +615,7 @@ class TestVolumeActions(TestVolume):
{'c': 'd'}, sot.init_attachment(self.sess, {'a': 'b'})
)
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-initialize_connection': {'connector': {'a': 'b'}}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -626,7 +626,7 @@ class TestVolumeActions(TestVolume):
self.assertIsNone(sot.terminate_attachment(self.sess, {'a': 'b'}))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {'os-terminate_connection': {'connector': {'a': 'b'}}}
self.sess.post.assert_called_with(
url, json=body, microversion=sot._max_microversion
@ -734,6 +734,6 @@ class TestVolumeActions(TestVolume):
self.sess.default_microversion = '3.50'
self.assertIsNone(sot.extend(self.sess, '20'))
url = 'volumes/%s/action' % FAKE_ID
url = f'volumes/{FAKE_ID}/action'
body = {"os-extend": {"new_size": "20"}}
self.sess.post.assert_called_with(url, json=body, microversion="3.50")


@ -331,7 +331,7 @@ class TestUtils(base.TestCase):
# if the use_direct_get flag is set to False(default).
uuid = uuid4().hex
resource = 'network'
func = 'search_%ss' % resource
func = f'search_{resource}s'
filters = {}
with mock.patch.object(self.cloud, func) as search:
_utils._get_entity(self.cloud, resource, uuid, filters)
@ -343,7 +343,7 @@ class TestUtils(base.TestCase):
self.cloud.use_direct_get = True
name = 'name_no_uuid'
resource = 'network'
func = 'search_%ss' % resource
func = f'search_{resource}s'
filters = {}
with mock.patch.object(self.cloud, func) as search:
_utils._get_entity(self.cloud, resource, name, filters)
@ -363,7 +363,7 @@ class TestUtils(base.TestCase):
'security_group',
]
for r in resources:
f = 'get_%s_by_id' % r
f = f'get_{r}_by_id'
with mock.patch.object(self.cloud, f) as get:
_utils._get_entity(self.cloud, r, uuid, {})
get.assert_called_once_with(uuid)
@ -383,7 +383,7 @@ class TestUtils(base.TestCase):
filters = {}
name = 'name_no_uuid'
for r in resources:
f = 'search_%ss' % r
f = f'search_{r}s'
with mock.patch.object(self.cloud, f) as search:
_utils._get_entity(self.cloud, r, name, {})
search.assert_called_once_with(name, filters)
@ -400,5 +400,5 @@ class TestUtils(base.TestCase):
'security_group',
]
for r in resources:
self.assertTrue(hasattr(self.cloud, 'get_%s_by_id' % r))
self.assertTrue(hasattr(self.cloud, 'search_%ss' % r))
self.assertTrue(hasattr(self.cloud, f'get_{r}_by_id'))
self.assertTrue(hasattr(self.cloud, f'search_{r}s'))
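Note: these computed attribute names are plain string concatenation, so the f-string spelling resolves to exactly the same method name as the old %-format; a sketch with an illustrative resource:

# Both forms yield 'search_networks', the attribute patched on the cloud.
resource = 'network'
assert 'search_%ss' % resource == f'search_{resource}s' == 'search_networks'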


@ -29,9 +29,7 @@ class TestAvailabilityZoneNames(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone',
json=_fake_zone_list,
),
]
@ -46,9 +44,7 @@ class TestAvailabilityZoneNames(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone',
status_code=403,
),
]
@ -63,9 +59,7 @@ class TestAvailabilityZoneNames(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-availability-zone'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone',
json=_fake_zone_list,
),
]


@ -88,7 +88,7 @@ class TestBaremetalNode(base.IronicTestCase):
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['address=%s' % mac_address],
qs_elements=[f'address={mac_address}'],
),
json={
'ports': [
@ -2041,7 +2041,7 @@ class TestBaremetalNode(base.IronicTestCase):
method='GET',
uri=self.get_mock_url(
resource='ports',
qs_elements=['address=%s' % mac_address],
qs_elements=[f'address={mac_address}'],
),
json={
'ports': [
@ -2129,7 +2129,7 @@ class TestBaremetalNode(base.IronicTestCase):
method='GET',
uri=self.get_mock_url(
resource='ports',
qs_elements=['address=%s' % mac_address],
qs_elements=[f'address={mac_address}'],
),
json={
'ports': [


@ -83,7 +83,9 @@ class TestBaremetalPort(base.IronicTestCase):
resource='ports',
append=['detail'],
qs_elements=[
'node_uuid=%s' % self.fake_baremetal_node['uuid']
'node_uuid={}'.format(
self.fake_baremetal_node['uuid']
)
],
),
json={
@ -112,7 +114,9 @@ class TestBaremetalPort(base.IronicTestCase):
resource='ports',
append=['detail'],
qs_elements=[
'node_uuid=%s' % self.fake_baremetal_node['uuid']
'node_uuid={}'.format(
self.fake_baremetal_node['uuid']
)
],
),
status_code=400,
@ -136,7 +140,7 @@ class TestBaremetalPort(base.IronicTestCase):
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['address=%s' % mac],
qs_elements=[f'address={mac}'],
),
json={'ports': [self.fake_baremetal_port]},
),
@ -157,7 +161,7 @@ class TestBaremetalPort(base.IronicTestCase):
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['address=%s' % mac],
qs_elements=[f'address={mac}'],
),
json={'ports': []},
),
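Note: the node_uuid lines above keep '{}'.format(...) because the value expression, self.fake_baremetal_node['uuid'], carries single quotes that would clash with a single-quoted f-string before Python 3.12; with double outer quotes the f-string form is equivalent. Sketch with a made-up node dict:

# The subscript's quotes force either .format() or a double-quoted f-string.
node = {'uuid': 'abc123'}  # hypothetical stand-in for fake_baremetal_node
assert 'node_uuid={}'.format(node['uuid']) == f"node_uuid={node['uuid']}"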


@ -40,7 +40,7 @@ class TestCloudEndpoints(base.TestCase):
)
def _dummy_url(self):
return 'https://%s.example.com/' % uuid.uuid4().hex
return f'https://{uuid.uuid4().hex}.example.com/'
def test_create_endpoint_v3(self):
service_data = self._get_service_data()
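Note: interpolating the precomputed hex string is a direct swap between the two spellings; a tiny sketch (host is an arbitrary label):

import uuid

# %s on a str and a bare f-string field produce identical URLs.
host = uuid.uuid4().hex
assert 'https://%s.example.com/' % host == f'https://{host}.example.com/'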


@ -26,9 +26,7 @@ class TestFlavors(base.TestCase):
[
dict(
method='POST',
uri='{endpoint}/flavors'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors',
json={'flavor': fakes.FAKE_FLAVOR},
validate=dict(
json={
@ -64,16 +62,12 @@ class TestFlavors(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/flavors/vanilla'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla',
json=fakes.FAKE_FLAVOR,
),
dict(
method='DELETE',
uri='{endpoint}/flavors/{id}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{fakes.FLAVOR_ID}',
),
]
)
@ -87,16 +81,12 @@ class TestFlavors(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/flavors/invalid'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/invalid',
status_code=404,
),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None',
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
@ -112,23 +102,17 @@ class TestFlavors(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/flavors/vanilla'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla',
json=fakes.FAKE_FLAVOR,
),
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None',
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
dict(
method='DELETE',
uri='{endpoint}/flavors/{id}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=fakes.FLAVOR_ID
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{fakes.FLAVOR_ID}',
status_code=503,
),
]
@ -145,9 +129,7 @@ class TestFlavors(base.TestCase):
uris_to_mock = [
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None',
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
@ -173,9 +155,7 @@ class TestFlavors(base.TestCase):
uris_to_mock = [
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None',
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
@ -213,9 +193,7 @@ class TestFlavors(base.TestCase):
uris_to_mock = [
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None',
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
@ -241,9 +219,7 @@ class TestFlavors(base.TestCase):
uris_to_mock = [
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None',
json={'flavors': fakes.FAKE_FLAVOR_LIST},
),
]
@ -269,9 +245,7 @@ class TestFlavors(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/flavors/detail?is_public=None'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None',
json={'flavors': []},
)
]
@ -284,8 +258,8 @@ class TestFlavors(base.TestCase):
def test_get_flavor_string_and_int(self):
self.use_compute_discovery()
flavor_resource_uri = '{endpoint}/flavors/1/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT
flavor_resource_uri = (
f'{fakes.COMPUTE_ENDPOINT}/flavors/1/os-extra_specs'
)
flavor = fakes.make_fake_flavor('1', 'vanilla')
flavor_json = {'extra_specs': {}}
@ -294,9 +268,7 @@ class TestFlavors(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/1',
json=flavor,
),
dict(method='GET', uri=flavor_resource_uri, json=flavor_json),
@ -315,9 +287,7 @@ class TestFlavors(base.TestCase):
[
dict(
method='POST',
uri='{endpoint}/flavors/{id}/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=1
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{1}/os-extra_specs',
json=dict(extra_specs=extra_specs),
)
]
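Note: the rewrite above inlines the keyword's value, so id=1 surfaces as a literal {1} inside the f-string; replacement fields hold arbitrary expressions, so the rendered URL is unchanged even though it reads oddly. Sketch with a stand-in endpoint:

endpoint = 'https://compute.example.com'  # illustrative COMPUTE_ENDPOINT
old = '{endpoint}/flavors/{id}/os-extra_specs'.format(endpoint=endpoint, id=1)
# {1} evaluates the int literal 1 and formats it as '1'.
assert old == f'{endpoint}/flavors/{1}/os-extra_specs'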
@ -333,9 +303,7 @@ class TestFlavors(base.TestCase):
[
dict(
method='DELETE',
uri='{endpoint}/flavors/{id}/os-extra_specs/{key}'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=1, key=key
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{1}/os-extra_specs/{key}',
)
for key in keys
]
@ -394,9 +362,7 @@ class TestFlavors(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/flavors/vanilla/os-flavor-access'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla/os-flavor-access',
json={
'flavor_access': [
{'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'}
@ -410,9 +376,7 @@ class TestFlavors(base.TestCase):
def test_get_flavor_by_id(self):
self.use_compute_discovery()
flavor_uri = '{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1'
flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')}
self.register_uris(
@ -430,12 +394,8 @@ class TestFlavors(base.TestCase):
def test_get_flavor_with_extra_specs(self):
self.use_compute_discovery()
flavor_uri = '{endpoint}/flavors/1'.format(
endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_extra_uri = '{endpoint}/flavors/1/os-extra_specs'.format(
endpoint=fakes.COMPUTE_ENDPOINT
)
flavor_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1'
flavor_extra_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1/os-extra_specs'
flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')}
flavor_extra_json = {'extra_specs': {'name': 'test'}}


@ -258,7 +258,7 @@ class TestFloatingIP(base.TestCase):
dict(
method='GET',
uri='https://network.example.com/v2.0/floatingips/'
'{id}'.format(id=fid),
f'{fid}',
json=self.mock_floating_ip_new_rep,
)
]


@ -31,9 +31,7 @@ class TestFloatingIPPool(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-floating-ip-pools'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-floating-ip-pools',
json={"floating_ip_pools": [{"name": "public"}]},
),
]


@ -1274,7 +1274,7 @@ class TestFirewallGroup(FirewallTestCase):
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['name=%s' % self.mock_port['name']],
qs_elements=['name={}'.format(self.mock_port['name'])],
),
json={'ports': [self.mock_port]},
),
@ -1580,7 +1580,7 @@ class TestFirewallGroup(FirewallTestCase):
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['name=%s' % self.mock_port['name']],
qs_elements=['name={}'.format(self.mock_port['name'])],
),
json={'ports': [self.mock_port]},
),


@ -286,8 +286,8 @@ class TestIdentityRoles(base.TestCase):
uri=self.get_mock_url(
resource='role_assignments',
qs_elements=[
'scope.domain.id=%s' % domain_data.domain_id,
'user.id=%s' % user_data.user_id,
f'scope.domain.id={domain_data.domain_id}',
f'user.id={user_data.user_id}',
'effective=True',
],
),


@ -89,16 +89,12 @@ class TestImage(BaseTestImage):
[
dict(
method='GET',
uri='https://image.example.com/v2/images/{name}'.format(
name=self.image_name
),
uri=f'https://image.example.com/v2/images/{self.image_name}',
status_code=404,
),
dict(
method='GET',
uri='https://image.example.com/v2/images?name={name}'.format( # noqa: E501
name=self.image_name
),
uri=f'https://image.example.com/v2/images?name={self.image_name}', # noqa: E501
json=dict(images=[]),
),
dict(
@ -121,23 +117,17 @@ class TestImage(BaseTestImage):
[
dict(
method='GET',
uri='https://image.example.com/v2/images/{name}'.format(
name=self.image_name
),
uri=f'https://image.example.com/v2/images/{self.image_name}',
status_code=404,
),
dict(
method='GET',
uri='https://image.example.com/v2/images?name={name}'.format( # noqa: E501
name=self.image_name
),
uri=f'https://image.example.com/v2/images?name={self.image_name}', # noqa: E501
json=self.fake_search_return,
),
dict(
method='GET',
uri='https://image.example.com/v2/images/{id}/file'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}/file',
content=self.output,
headers={
'Content-Type': 'application/octet-stream',
@ -417,9 +407,7 @@ class TestImage(BaseTestImage):
),
json={
'images': [self.fake_image_dict],
'next': '/v2/images?marker={marker}'.format(
marker=marker
),
'next': f'/v2/images?marker={marker}',
},
),
dict(
@ -821,16 +809,12 @@ class TestImage(BaseTestImage):
),
dict(
method='HEAD',
uri='{endpoint}/{container}'.format(
endpoint=endpoint, container=self.container_name
),
uri=f'{endpoint}/{self.container_name}',
status_code=404,
),
dict(
method='PUT',
uri='{endpoint}/{container}'.format(
endpoint=endpoint, container=self.container_name
),
uri=f'{endpoint}/{self.container_name}',
status_code=201,
headers={
'Date': 'Fri, 16 Dec 2016 18:21:20 GMT',
@ -840,9 +824,7 @@ class TestImage(BaseTestImage):
),
dict(
method='HEAD',
uri='{endpoint}/{container}'.format(
endpoint=endpoint, container=self.container_name
),
uri=f'{endpoint}/{self.container_name}',
headers={
'Content-Length': '0',
'X-Container-Object-Count': '0',
@ -867,20 +849,12 @@ class TestImage(BaseTestImage):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=endpoint,
container=self.container_name,
object=self.image_name,
),
uri=f'{endpoint}/{self.container_name}/{self.image_name}',
status_code=404,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=endpoint,
container=self.container_name,
object=self.image_name,
),
uri=f'{endpoint}/{self.container_name}/{self.image_name}',
status_code=201,
validate=dict(
headers={
@ -903,10 +877,7 @@ class TestImage(BaseTestImage):
json=dict(
type='import',
input={
'import_from': '{container}/{object}'.format(
container=self.container_name,
object=self.image_name,
),
'import_from': f'{self.container_name}/{self.image_name}',
'image_properties': {'name': self.image_name},
},
)
@ -952,10 +923,7 @@ class TestImage(BaseTestImage):
[
{
'op': 'add',
'value': '{container}/{object}'.format(
container=self.container_name,
object=self.image_name,
),
'value': f'{self.container_name}/{self.image_name}',
'path': '/owner_specified.openstack.object', # noqa: E501
},
{
@ -983,11 +951,7 @@ class TestImage(BaseTestImage):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=endpoint,
container=self.container_name,
object=self.image_name,
),
uri=f'{endpoint}/{self.container_name}/{self.image_name}',
headers={
'X-Timestamp': '1429036140.50253',
'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1',
@ -1007,11 +971,7 @@ class TestImage(BaseTestImage):
),
dict(
method='DELETE',
uri='{endpoint}/{container}/{object}'.format(
endpoint=endpoint,
container=self.container_name,
object=self.image_name,
),
uri=f'{endpoint}/{self.container_name}/{self.image_name}',
),
dict(
method='GET',
@ -1069,15 +1029,11 @@ class TestImage(BaseTestImage):
),
dict(
method='DELETE',
uri='https://image.example.com/v2/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}',
),
dict(
method='HEAD',
uri='{endpoint}/{object}'.format(
endpoint=endpoint, object=object_path
),
uri=f'{endpoint}/{object_path}',
headers={
'X-Timestamp': '1429036140.50253',
'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1',
@ -1097,9 +1053,7 @@ class TestImage(BaseTestImage):
),
dict(
method='DELETE',
uri='{endpoint}/{object}'.format(
endpoint=endpoint, object=object_path
),
uri=f'{endpoint}/{object_path}',
),
]
)
@ -1187,11 +1141,7 @@ class TestImage(BaseTestImage):
),
dict(
method='DELETE',
uri='{endpoint}/{container}/{object}'.format(
endpoint=endpoint,
container=self.container_name,
object=self.image_name,
),
uri=f'{endpoint}/{self.container_name}/{self.image_name}',
),
]
)
@ -1230,9 +1180,7 @@ class TestImage(BaseTestImage):
'properties': {
'owner_specified.openstack.md5': fakes.NO_MD5,
'owner_specified.openstack.sha256': fakes.NO_SHA256,
'owner_specified.openstack.object': 'images/{name}'.format(
name=self.image_name
),
'owner_specified.openstack.object': f'images/{self.image_name}',
'is_public': False,
},
}
@ -1263,9 +1211,7 @@ class TestImage(BaseTestImage):
),
dict(
method='PUT',
uri='https://image.example.com/v1/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v1/images/{self.image_id}',
json={'image': ret},
validate=dict(
headers={
@ -1297,9 +1243,7 @@ class TestImage(BaseTestImage):
'properties': {
'owner_specified.openstack.md5': fakes.NO_MD5,
'owner_specified.openstack.sha256': fakes.NO_SHA256,
'owner_specified.openstack.object': 'images/{name}'.format(
name=self.image_name
),
'owner_specified.openstack.object': f'images/{self.image_name}',
'is_public': False,
},
'validate_checksum': True,
@ -1331,9 +1275,7 @@ class TestImage(BaseTestImage):
),
dict(
method='PUT',
uri='https://image.example.com/v1/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v1/images/{self.image_id}',
status_code=400,
validate=dict(
headers={
@ -1344,9 +1286,7 @@ class TestImage(BaseTestImage):
),
dict(
method='DELETE',
uri='https://image.example.com/v1/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v1/images/{self.image_id}',
json={'images': [ret]},
),
]
@ -1369,9 +1309,7 @@ class TestImage(BaseTestImage):
'disk_format': 'qcow2',
'owner_specified.openstack.md5': fakes.NO_MD5,
'owner_specified.openstack.sha256': fakes.NO_SHA256,
'owner_specified.openstack.object': 'images/{name}'.format(
name=self.image_name
),
'owner_specified.openstack.object': f'images/{self.image_name}',
'visibility': 'private',
}
@ -1382,9 +1320,7 @@ class TestImage(BaseTestImage):
self.cloud.update_image_properties(
image=image.Image.existing(**ret),
**{
'owner_specified.openstack.object': 'images/{name}'.format(
name=self.image_name
)
'owner_specified.openstack.object': f'images/{self.image_name}'
},
)
@ -1399,9 +1335,7 @@ class TestImage(BaseTestImage):
'disk_format': 'qcow2',
'owner_specified.openstack.md5': fakes.NO_MD5,
'owner_specified.openstack.sha256': fakes.NO_SHA256,
'owner_specified.openstack.object': 'images/{name}'.format(
name=self.image_name
),
'owner_specified.openstack.object': f'images/{self.image_name}',
'visibility': 'private',
}
@ -1449,9 +1383,7 @@ class TestImage(BaseTestImage):
),
dict(
method='PUT',
uri='https://image.example.com/v2/images/{id}/file'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}/file',
status_code=400,
validate=dict(
headers={
@ -1461,9 +1393,7 @@ class TestImage(BaseTestImage):
),
dict(
method='DELETE',
uri='https://image.example.com/v2/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}',
),
]
)
@ -1530,9 +1460,7 @@ class TestImage(BaseTestImage):
),
dict(
method='DELETE',
uri='https://image.example.com/v2/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}',
),
]
)
@ -1574,9 +1502,7 @@ class TestImage(BaseTestImage):
'disk_format': 'qcow2',
'owner_specified.openstack.md5': fakes.NO_MD5,
'owner_specified.openstack.sha256': fakes.NO_SHA256,
'owner_specified.openstack.object': 'images/{name}'.format(
name=self.image_name
),
'owner_specified.openstack.object': f'images/{self.image_name}',
'int_v': '12345',
'visibility': 'private',
'min_disk': 0,
@ -1627,9 +1553,7 @@ class TestImage(BaseTestImage):
),
dict(
method='PUT',
uri='https://image.example.com/v2/images/{id}/file'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}/file',
validate=dict(
headers={
'Content-Type': 'application/octet-stream',
@ -1638,9 +1562,7 @@ class TestImage(BaseTestImage):
),
dict(
method='GET',
uri='https://image.example.com/v2/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}',
json=ret,
),
dict(
@ -1667,9 +1589,7 @@ class TestImage(BaseTestImage):
'disk_format': 'qcow2',
'owner_specified.openstack.md5': fakes.NO_MD5,
'owner_specified.openstack.sha256': fakes.NO_SHA256,
'owner_specified.openstack.object': 'images/{name}'.format(
name=self.image_name
),
'owner_specified.openstack.object': f'images/{self.image_name}',
'int_v': 12345,
'visibility': 'private',
'min_disk': 0,
@ -1721,9 +1641,7 @@ class TestImage(BaseTestImage):
),
dict(
method='PUT',
uri='https://image.example.com/v2/images/{id}/file'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}/file',
validate=dict(
headers={
'Content-Type': 'application/octet-stream',
@ -1732,9 +1650,7 @@ class TestImage(BaseTestImage):
),
dict(
method='GET',
uri='https://image.example.com/v2/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}',
json=ret,
),
dict(
@ -1761,9 +1677,7 @@ class TestImage(BaseTestImage):
'disk_format': 'qcow2',
'owner_specified.openstack.md5': fakes.NO_MD5,
'owner_specified.openstack.sha256': fakes.NO_SHA256,
'owner_specified.openstack.object': 'images/{name}'.format(
name=self.image_name
),
'owner_specified.openstack.object': f'images/{self.image_name}',
'int_v': '12345',
'protected': False,
'visibility': 'private',
@ -1816,9 +1730,7 @@ class TestImage(BaseTestImage):
),
dict(
method='PUT',
uri='https://image.example.com/v2/images/{id}/file'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}/file',
validate=dict(
headers={
'Content-Type': 'application/octet-stream',
@ -1827,9 +1739,7 @@ class TestImage(BaseTestImage):
),
dict(
method='GET',
uri='https://image.example.com/v2/images/{id}'.format(
id=self.image_id
),
uri=f'https://image.example.com/v2/images/{self.image_id}',
json=ret,
),
dict(
@ -1892,9 +1802,7 @@ class TestImageSuburl(BaseTestImage):
),
json={
'images': [self.fake_image_dict],
'next': '/v2/images?marker={marker}'.format(
marker=marker
),
'next': f'/v2/images?marker={marker}',
},
),
dict(


@ -37,10 +37,7 @@ class TestImageSnapshot(base.TestCase):
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='{endpoint}/servers/{server_id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT,
server_id=self.server_id,
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action',
headers=dict(
Location='{endpoint}/images/{image_id}'.format(
endpoint='https://images.example.com',
@ -87,10 +84,7 @@ class TestImageSnapshot(base.TestCase):
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='{endpoint}/servers/{server_id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT,
server_id=self.server_id,
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action',
headers=dict(
Location='{endpoint}/images/{image_id}'.format(
endpoint='https://images.example.com',


@ -530,9 +530,7 @@ class TestMeta(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/servers/test-id/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups',
json={'security_groups': []},
),
]
@ -609,9 +607,7 @@ class TestMeta(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/servers/test-id/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups',
json={'security_groups': []},
),
]
@ -685,9 +681,7 @@ class TestMeta(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/servers/test-id/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups',
json={'security_groups': []},
),
]
@ -804,9 +798,7 @@ class TestMeta(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/servers/test-id/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups',
json={'security_groups': []},
),
]
@ -865,9 +857,7 @@ class TestMeta(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/servers/test-id/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups',
json={'security_groups': []},
),
]
@ -947,9 +937,7 @@ class TestMeta(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/servers/test-id/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups',
json={'security_groups': []},
),
]


@ -373,7 +373,7 @@ class TestNetworks(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name],
qs_elements=[f'name={network_name}'],
),
json={'networks': [network]},
),
@ -574,7 +574,7 @@ class TestNetworks(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name],
qs_elements=[f'name={network_name}'],
),
json={'networks': [network]},
),
@ -640,7 +640,7 @@ class TestNetworks(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % network_name],
qs_elements=[f'name={network_name}'],
),
json={'networks': [network]},
),


@ -33,12 +33,8 @@ class BaseTestObject(base.TestCase):
self.container = self.getUniqueString()
self.object = self.getUniqueString()
self.endpoint = self.cloud.object_store.get_endpoint()
self.container_endpoint = '{endpoint}/{container}'.format(
endpoint=self.endpoint, container=self.container
)
self.object_endpoint = '{endpoint}/{object}'.format(
endpoint=self.container_endpoint, object=self.object
)
self.container_endpoint = f'{self.endpoint}/{self.container}'
self.object_endpoint = f'{self.container_endpoint}/{self.object}'
def _compare_containers(self, exp, real):
self.assertDictEqual(
@ -330,7 +326,7 @@ class TestObject(BaseTestObject):
)
with testtools.ExpectedException(
exceptions.SDKException,
"Container not found: %s" % self.container,
f"Container not found: {self.container}",
):
self.cloud.get_container_access(self.container)
@ -594,9 +590,7 @@ class TestObject(BaseTestObject):
self.assert_calls()
def test_list_objects(self):
endpoint = '{endpoint}?format=json'.format(
endpoint=self.container_endpoint
)
endpoint = f'{self.container_endpoint}?format=json'
objects = [
{
@ -619,9 +613,7 @@ class TestObject(BaseTestObject):
self._compare_objects(a, b)
def test_list_objects_with_prefix(self):
endpoint = '{endpoint}?format=json&prefix=test'.format(
endpoint=self.container_endpoint
)
endpoint = f'{self.container_endpoint}?format=json&prefix=test'
objects = [
{
@ -644,9 +636,7 @@ class TestObject(BaseTestObject):
self._compare_objects(a, b)
def test_list_objects_exception(self):
endpoint = '{endpoint}?format=json'.format(
endpoint=self.container_endpoint
)
endpoint = f'{self.container_endpoint}?format=json'
self.register_uris(
[
dict(
@ -903,20 +893,12 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=404,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
validate=dict(
headers={
@ -972,11 +954,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
validate=dict(
headers={
@ -1008,11 +986,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=404,
),
]
@ -1021,12 +995,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/{index:0>6}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
index=index,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}',
status_code=201,
)
for index, offset in enumerate(
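Note: format specs survive the conversion verbatim; {index:0>6} zero-pads the segment index to six characters in both spellings. Sketch with an arbitrary index:

# '0>6' means pad with '0', right-aligned, to width 6.
index = 3
assert '{index:0>6}'.format(index=index) == f'{index:0>6}' == '000003'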
@ -1038,17 +1007,11 @@ class TestObjectUploads(BaseTestObject):
uris_to_mock.append(
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
validate=dict(
headers={
'x-object-manifest': '{container}/{object}'.format(
container=self.container, object=self.object
),
'x-object-manifest': f'{self.container}/{self.object}',
'x-object-meta-x-sdk-md5': self.md5,
'x-object-meta-x-sdk-sha256': self.sha256,
}
@ -1088,11 +1051,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=404,
),
]
@ -1101,12 +1060,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/{index:0>6}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
index=index,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}',
status_code=201,
headers=dict(Etag=f'etag{index}'),
)
@ -1119,11 +1073,7 @@ class TestObjectUploads(BaseTestObject):
uris_to_mock.append(
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
validate=dict(
params={'multipart-manifest', 'put'},
@ -1153,37 +1103,27 @@ class TestObjectUploads(BaseTestObject):
'header mismatch in manifest call',
)
base_object = '/{container}/{object}'.format(
container=self.container, object=self.object
)
base_object = f'/{self.container}/{self.object}'
self.assertEqual(
[
{
'path': "{base_object}/000000".format(
base_object=base_object
),
'path': f"{base_object}/000000",
'size_bytes': 25,
'etag': 'etag0',
},
{
'path': "{base_object}/000001".format(
base_object=base_object
),
'path': f"{base_object}/000001",
'size_bytes': 25,
'etag': 'etag1',
},
{
'path': "{base_object}/000002".format(
base_object=base_object
),
'path': f"{base_object}/000002",
'size_bytes': 25,
'etag': 'etag2',
},
{
'path': "{base_object}/000003".format(
base_object=base_object
),
'path': f"{base_object}/000003",
'size_bytes': len(self.object) - 75,
'etag': 'etag3',
},
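Note: the asserted manifest entries all follow one pattern, the base object path plus a zero-padded segment suffix; a compact sketch of the same list (names are illustrative):

# One manifest path per uploaded segment, suffixed 000000, 000001, ...
base_object = '/container/object'
paths = [f'{base_object}/{i:0>6}' for i in range(4)]
assert paths[:2] == ['/container/object/000000', '/container/object/000001']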
@ -1210,11 +1150,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=404,
),
]
@ -1223,12 +1159,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/{index:0>6}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
index=index,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}',
status_code=201,
headers=dict(Etag=f'etag{index}'),
)
@ -1243,11 +1174,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=400,
validate=dict(
params={'multipart-manifest', 'put'},
@ -1259,11 +1186,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=400,
validate=dict(
params={'multipart-manifest', 'put'},
@ -1275,11 +1198,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
validate=dict(
params={'multipart-manifest', 'put'},
@ -1311,37 +1230,27 @@ class TestObjectUploads(BaseTestObject):
'header mismatch in manifest call',
)
base_object = '/{container}/{object}'.format(
container=self.container, object=self.object
)
base_object = f'/{self.container}/{self.object}'
self.assertEqual(
[
{
'path': "{base_object}/000000".format(
base_object=base_object
),
'path': f"{base_object}/000000",
'size_bytes': 25,
'etag': 'etag0',
},
{
'path': "{base_object}/000001".format(
base_object=base_object
),
'path': f"{base_object}/000001",
'size_bytes': 25,
'etag': 'etag1',
},
{
'path': "{base_object}/000002".format(
base_object=base_object
),
'path': f"{base_object}/000002",
'size_bytes': 25,
'etag': 'etag2',
},
{
'path': "{base_object}/000003".format(
base_object=base_object
),
'path': f"{base_object}/000003",
'size_bytes': len(self.object) - 75,
'etag': 'etag3',
},
@ -1369,11 +1278,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=404,
),
]
@ -1382,12 +1287,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/{index:0>6}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
index=index,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}',
status_code=201,
headers=dict(Etag=f'etag{index}'),
)
@ -1402,11 +1302,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=400,
validate=dict(
params={'multipart-manifest', 'put'},
@ -1418,11 +1314,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=400,
validate=dict(
params={'multipart-manifest', 'put'},
@ -1434,11 +1326,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=400,
validate=dict(
params={'multipart-manifest', 'put'},
@ -1459,9 +1347,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='GET',
uri='{endpoint}/images?format=json&prefix={prefix}'.format(
endpoint=self.endpoint, prefix=self.object
),
uri=f'{self.endpoint}/images?format=json&prefix={self.object}',
complete_qs=True,
json=[
{
@ -1475,9 +1361,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/images/{object}'.format(
endpoint=self.endpoint, object=self.object
),
uri=f'{self.endpoint}/images/{self.object}',
headers={
'X-Timestamp': '1429036140.50253',
'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1',
@ -1495,9 +1379,7 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='DELETE',
uri='{endpoint}/images/{object}'.format(
endpoint=self.endpoint, object=self.object
),
uri=f'{self.endpoint}/images/{self.object}',
),
]
)
@ -1536,56 +1418,32 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=404,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000000'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000000',
status_code=201,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000001'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000001',
status_code=201,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000002'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000002',
status_code=201,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000003'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000003',
status_code=501,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
),
]
@ -1619,69 +1477,41 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=404,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000000'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000000',
headers={'etag': 'etag0'},
status_code=201,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000001'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000001',
headers={'etag': 'etag1'},
status_code=201,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000002'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000002',
headers={'etag': 'etag2'},
status_code=201,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000003'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000003',
status_code=501,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}/000003'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}/000003',
status_code=201,
headers={'etag': 'etag3'},
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
validate=dict(
params={'multipart-manifest', 'put'},
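
These mocks stage a 501 on the first PUT of segment 000003 followed by a 201 on the retry. A minimal sketch of the retry loop such a test exercises (hypothetical helper, assuming a requests-style session):

import time

def put_segment_with_retry(session, url, data, attempts=3):
    # Retry server-side failures (5xx), matching the 501-then-201
    # sequence mocked above; return the last response either way.
    resp = None
    for attempt in range(attempts):
        resp = session.put(url, data=data)
        if resp.status_code < 500:
            break
        time.sleep(2**attempt)  # simple exponential backoff
    return resp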
@ -1711,37 +1541,27 @@ class TestObjectUploads(BaseTestObject):
'header mismatch in manifest call',
)
base_object = '/{container}/{object}'.format(
container=self.container, object=self.object
)
base_object = f'/{self.container}/{self.object}'
self.assertEqual(
[
{
'path': "{base_object}/000000".format(
base_object=base_object
),
'path': f"{base_object}/000000",
'size_bytes': 25,
'etag': 'etag0',
},
{
'path': "{base_object}/000001".format(
base_object=base_object
),
'path': f"{base_object}/000001",
'size_bytes': 25,
'etag': 'etag1',
},
{
'path': "{base_object}/000002".format(
base_object=base_object
),
'path': f"{base_object}/000002",
'size_bytes': 25,
'etag': 'etag2',
},
{
'path': "{base_object}/000003".format(
base_object=base_object
),
'path': f"{base_object}/000003",
'size_bytes': len(self.object) - 75,
'etag': 'etag3',
},
@ -1762,20 +1582,12 @@ class TestObjectUploads(BaseTestObject):
),
dict(
method='HEAD',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=200,
),
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
validate=dict(headers={}),
),
@ -1796,11 +1608,7 @@ class TestObjectUploads(BaseTestObject):
[
dict(
method='PUT',
uri='{endpoint}/{container}/{object}'.format(
endpoint=self.endpoint,
container=self.container,
object=self.object,
),
uri=f'{self.endpoint}/{self.container}/{self.object}',
status_code=201,
validate=dict(
headers={},


@ -91,8 +91,8 @@ class TestOperatorCloud(base.TestCase):
self.cloud.config.config['region_name'] = 'testregion'
with testtools.ExpectedException(
exceptions.SDKException,
"Error getting image endpoint on testcloud:testregion:"
" No service",
"Error getting image endpoint on testcloud:testregion: "
"No service",
):
self.cloud.get_session_endpoint("image")
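
ruff-format re-balances implicit string concatenation so the separating space sits at the end of the first fragment; the joined message is unchanged:

before = (
    "Error getting image endpoint on testcloud:testregion:"
    " No service"
)
after = (
    "Error getting image endpoint on testcloud:testregion: "
    "No service"
)
assert before == after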


@ -507,7 +507,7 @@ class TestPort(base.TestCase):
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=['name=%s' % port_name],
qs_elements=[f'name={port_name}'],
),
json={'ports': [port1, port2]},
),
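
Most of the remaining churn is this same mechanical rewrite of printf-style interpolation into f-strings, which is behaviour-preserving for a simple %s substitution of a name:

port_name = 'first-port'
assert ('name=%s' % port_name) == f'name={port_name}' == 'name=first-port'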


@ -179,7 +179,7 @@ class TestProject(base.TestCase):
method='GET',
uri=self.get_mock_url(
resource=(
'projects?domain_id=%s' % project_data.domain_id
f'projects?domain_id={project_data.domain_id}'
)
),
status_code=200,
@ -204,7 +204,7 @@ class TestProject(base.TestCase):
method='GET',
uri=self.get_mock_url(
resource=(
'projects?domain_id=%s' % project_data.domain_id
f'projects?domain_id={project_data.domain_id}'
)
),
status_code=200,
@ -250,7 +250,7 @@ class TestProject(base.TestCase):
method='GET',
uri=self.get_mock_url(
resource=(
'projects?domain_id=%s' % project_data.domain_id
f'projects?domain_id={project_data.domain_id}'
)
),
status_code=200,


@ -104,7 +104,7 @@ class TestQosBandwidthLimitRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -157,7 +157,7 @@ class TestQosBandwidthLimitRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': []},
),
@ -216,7 +216,7 @@ class TestQosBandwidthLimitRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -288,7 +288,7 @@ class TestQosBandwidthLimitRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -516,7 +516,7 @@ class TestQosBandwidthLimitRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -590,7 +590,7 @@ class TestQosBandwidthLimitRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),


@ -87,7 +87,7 @@ class TestQosDscpMarkingRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -140,7 +140,7 @@ class TestQosDscpMarkingRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': []},
),
@ -199,7 +199,7 @@ class TestQosDscpMarkingRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -361,7 +361,7 @@ class TestQosDscpMarkingRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -435,7 +435,7 @@ class TestQosDscpMarkingRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),


@ -88,7 +88,7 @@ class TestQosMinimumBandwidthRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -141,7 +141,7 @@ class TestQosMinimumBandwidthRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': []},
),
@ -200,7 +200,7 @@ class TestQosMinimumBandwidthRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -361,7 +361,7 @@ class TestQosMinimumBandwidthRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -435,7 +435,7 @@ class TestQosMinimumBandwidthRule(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),


@ -86,7 +86,7 @@ class TestQosPolicy(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -225,7 +225,7 @@ class TestQosPolicy(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [self.mock_policy]},
),
@ -323,7 +323,7 @@ class TestQosPolicy(base.TestCase):
'network',
'public',
append=['v2.0', 'qos', 'policies'],
qs_elements=['name=%s' % self.policy_name],
qs_elements=[f'name={self.policy_name}'],
),
json={'policies': [policy1, policy2]},
),


@ -100,7 +100,7 @@ class TestRouter(base.TestCase):
'network',
'public',
append=['v2.0', 'routers'],
qs_elements=['name=%s' % self.router_name],
qs_elements=[f'name={self.router_name}'],
),
json={'routers': [self.mock_router_rep]},
),
@ -450,7 +450,7 @@ class TestRouter(base.TestCase):
'network',
'public',
append=['v2.0', 'routers'],
qs_elements=['name=%s' % self.router_name],
qs_elements=[f'name={self.router_name}'],
),
json={'routers': [self.mock_router_rep]},
),
@ -486,7 +486,7 @@ class TestRouter(base.TestCase):
'network',
'public',
append=['v2.0', 'routers'],
qs_elements=['name=%s' % self.router_name],
qs_elements=[f'name={self.router_name}'],
),
json={'routers': []},
),
@ -576,7 +576,7 @@ class TestRouter(base.TestCase):
'network',
'public',
append=['v2.0', 'ports'],
qs_elements=["device_id=%s" % self.router_id],
qs_elements=[f"device_id={self.router_id}"],
),
json={'ports': (internal_ports + external_ports)},
)


@ -74,7 +74,7 @@ class TestSecurityGroups(base.TestCase):
'network',
'public',
append=['v2.0', 'security-groups'],
qs_elements=["project_id=%s" % project_id],
qs_elements=[f"project_id={project_id}"],
),
json={'security_groups': [neutron_grp_dict]},
)
@ -88,9 +88,7 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups?project_id=42'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups?project_id=42',
json={'security_groups': []},
),
]
@ -126,7 +124,7 @@ class TestSecurityGroups(base.TestCase):
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'security-groups', '%s' % sg_id],
append=['v2.0', 'security-groups', f'{sg_id}'],
),
status_code=200,
json={},
@ -144,16 +142,12 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': nova_return},
),
dict(
method='DELETE',
uri='{endpoint}/os-security-groups/2'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups/2',
),
]
)
@ -184,9 +178,7 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': nova_return},
),
]
@ -240,8 +232,8 @@ class TestSecurityGroups(base.TestCase):
project_id = "861808a93da0484ea1767967c4df8a23"
group_name = self.getUniqueString()
group_desc = (
'security group from'
' test_create_security_group_neutron_specific_tenant'
'security group from '
'test_create_security_group_neutron_specific_tenant'
)
new_group = fakes.make_fake_neutron_security_group(
id='2',
@ -331,9 +323,7 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='POST',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_group': new_group},
validate=dict(
json={
@ -385,7 +375,7 @@ class TestSecurityGroups(base.TestCase):
uri=self.get_mock_url(
'network',
'public',
append=['v2.0', 'security-groups', '%s' % sg_id],
append=['v2.0', 'security-groups', f'{sg_id}'],
),
json={'security_group': update_return},
validate=dict(
@ -418,16 +408,12 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': nova_return},
),
dict(
method='PUT',
uri='{endpoint}/os-security-groups/2'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups/2',
json={'security_group': update_return},
),
]
@ -586,16 +572,12 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': nova_return},
),
dict(
method='POST',
uri='{endpoint}/os-security-group-rules'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules',
json={'security_group_rule': new_rule},
validate=dict(
json={
@ -642,16 +624,12 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': nova_return},
),
dict(
method='POST',
uri='{endpoint}/os-security-group-rules'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules',
json={'security_group_rule': new_rule},
validate=dict(
json={
@ -700,7 +678,7 @@ class TestSecurityGroups(base.TestCase):
append=[
'v2.0',
'security-group-rules',
'%s' % rule_id,
f'{rule_id}',
],
),
json={},
@ -717,9 +695,7 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='DELETE',
uri='{endpoint}/os-security-group-rules/xyz'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules/xyz',
),
]
)
@ -760,9 +736,7 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': [nova_grp_dict]},
),
]
@ -779,9 +753,7 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': [nova_grp_dict]},
),
]
@ -842,16 +814,15 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT,
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': [nova_grp_dict]},
),
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='%s/servers/%s/action'
% (fakes.COMPUTE_ENDPOINT, '1234'),
uri='{}/servers/{}/action'.format(
fakes.COMPUTE_ENDPOINT, '1234'
),
validate=dict(
json={'addSecurityGroup': {'name': 'nova-sec-group'}}
),
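
Note that interpolations whose arguments include a non-name expression (here the literal '1234') were rewritten to str.format() rather than an f-string; the resulting URI is still equivalent:

COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1'  # assumed placeholder

uri = '{}/servers/{}/action'.format(COMPUTE_ENDPOINT, '1234')
assert uri == f'{COMPUTE_ENDPOINT}/servers/1234/action'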
@ -894,8 +865,9 @@ class TestSecurityGroups(base.TestCase):
),
dict(
method='POST',
uri='%s/servers/%s/action'
% (fakes.COMPUTE_ENDPOINT, '1234'),
uri='{}/servers/{}/action'.format(
fakes.COMPUTE_ENDPOINT, '1234'
),
validate=dict(
json={
'addSecurityGroup': {'name': 'neutron-sec-group'}
@ -921,16 +893,15 @@ class TestSecurityGroups(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': [nova_grp_dict]},
),
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='%s/servers/%s/action'
% (fakes.COMPUTE_ENDPOINT, '1234'),
uri='{}/servers/{}/action'.format(
fakes.COMPUTE_ENDPOINT, '1234'
),
validate=dict(
json={
'removeSecurityGroup': {'name': 'nova-sec-group'}
@ -974,8 +945,9 @@ class TestSecurityGroups(base.TestCase):
),
dict(
method='POST',
uri='%s/servers/%s/action'
% (fakes.COMPUTE_ENDPOINT, '1234'),
uri='{}/servers/{}/action'.format(
fakes.COMPUTE_ENDPOINT, '1234'
),
validate=dict(json=validate),
),
]
@ -1000,16 +972,12 @@ class TestSecurityGroups(base.TestCase):
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri='{endpoint}/servers/detail'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/detail',
json={'servers': [fake_server]},
),
dict(
method='GET',
uri='{endpoint}/os-security-groups'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups',
json={'security_groups': [nova_grp_dict]},
),
]
@ -1064,9 +1032,7 @@ class TestSecurityGroups(base.TestCase):
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri='{endpoint}/servers/detail'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/detail',
json={'servers': [fake_server]},
),
]


@ -33,9 +33,7 @@ class TestServerConsole(base.TestCase):
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='{endpoint}/servers/{id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action',
json={"output": self.output},
validate=dict(json={'os-getConsoleOutput': {'length': 5}}),
),
@ -53,16 +51,12 @@ class TestServerConsole(base.TestCase):
self.get_nova_discovery_mock_dict(),
dict(
method='GET',
uri='{endpoint}/servers/detail'.format(
endpoint=fakes.COMPUTE_ENDPOINT
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/detail',
json={"servers": [self.server]},
),
dict(
method='POST',
uri='{endpoint}/servers/{id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action',
json={"output": self.output},
validate=dict(json={'os-getConsoleOutput': {}}),
),
@ -81,9 +75,7 @@ class TestServerConsole(base.TestCase):
self.get_nova_discovery_mock_dict(),
dict(
method='POST',
uri='{endpoint}/servers/{id}/action'.format(
endpoint=fakes.COMPUTE_ENDPOINT, id=self.server_id
),
uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action',
status_code=400,
validate=dict(json={'os-getConsoleOutput': {}}),
),


@ -45,9 +45,7 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks',
json={"stacks": fake_stacks},
),
]
@ -88,9 +86,7 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks',
status_code=404,
)
]
@ -110,9 +106,7 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks',
json={"stacks": fake_stacks},
),
]
@ -134,9 +128,7 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks',
json={"stacks": fake_stacks},
),
]
@ -151,9 +143,7 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks',
status_code=404,
)
]
@ -167,36 +157,20 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks/{name}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}',
json={"stack": self.stack},
),
dict(
method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}',
),
]
)
@ -209,9 +183,7 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks/stack_name?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT, resolve=resolve
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/stack_name?{resolve}',
status_code=404,
),
]
@ -225,36 +197,20 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}',
json={"stack": self.stack},
),
dict(
method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}',
status_code=400,
reason="ouch",
),
@ -279,29 +235,15 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks/{name}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}',
json={"stack": self.stack},
),
dict(
@ -316,17 +258,11 @@ class TestStack(base.TestCase):
),
dict(
method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}',
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
qs=marker_qs,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?{marker_qs}',
complete_qs=True,
json={
"events": [
@ -341,11 +277,7 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}',
status_code=404,
),
]
@ -369,29 +301,15 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}',
json={"stack": self.stack},
),
dict(
@ -406,17 +324,11 @@ class TestStack(base.TestCase):
),
dict(
method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}',
),
dict(
method='GET',
uri='{endpoint}/stacks/{id}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
qs=marker_qs,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}/events?{marker_qs}',
complete_qs=True,
json={
"events": [
@ -430,11 +342,7 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}',
status_code=404,
),
]
@ -457,29 +365,15 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}',
json={"stack": self.stack},
),
dict(
@ -494,17 +388,11 @@ class TestStack(base.TestCase):
),
dict(
method='DELETE',
uri='{endpoint}/stacks/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}',
),
dict(
method='GET',
uri='{endpoint}/stacks/{id}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
qs=marker_qs,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}/events?{marker_qs}',
complete_qs=True,
json={
"events": [
@ -518,27 +406,15 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{id}?resolve_outputs=False'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT, id=self.stack_id
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?resolve_outputs=False',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}?{resolve}'.format( # noqa: E501
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', # noqa: E501
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}?{resolve}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
resolve=resolve,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}',
json={"stack": failed_stack},
),
]
@ -557,9 +433,7 @@ class TestStack(base.TestCase):
[
dict(
method='POST',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks',
json={"stack": self.stack},
validate=dict(
json={
@ -574,26 +448,15 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}'
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}',
json={"stack": self.stack},
),
]
@ -616,9 +479,7 @@ class TestStack(base.TestCase):
[
dict(
method='POST',
uri='{endpoint}/stacks'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks',
json={"stack": self.stack},
validate=dict(
json={
@ -633,10 +494,7 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/events?sort_dir=asc'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?sort_dir=asc',
json={
"events": [
fakes.make_fake_stack_event(
@ -650,26 +508,15 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}'
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}',
json={"stack": self.stack},
),
]
@ -692,10 +539,7 @@ class TestStack(base.TestCase):
[
dict(
method='PUT',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}',
validate=dict(
json={
'disable_rollback': False,
@ -709,26 +553,15 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}'
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}',
json={"stack": self.stack},
),
]
@ -768,10 +601,7 @@ class TestStack(base.TestCase):
),
dict(
method='PUT',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}',
validate=dict(
json={
'disable_rollback': False,
@ -785,11 +615,7 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/events?{qs}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
qs=marker_qs,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?{marker_qs}',
json={
"events": [
fakes.make_fake_stack_event(
@ -803,26 +629,15 @@ class TestStack(base.TestCase):
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}'
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}',
json={"stack": self.stack},
),
]
@ -841,26 +656,15 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}'
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}',
json={"stack": self.stack},
),
]
@ -881,26 +685,15 @@ class TestStack(base.TestCase):
[
dict(
method='GET',
uri='{endpoint}/stacks/{name}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}',
status_code=302,
headers=dict(
location='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
)
location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}'
),
),
dict(
method='GET',
uri='{endpoint}/stacks/{name}/{id}'.format(
endpoint=fakes.ORCHESTRATION_ENDPOINT,
id=self.stack_id,
name=self.stack_name,
),
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}',
json={"stack": in_progress},
),
]
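
The stack mocks above repeatedly stage Heat's lookup redirect: a GET on /stacks/{name} answers 302 with a location header pointing at /stacks/{name}/{id}, which then serves the stack body. A self-contained sketch of that pattern (endpoint and IDs are placeholders):

import requests
import requests_mock

ENDPOINT = 'https://orchestration.example.com'  # placeholder endpoint

with requests_mock.Mocker() as m:
    m.get(
        f'{ENDPOINT}/stacks/stack_name',
        status_code=302,
        headers={'location': f'{ENDPOINT}/stacks/stack_name/1234'},
    )
    m.get(
        f'{ENDPOINT}/stacks/stack_name/1234',
        json={'stack': {'id': '1234', 'stack_name': 'stack_name'}},
    )
    # requests follows the redirect transparently, so the test only
    # needs to assert on the final body:
    resp = requests.get(f'{ENDPOINT}/stacks/stack_name')
    assert resp.json()['stack']['id'] == '1234'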


@ -88,7 +88,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'subnets'],
qs_elements=['name=%s' % self.subnet_name],
qs_elements=[f'name={self.subnet_name}'],
),
json={'subnets': [self.mock_subnet_rep]},
),
@ -143,7 +143,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % self.network_name],
qs_elements=[f'name={self.network_name}'],
),
json={'networks': [self.mock_network_rep]},
),
@ -198,7 +198,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % self.network_name],
qs_elements=[f'name={self.network_name}'],
),
json={'networks': [self.mock_network_rep]},
),
@ -246,7 +246,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % self.network_name],
qs_elements=[f'name={self.network_name}'],
),
json={'networks': [self.mock_network_rep]},
),
@ -284,7 +284,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % self.network_name],
qs_elements=[f'name={self.network_name}'],
),
json={'networks': [self.mock_network_rep]},
),
@ -345,7 +345,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % self.network_name],
qs_elements=[f'name={self.network_name}'],
),
json={'networks': [self.mock_network_rep]},
),
@ -468,7 +468,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % self.network_name],
qs_elements=[f'name={self.network_name}'],
),
json={'networks': [net1, net2]},
),
@ -513,7 +513,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % self.network_name],
qs_elements=[f'name={self.network_name}'],
),
json={'networks': [self.mock_network_rep]},
),
@ -585,7 +585,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'networks'],
qs_elements=['name=%s' % self.network_name],
qs_elements=[f'name={self.network_name}'],
),
json={'networks': [self.mock_network_rep]},
),
@ -659,7 +659,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'subnets'],
qs_elements=['name=%s' % self.subnet_name],
qs_elements=[f'name={self.subnet_name}'],
),
json={'subnets': [self.mock_subnet_rep]},
),
@ -724,7 +724,7 @@ class TestSubnet(base.TestCase):
'network',
'public',
append=['v2.0', 'subnets'],
qs_elements=['name=%s' % self.subnet_name],
qs_elements=[f'name={self.subnet_name}'],
),
json={'subnets': [subnet1, subnet2]},
),


@ -57,7 +57,7 @@ class TestUpdateServer(base.TestCase):
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=%s' % self.server_name],
qs_elements=[f'name={self.server_name}'],
),
json={'servers': [self.fake_server]},
),
@ -108,7 +108,7 @@ class TestUpdateServer(base.TestCase):
'compute',
'public',
append=['servers', 'detail'],
qs_elements=['name=%s' % self.server_name],
qs_elements=[f'name={self.server_name}'],
),
json={'servers': [self.fake_server]},
),
