trivial: Prepare for pyupgrade pre-commit hook
This is kept separate from the addition of the actual hook so that we can
ignore the commit later.

Change-Id: I3af752894490d619b3ef755aca5e717edafe104c
Signed-off-by: Stephen Finucane <stephenfin@redhat.com>
This commit is contained in:
parent 37db30af3c
commit 8b02b04572
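The hunks below are the mechanical rewrites pyupgrade performs once the project targets Python 3 only: zero-argument super(), f-strings in place of %-formatting and simple .format() calls, set/dict literals and comprehensions, dropping the redundant u'' prefix and the default 'r' open() mode, and OSError in place of its old aliases. As a rough illustration (not part of the commit; the names below are made up), the before/after patterns look like this:

class Parent:
    def greet(self):
        return 'hello'


class Child(Parent):
    def greet(self):
        # before: return super(Child, self).greet()
        return super().greet()  # zero-argument super()


def demo(cls_name='Image', name_or_id='abc123'):
    # before: "No %s found for %s" % (cls_name, name_or_id)
    msg = f"No {cls_name} found for {name_or_id}"

    # before: set([v.lower() for v in ('GET', 'PUT')])
    verbs = {v.lower() for v in ('GET', 'PUT')}

    # before: u'literal' -- the u prefix is a no-op on Python 3
    literal = 'literal'

    # before: open(path, 'r') -- 'r' is already the default mode
    # before: except IOError / except socket.error -- both alias OSError
    return msg, verbs, literal


print(Child().greet(), demo())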
@@ -60,7 +60,7 @@ SSH_DIR = _get_resource_value(
 )
 PRIVATE_KEYPAIR_FILE = _get_resource_value(
     'private_keypair_file',
-    '{ssh_dir}/id_rsa.{key}'.format(ssh_dir=SSH_DIR, key=KEYPAIR_NAME),
+    f'{SSH_DIR}/id_rsa.{KEYPAIR_NAME}',
 )

 EXAMPLE_IMAGE_NAME = 'openstacksdk-example-public-image'
@@ -52,7 +52,7 @@ class AcceleratorRequest(resource.Resource):
         # and its value is an ordinary JSON patch. spec:
         # https://specs.openstack.org/openstack/cyborg-specs/specs/train/implemented/cyborg-api

-        converted = super(AcceleratorRequest, self)._convert_patch(patch)
+        converted = super()._convert_patch(patch)
         converted = {self.id: converted}
         return converted

@@ -102,11 +102,9 @@ class AcceleratorRequest(resource.Resource):
         if isinstance(self, AcceleratorRequest):
             if self.resources_key in attrs:
                 attrs = attrs[self.resources_key][0]
-        return super(AcceleratorRequest, self)._consume_attrs(mapping, attrs)
+        return super()._consume_attrs(mapping, attrs)

     def create(self, session, base_path=None):
         # This overrides the default behavior of resource creation because
         # cyborg doesn't accept resource_key in its request.
-        return super(AcceleratorRequest, self).create(
-            session, prepend_key=False, base_path=base_path
-        )
+        return super().create(session, prepend_key=False, base_path=base_path)
@@ -39,14 +39,10 @@ class DeviceProfile(resource.Resource):
     # TODO(s_shogo): This implementation only treat [ DeviceProfile ], and
     # cannot treat multiple DeviceProfiles in list.
     def _prepare_request_body(self, patch, prepend_key):
-        body = super(DeviceProfile, self)._prepare_request_body(
-            patch, prepend_key
-        )
+        body = super()._prepare_request_body(patch, prepend_key)
         return [body]

     def create(self, session, base_path=None):
         # This overrides the default behavior of resource creation because
         # cyborg doesn't accept resource_key in its request.
-        return super(DeviceProfile, self).create(
-            session, prepend_key=False, base_path=base_path
-        )
+        return super().create(session, prepend_key=False, base_path=base_path)
@@ -173,7 +173,7 @@ class Driver(resource.Resource):
         :returns: response of method call.
         """
         if verb.upper() not in ['GET', 'PUT', 'POST', 'DELETE']:
-            raise ValueError('Invalid verb: {}'.format(verb))
+            raise ValueError(f'Invalid verb: {verb}')

         session = self._get_session(session)
         request = self._prepare_request()
@@ -275,7 +275,7 @@ class Node(_common.Resource):
             # API version 1.1 uses None instead of "available". Make it
             # consistent.
             attrs['provision_state'] = 'available'
-        return super(Node, self)._consume_body_attrs(attrs)
+        return super()._consume_body_attrs(attrs)

     def create(self, session, *args, **kwargs):
         """Create a remote resource based on this instance.
@@ -346,9 +346,7 @@ class Node(_common.Resource):
         # Ironic cannot set provision_state itself, so marking it as unchanged
         self._clean_body_attrs({'provision_state'})

-        super(Node, self).create(
-            session, *args, microversion=microversion, **kwargs
-        )
+        super().create(session, *args, microversion=microversion, **kwargs)

         if (
             expected_provision_state == 'manageable'
@@ -395,7 +393,7 @@ class Node(_common.Resource):
             # the new status.
             return self.fetch(session)

-        return super(Node, self).commit(session, *args, **kwargs)
+        return super().commit(session, *args, **kwargs)

     def set_provision_state(
         self,
@@ -724,7 +722,7 @@ class Node(_common.Resource):
             retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
         )

-        msg = "Failed to inject NMI to node {node}".format(node=self.id)
+        msg = f"Failed to inject NMI to node {self.id}"
         exceptions.raise_from_response(response, error_message=msg)

     def set_power_state(self, session, target, wait=False, timeout=None):
@@ -934,13 +932,13 @@ class Node(_common.Resource):
             request.url, headers=request.headers, microversion=version
         )

-        msg = "Failed to validate node {node}".format(node=self.id)
+        msg = f"Failed to validate node {self.id}"
         exceptions.raise_from_response(response, error_message=msg)
         result = response.json()

         if required:
             failed = [
-                '%s (%s)' % (key, value.get('reason', 'no reason'))
+                '{} ({})'.format(key, value.get('reason', 'no reason'))
                 for key, value in result.items()
                 if key in required and not value.get('result')
             ]
@@ -1044,7 +1042,7 @@ class Node(_common.Resource):
             retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
         )

-        msg = "Failed to set boot device for node {node}".format(node=self.id)
+        msg = f"Failed to set boot device for node {self.id}"
         exceptions.raise_from_response(response, error_message=msg)

     def get_supported_boot_devices(self, session):
@@ -1109,7 +1107,7 @@ class Node(_common.Resource):
             retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
         )

-        msg = "Failed to change boot mode for node {node}".format(node=self.id)
+        msg = f"Failed to change boot mode for node {self.id}"
         exceptions.raise_from_response(response, error_message=msg)

     def set_secure_boot(self, session, target):
@@ -1243,7 +1241,7 @@ class Node(_common.Resource):
             retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
         )

-        msg = "Failed to set traits for node {node}".format(node=self.id)
+        msg = f"Failed to set traits for node {self.id}"
         exceptions.raise_from_response(response, error_message=msg)

         self.traits = traits
@@ -1261,7 +1259,7 @@ class Node(_common.Resource):
         version = self._get_microversion(session, action='commit')
         request = self._prepare_request(requires_id=True)
         request.url = utils.urljoin(
-            request.url, 'vendor_passthru?method={}'.format(method)
+            request.url, f'vendor_passthru?method={method}'
         )

         call = getattr(session, verb.lower())
@@ -1439,7 +1437,7 @@ class Node(_common.Resource):
             )

         else:
-            return super(Node, self).patch(
+            return super().patch(
                 session, patch=patch, retry_on_conflict=retry_on_conflict
             )

@@ -71,7 +71,7 @@ class Introspection(resource.Resource):
             microversion=version,
             retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
         )
-        msg = "Failed to abort introspection for node {id}".format(id=self.id)
+        msg = f"Failed to abort introspection for node {self.id}"
         exceptions.raise_from_response(response, error_message=msg)

     def get_data(self, session, processed=True):
@@ -43,7 +43,7 @@ class BaseBlockStorageProxy(proxy.Proxy, metaclass=abc.ABCMeta):
         )
         volume_id = volume_obj['id']
         data = self.post(
-            '/volumes/{id}/action'.format(id=volume_id),
+            f'/volumes/{volume_id}/action',
             json={
                 'os-volume_upload_image': {
                     'force': allow_duplicates,
@@ -87,7 +87,7 @@ class Service(resource.Resource):
         if ignore_missing:
             return None
         raise exceptions.ResourceNotFound(
-            "No %s found for %s" % (cls.__name__, name_or_id)
+            f"No {cls.__name__} found for {name_or_id}"
         )

     def commit(self, session, prepend_key=False, **kwargs):
@@ -51,7 +51,7 @@ class Type(resource.Resource):
         for k, v in extra_specs.items():
             if not isinstance(v, str):
                 raise ValueError(
-                    "The value for %s (%s) must be a text string" % (k, v)
+                    f"The value for {k} ({v}) must be a text string"
                 )

         if key is not None:
@@ -117,7 +117,7 @@ class ComputeCloudMixin:
         )

     def _nova_extensions(self):
-        extensions = set([e.alias for e in self.compute.extensions()])
+        extensions = {e.alias for e in self.compute.extensions()}
        return extensions

     def _has_nova_extension(self, extension_name):
@@ -1229,7 +1229,7 @@ class ComputeCloudMixin:
                 raise exceptions.SDKException(
                     'Server reached ACTIVE state without being'
                     ' allocated an IP address AND then could not'
-                    ' be deleted: {0}'.format(e),
+                    ' be deleted: {}'.format(e),
                     extra_data=dict(server=server),
                 )
             raise exceptions.SDKException(
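Note that the hunk above only normalizes the explicit '{0}' index to '{}' rather than producing an f-string, apparently because the template is built from several implicitly concatenated string fragments, a case the tool leaves as a .format() call. A small illustration with a placeholder exception:

e = RuntimeError('boom')

# Only the '{0}' index is dropped; the '.format(e)' call itself stays
# because the template spans implicitly concatenated literals.
msg = (
    'Server reached ACTIVE state without being'
    ' allocated an IP address AND then could not'
    ' be deleted: {}'.format(e)
)
print(msg)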
@@ -1291,9 +1291,7 @@ class ComputeCloudMixin:
         """
         server = self.get_server(name_or_id, bare=True)
         if not server:
-            raise exceptions.SDKException(
-                'Invalid Server {server}'.format(server=name_or_id)
-            )
+            raise exceptions.SDKException(f'Invalid Server {name_or_id}')

         self.compute.set_server_metadata(server=server.id, **metadata)

@@ -1311,9 +1309,7 @@ class ComputeCloudMixin:
         """
         server = self.get_server(name_or_id, bare=True)
         if not server:
-            raise exceptions.SDKException(
-                'Invalid Server {server}'.format(server=name_or_id)
-            )
+            raise exceptions.SDKException(f'Invalid Server {name_or_id}')

         self.compute.delete_server_metadata(
             server=server.id, keys=metadata_keys
@@ -1545,7 +1541,7 @@ class ComputeCloudMixin:
             return True
         except exceptions.SDKException:
             raise exceptions.SDKException(
-                "Unable to delete flavor {name}".format(name=name_or_id)
+                f"Unable to delete flavor {name_or_id}"
             )

     def set_flavor_specs(self, flavor_id, extra_specs):
@@ -104,9 +104,7 @@ class DnsCloudMixin:
         try:
             return self.dns.create_zone(**zone)
         except exceptions.SDKException:
-            raise exceptions.SDKException(
-                "Unable to create zone {name}".format(name=name)
-            )
+            raise exceptions.SDKException(f"Unable to create zone {name}")

     @_utils.valid_kwargs('email', 'description', 'ttl', 'masters')
     def update_zone(self, name_or_id, **kwargs):
@@ -180,14 +180,14 @@ class FloatingIPCloudMixin:
         :returns: A floating ip
             `:class:`~openstack.network.v2.floating_ip.FloatingIP`.
         """
-        error_message = "Error getting floating ip with ID {id}".format(id=id)
+        error_message = f"Error getting floating ip with ID {id}"

         if self._use_neutron_floating():
             fip = self.network.get_ip(id)
             return fip
         else:
             data = proxy._json_response(
-                self.compute.get('/os-floating-ips/{id}'.format(id=id)),
+                self.compute.get(f'/os-floating-ips/{id}'),
                 error_message=error_message,
             )
             return self._normalize_floating_ip(
@@ -230,7 +230,7 @@ class FloatingIPCloudMixin:

             if floating_network_id is None:
                 raise exceptions.NotFoundException(
-                    "unable to find external network {net}".format(net=network)
+                    f"unable to find external network {network}"
                 )
         else:
             floating_network_id = self._get_floating_network_id()
@@ -270,7 +270,7 @@ class FloatingIPCloudMixin:
         """

         with _utils.openstacksdk_exceptions(
-            "Unable to create floating IP in pool {pool}".format(pool=pool)
+            f"Unable to create floating IP in pool {pool}"
         ):
             if pool is None:
                 pools = self.list_floating_ip_pools()
@@ -442,7 +442,7 @@ class FloatingIPCloudMixin:
             except exceptions.ResourceNotFound:
                 raise exceptions.NotFoundException(
                     "unable to find network for floating ips with ID "
-                    "{0}".format(network_name_or_id)
+                    "{}".format(network_name_or_id)
                 )
             network_id = network['id']
         else:
@@ -516,7 +516,7 @@ class FloatingIPCloudMixin:

     def _nova_create_floating_ip(self, pool=None):
         with _utils.openstacksdk_exceptions(
-            "Unable to create floating IP in pool {pool}".format(pool=pool)
+            f"Unable to create floating IP in pool {pool}"
         ):
             if pool is None:
                 pools = self.list_floating_ip_pools()
@@ -599,9 +599,7 @@ class FloatingIPCloudMixin:
     def _nova_delete_floating_ip(self, floating_ip_id):
         try:
             proxy._json_response(
-                self.compute.delete(
-                    '/os-floating-ips/{id}'.format(id=floating_ip_id)
-                ),
+                self.compute.delete(f'/os-floating-ips/{floating_ip_id}'),
                 error_message='Unable to delete floating IP {fip_id}'.format(
                     fip_id=floating_ip_id
                 ),
@@ -738,7 +736,7 @@ class FloatingIPCloudMixin:
         )
         if not port:
             raise exceptions.SDKException(
-                "unable to find a port for server {0}".format(server['id'])
+                "unable to find a port for server {}".format(server['id'])
             )

         floating_ip_args = {'port_id': port['id']}
@@ -753,7 +751,7 @@ class FloatingIPCloudMixin:
         f_ip = self.get_floating_ip(id=floating_ip_id)
         if f_ip is None:
             raise exceptions.SDKException(
-                "unable to find floating IP {0}".format(floating_ip_id)
+                f"unable to find floating IP {floating_ip_id}"
             )
         error_message = "Error attaching IP {ip} to instance {id}".format(
             ip=floating_ip_id, id=server_id
@@ -763,7 +761,7 @@ class FloatingIPCloudMixin:
             body['fixed_address'] = fixed_address
         return proxy._json_response(
             self.compute.post(
-                '/servers/{server_id}/action'.format(server_id=server_id),
+                f'/servers/{server_id}/action',
                 json=dict(addFloatingIp=body),
             ),
             error_message=error_message,
@@ -806,11 +804,9 @@ class FloatingIPCloudMixin:
             self.network.update_ip(floating_ip_id, port_id=None)
         except exceptions.SDKException:
             raise exceptions.SDKException(
-                (
-                    "Error detaching IP {ip} from "
-                    "server {server_id}".format(
-                        ip=floating_ip_id, server_id=server_id
-                    )
-                )
+                "Error detaching IP {ip} from "
+                "server {server_id}".format(
+                    ip=floating_ip_id, server_id=server_id
+                )
             )

@@ -820,14 +816,14 @@ class FloatingIPCloudMixin:
         f_ip = self.get_floating_ip(id=floating_ip_id)
         if f_ip is None:
             raise exceptions.SDKException(
-                "unable to find floating IP {0}".format(floating_ip_id)
+                f"unable to find floating IP {floating_ip_id}"
             )
         error_message = "Error detaching IP {ip} from instance {id}".format(
             ip=floating_ip_id, id=server_id
         )
         return proxy._json_response(
             self.compute.post(
-                '/servers/{server_id}/action'.format(server_id=server_id),
+                f'/servers/{server_id}/action',
                 json=dict(
                     removeFloatingIp=dict(address=f_ip['floating_ip_address'])
                 ),
@@ -1222,7 +1218,7 @@ class FloatingIPCloudMixin:
                 return port, fixed_address
         raise exceptions.SDKException(
             "unable to find a free fixed IPv4 address for server "
-            "{0}".format(server['id'])
+            "{}".format(server['id'])
         )
         # unfortunately a port can have more than one fixed IP:
         # we can't use the search_ports filtering for fixed_address as
@@ -384,31 +384,25 @@ class IdentityCloudMixin:
         try:
             user = self.get_user(name_or_id, **kwargs)
             if not user:
-                self.log.debug(
-                    "User {0} not found for deleting".format(name_or_id)
-                )
+                self.log.debug(f"User {name_or_id} not found for deleting")
                 return False

             self.identity.delete_user(user)
             return True

         except exceptions.SDKException:
-            self.log.exception(
-                "Error in deleting user {user}".format(user=name_or_id)
-            )
+            self.log.exception(f"Error in deleting user {name_or_id}")
             return False

     def _get_user_and_group(self, user_name_or_id, group_name_or_id):
         user = self.get_user(user_name_or_id)
         if not user:
-            raise exceptions.SDKException(
-                'User {user} not found'.format(user=user_name_or_id)
-            )
+            raise exceptions.SDKException(f'User {user_name_or_id} not found')

         group = self.get_group(group_name_or_id)
         if not group:
             raise exceptions.SDKException(
-                'Group {user} not found'.format(user=group_name_or_id)
+                f'Group {group_name_or_id} not found'
             )

         return (user, group)
@@ -731,7 +725,7 @@ class IdentityCloudMixin:
             self.identity.delete_endpoint(id)
             return True
         except exceptions.SDKException:
-            self.log.exception("Failed to delete endpoint {id}".format(id=id))
+            self.log.exception(f"Failed to delete endpoint {id}")
             return False

     def create_domain(self, name, description=None, enabled=True):
@@ -778,7 +772,7 @@ class IdentityCloudMixin:
         dom = self.get_domain(None, name_or_id)
         if dom is None:
             raise exceptions.SDKException(
-                "Domain {0} not found for updating".format(name_or_id)
+                f"Domain {name_or_id} not found for updating"
             )
         domain_id = dom['id']

@@ -1006,7 +1000,7 @@ class IdentityCloudMixin:
         group = self.identity.find_group(name_or_id, **kwargs)
         if group is None:
             raise exceptions.SDKException(
-                "Group {0} not found for updating".format(name_or_id)
+                f"Group {name_or_id} not found for updating"
             )

         group_ref = {}
@@ -1039,9 +1033,7 @@ class IdentityCloudMixin:
             return True

         except exceptions.SDKException:
-            self.log.exception(
-                "Unable to delete group {name}".format(name=name_or_id)
-            )
+            self.log.exception(f"Unable to delete group {name_or_id}")
             return False

     def list_roles(self, **kwargs):
@@ -1235,9 +1227,7 @@ class IdentityCloudMixin:
             self.identity.delete_role(role)
             return True
         except exceptions.SDKExceptions:
-            self.log.exception(
-                "Unable to delete role {name}".format(name=name_or_id)
-            )
+            self.log.exception(f"Unable to delete role {name_or_id}")
             raise

     def _get_grant_revoke_params(
@@ -1261,7 +1251,7 @@ class IdentityCloudMixin:

         data['role'] = self.identity.find_role(name_or_id=role)
         if not data['role']:
-            raise exceptions.SDKException('Role {0} not found.'.format(role))
+            raise exceptions.SDKException(f'Role {role} not found.')

         if user:
             # use cloud.get_user to save us from bad searching by name
@@ -170,7 +170,7 @@ class ImageCloudMixin:
                 return image
             elif image['status'] == 'error':
                 raise exceptions.SDKException(
-                    'Image {image} hit error state'.format(image=image_id)
+                    f'Image {image_id} hit error state'
                 )

     def delete_image(
@@ -2772,7 +2772,7 @@ class NetworkCloudMixin:
         port = self.get_port(name_or_id=name_or_id)
         if port is None:
             raise exceptions.SDKException(
-                "failed to find port '{port}'".format(port=name_or_id)
+                f"failed to find port '{name_or_id}'"
             )

         return self.network.update_port(port, **kwargs)
@@ -2813,7 +2813,7 @@ class NetworkCloudMixin:
             port = self.get_port(name_or_id, filters)
             if not port:
                 raise exceptions.ResourceNotFound(
-                    'Port {id} not found'.format(id=name_or_id)
+                    f'Port {name_or_id} not found'
                 )
             ids_list.append(port['id'])
         return ids_list
@@ -453,10 +453,9 @@ class ObjectStoreCloudMixin:
             error.
         """
         try:
-            for ret in self.object_store.stream_object(
+            yield from self.object_store.stream_object(
                 obj, container, chunk_size=resp_chunk_size
-            ):
-                yield ret
+            )
         except exceptions.ResourceNotFound:
             return

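The change above swaps a loop that re-yields each chunk for yield from, which delegates to the inner generator directly (PEP 380). A minimal sketch of the equivalence, using made-up chunk data:

def chunks():
    yield b'abc'
    yield b'def'


def stream_old():
    for ret in chunks():
        yield ret


def stream_new():
    # Equivalent for plain iteration, and it also forwards send()/throw()
    # to the inner generator.
    yield from chunks()


assert list(stream_old()) == list(stream_new()) == [b'abc', b'def']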
@@ -419,9 +419,7 @@ class SecurityGroupCloudMixin:
         else:
             try:
                 exceptions.raise_from_response(
-                    self.compute.delete(
-                        '/os-security-group-rules/{id}'.format(id=rule_id)
-                    )
+                    self.compute.delete(f'/os-security-group-rules/{rule_id}')
                 )
             except exceptions.NotFoundException:
                 return False
@@ -389,9 +389,7 @@ def range_filter(data, key, range_exp):

     # If parsing the range fails, it must be a bad value.
     if val_range is None:
-        raise exceptions.SDKException(
-            "Invalid range value: {value}".format(value=range_exp)
-        )
+        raise exceptions.SDKException(f"Invalid range value: {range_exp}")

     op = val_range[0]
     if op:
@@ -28,7 +28,7 @@ class OpenStackCloudUnavailableFeature(OpenStackCloudException):

 # Backwards compat. These are deprecated and should not be used in new code.
 class OpenStackCloudCreateException(OpenStackCloudException):
     def __init__(self, resource, resource_id, extra_data=None, **kwargs):
-        super(OpenStackCloudCreateException, self).__init__(
+        super().__init__(
             message="Error creating {resource}: {resource_id}".format(
                 resource=resource, resource_id=resource_id
             ),
@@ -265,7 +265,7 @@ def find_best_address(addresses, public=False, cloud_public=True):
                     connect_socket.settimeout(1)
                     connect_socket.connect(sa)
                     return address
-            except socket.error:
+            except OSError:
                 # Sometimes a "no route to address" type error
                 # will fail fast, but can often come alive
                 # when retried.
@@ -370,7 +370,7 @@ def get_groups_from_server(cloud, server, server_vars):
     groups.append(region)

     # And one by cloud_region
-    groups.append("%s_%s" % (cloud_name, region))
+    groups.append(f"{cloud_name}_{region}")

     # Check if group metadata key in servers' metadata
     group = server['metadata'].get('group')
@@ -385,17 +385,17 @@ def get_groups_from_server(cloud, server, server_vars):

     for key in ('flavor', 'image'):
         if 'name' in server_vars[key]:
-            groups.append('%s-%s' % (key, server_vars[key]['name']))
+            groups.append('{}-{}'.format(key, server_vars[key]['name']))

     for key, value in iter(server['metadata'].items()):
-        groups.append('meta-%s_%s' % (key, value))
+        groups.append(f'meta-{key}_{value}')

     az = server_vars.get('az', None)
     if az:
         # Make groups for az, region_az and cloud_region_az
         groups.append(az)
-        groups.append('%s_%s' % (region, az))
-        groups.append('%s_%s_%s' % (cloud.name, region, az))
+        groups.append(f'{region}_{az}')
+        groups.append(f'{cloud.name}_{region}_{az}')
     return groups

@@ -64,7 +64,7 @@ class _OpenStackCloudMixin:
     _SHADE_OBJECT_AUTOCREATE_KEY = 'x-object-meta-x-shade-autocreated'

     def __init__(self):
-        super(_OpenStackCloudMixin, self).__init__()
+        super().__init__()

         self.log = _log.setup_logging('openstack')

@@ -172,10 +172,10 @@ class _OpenStackCloudMixin:
             name_key = 'username'
         else:
             name_key = 'project_name'
-        id_key = '{prefix}_id'.format(prefix=prefix)
+        id_key = f'{prefix}_id'
         pop_keys(params, kwargs, name_key, id_key)
-        id_key = '{prefix}_domain_id'.format(prefix=prefix)
-        name_key = '{prefix}_domain_name'.format(prefix=prefix)
+        id_key = f'{prefix}_domain_id'
+        name_key = f'{prefix}_domain_name'
         pop_keys(params, kwargs, name_key, id_key)

         for key, value in kwargs.items():
@@ -282,14 +282,14 @@ class _OpenStackCloudMixin:
         if namespace is None:
             name_key = self.name
         else:
-            name_key = '%s:%s' % (self.name, namespace)
+            name_key = f'{self.name}:{namespace}'

         def generate_key(*args, **kwargs):
             # TODO(frickler): make handling arg keys actually work
             arg_key = ''
             kw_keys = sorted(kwargs.keys())
             kwargs_key = ','.join(
-                ['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache']
+                [f'{k}:{kwargs[k]}' for k in kw_keys if k != 'cache']
             )
             ans = "_".join([str(name_key), fname, arg_key, kwargs_key])
             return ans
@@ -57,7 +57,7 @@ class QuotaSet(resource.Resource):
         error_message=None,
         **params
     ):
-        return super(QuotaSet, self).fetch(
+        return super().fetch(
             session,
             requires_id=False,
             base_path=base_path,
@@ -103,7 +103,7 @@ class Flavor(resource.Resource):
             # is_public is ternary - None means give all flavors.
             # Force it to string to avoid requests skipping it.
             params['is_public'] = 'None'
-        return super(Flavor, cls).list(
+        return super().list(
             session, paginated=paginated, base_path=base_path, **params
         )

@@ -60,7 +60,7 @@ class Keypair(resource.Resource):
         # it **SOMETIMES** keypair picks up id and not name. This is a hammer.
         if 'id' in attrs:
             attrs.setdefault('name', attrs.pop('id'))
-        return super(Keypair, self)._consume_attrs(mapping, attrs)
+        return super()._consume_attrs(mapping, attrs)

     @classmethod
     def existing(cls, connection=None, **kwargs):
@@ -113,7 +113,7 @@ class Limits(resource.Resource):
         """
         # TODO(mordred) We shouldn't have to subclass just to declare
         # requires_id = False.
-        return super(Limits, self).fetch(
+        return super().fetch(
             session=session,
             requires_id=requires_id,
             error_message=error_message,
@@ -268,7 +268,7 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
         base_path=None,
         **kwargs,
     ):
-        request = super(Server, self)._prepare_request(
+        request = super()._prepare_request(
             requires_id=requires_id,
             prepend_key=prepend_key,
             base_path=base_path,
@@ -55,6 +55,6 @@ class ServerRemoteConsole(resource.Resource):
             raise ValueError(
                 'Console type webmks is not supported on server side'
             )
-        return super(ServerRemoteConsole, self).create(
+        return super().create(
             session, prepend_key=prepend_key, base_path=base_path, **params
         )
@@ -84,12 +84,12 @@ class Service(resource.Resource):
         if ignore_missing:
             return None
         raise exceptions.ResourceNotFound(
-            "No %s found for %s" % (cls.__name__, name_or_id)
+            f"No {cls.__name__} found for {name_or_id}"
         )

     def commit(self, session, prepend_key=False, **kwargs):
         # we need to set prepend_key to false
-        return super(Service, self).commit(
+        return super().commit(
             session,
             prepend_key=prepend_key,
             **kwargs,
@@ -19,5 +19,5 @@ from openstack.config import cloud_region

 class CloudConfig(cloud_region.CloudRegion):
     def __init__(self, name, region, config, **kwargs):
-        super(CloudConfig, self).__init__(name, region, config, **kwargs)
+        super().__init__(name, region, config, **kwargs)
         self.region = region
@@ -76,7 +76,7 @@ def _make_key(key, service_type):

 def _disable_service(config, service_type, reason=None):
     service_type = service_type.lower().replace('-', '_')
-    key = 'has_{service_type}'.format(service_type=service_type)
+    key = f'has_{service_type}'
     config[key] = False
     if reason:
         d_key = _make_key('disabled_reason', service_type)
@@ -1217,7 +1217,7 @@ class CloudRegion:

     def has_service(self, service_type):
         service_type = service_type.lower().replace('-', '_')
-        key = 'has_{service_type}'.format(service_type=service_type)
+        key = f'has_{service_type}'
         return self.config.get(
             key, self._service_type_manager.is_official(service_type)
         )
@@ -1227,7 +1227,7 @@ class CloudRegion:

     def enable_service(self, service_type):
         service_type = service_type.lower().replace('-', '_')
-        key = 'has_{service_type}'.format(service_type=service_type)
+        key = f'has_{service_type}'
         self.config[key] = True

     def get_disabled_reason(self, service_type):
@@ -46,7 +46,7 @@ def get_defaults(json_path=_json_path):
         cert=None,
         key=None,
     )
-    with open(json_path, 'r') as json_file:
+    with open(json_path) as json_file:
         updates = json.load(json_file)
         if updates is not None:
             tmp_defaults.update(updates)
@@ -411,15 +411,13 @@ class OpenStackConfig:
                 ret[newkey] = os.environ[k]
         # If the only environ keys are selectors or behavior modification,
         # don't return anything
-        selectors = set(
-            [
-                'OS_CLOUD',
-                'OS_REGION_NAME',
-                'OS_CLIENT_CONFIG_FILE',
-                'OS_CLIENT_SECURE_FILE',
-                'OS_CLOUD_NAME',
-            ]
-        )
+        selectors = {
+            'OS_CLOUD',
+            'OS_REGION_NAME',
+            'OS_CLIENT_CONFIG_FILE',
+            'OS_CLIENT_SECURE_FILE',
+            'OS_CLOUD_NAME',
+        }
         if set(environkeys) - selectors:
             return ret
         return None
@@ -456,12 +454,12 @@ class OpenStackConfig:
         for path in filelist:
             if os.path.exists(path):
                 try:
-                    with open(path, 'r') as f:
+                    with open(path) as f:
                         if path.endswith('json'):
                             return path, json.load(f)
                         else:
                             return path, yaml.safe_load(f)
-                except IOError as e:
+                except OSError as e:
                     if e.errno == errno.EACCES:
                         # Can't access file so let's continue to the next
                         # file
@@ -560,9 +558,7 @@ class OpenStackConfig:

         # Only validate cloud name if one was given
         if name and name not in self.cloud_config['clouds']:
-            raise exceptions.ConfigException(
-                "Cloud {name} was not found.".format(name=name)
-            )
+            raise exceptions.ConfigException(f"Cloud {name} was not found.")

         our_cloud = self.cloud_config['clouds'].get(name, dict())
         if profile:
@@ -1440,7 +1436,7 @@ class OpenStackConfig:
         try:
             with open(config_file) as fh:
                 cur_config = yaml.safe_load(fh)
-        except IOError as e:
+        except OSError as e:
             # Not no such file
             if e.errno != 2:
                 raise
openstack/config/vendors/__init__.py (4 changes, vendored)
@@ -33,11 +33,11 @@ def _get_vendor_defaults():
     global _VENDOR_DEFAULTS
     if not _VENDOR_DEFAULTS:
         for vendor in glob.glob(os.path.join(_VENDORS_PATH, '*.yaml')):
-            with open(vendor, 'r') as f:
+            with open(vendor) as f:
                 vendor_data = yaml.safe_load(f)
                 _VENDOR_DEFAULTS[vendor_data['name']] = vendor_data['profile']
         for vendor in glob.glob(os.path.join(_VENDORS_PATH, '*.json')):
-            with open(vendor, 'r') as f:
+            with open(vendor) as f:
                 vendor_data = json.load(f)
                 _VENDOR_DEFAULTS[vendor_data['name']] = vendor_data['profile']
     return _VENDOR_DEFAULTS
@@ -69,7 +69,7 @@ class Resource(resource.Resource):
         if ignore_missing:
             return None
         raise exceptions.ResourceNotFound(
-            "No %s found for %s" % (cls.__name__, name_or_id)
+            f"No {cls.__name__} found for {name_or_id}"
         )

     @classmethod
@@ -29,21 +29,21 @@ class SDKException(Exception):
     def __init__(self, message=None, extra_data=None):
         self.message = self.__class__.__name__ if message is None else message
         self.extra_data = extra_data
-        super(SDKException, self).__init__(self.message)
+        super().__init__(self.message)


 class EndpointNotFound(SDKException):
     """A mismatch occurred between what the client and server expect."""

     def __init__(self, message=None):
-        super(EndpointNotFound, self).__init__(message)
+        super().__init__(message)


 class InvalidResponse(SDKException):
     """The response from the server is not valid for this request."""

     def __init__(self, response):
-        super(InvalidResponse, self).__init__()
+        super().__init__()
         self.response = response


@@ -51,7 +51,7 @@ class InvalidRequest(SDKException):
     """The request to the server is not valid."""

     def __init__(self, message=None):
-        super(InvalidRequest, self).__init__(message)
+        super().__init__(message)


 class HttpException(SDKException, _rex.HTTPError):
@@ -111,7 +111,7 @@ class HttpException(SDKException, _rex.HTTPError):
             remote_error += str(self.details)

         return "{message}: {remote_error}".format(
-            message=super(HttpException, self).__str__(),
+            message=super().__str__(),
             remote_error=remote_error,
         )

@@ -142,12 +142,12 @@ class MethodNotSupported(SDKException):
         except AttributeError:
             name = resource.__class__.__name__

-        message = 'The %s method is not supported for %s.%s' % (
+        message = 'The {} method is not supported for {}.{}'.format(
             method,
             resource.__module__,
             name,
         )
-        super(MethodNotSupported, self).__init__(message=message)
+        super().__init__(message=message)


 class DuplicateResource(SDKException):
@@ -46,7 +46,7 @@ class ConnectionFixture(fixtures.Fixture):
     }

     def __init__(self, suburl=False, project_id=None, *args, **kwargs):
-        super(ConnectionFixture, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
         self._endpoint_templates = _ENDPOINT_TEMPLATES
         if suburl:
             self.use_suburl()
@@ -22,7 +22,7 @@ def _verify_checksum(md5, checksum):
         digest = md5.hexdigest()
         if digest != checksum:
             raise exceptions.InvalidResponse(
-                "checksum mismatch: %s != %s" % (checksum, digest)
+                f"checksum mismatch: {checksum} != {digest}"
             )


@@ -295,7 +295,7 @@ class Proxy(proxy.Proxy):
             image = self._connection._get_and_munchify(
                 'image',
                 self.put(
-                    '/images/{id}'.format(id=image.id),
+                    f'/images/{image.id}',
                     headers=headers,
                     data=image_data,
                 ),
@@ -303,7 +303,7 @@ class Proxy(proxy.Proxy):
         except exc.HttpException:
             self.log.debug("Deleting failed upload of image %s", name)
             try:
-                self.delete('/images/{id}'.format(id=image.id))
+                self.delete(f'/images/{image.id}')
             except exc.HttpException:
                 # We're just trying to clean up - if it doesn't work - shrug
                 self.log.warning(
@@ -434,10 +434,10 @@ class Proxy(proxy.Proxy):
         img_props = {}
         for k, v in iter(properties.items()):
             if image.properties.get(k, None) != v:
-                img_props['x-image-meta-{key}'.format(key=k)] = v
+                img_props[f'x-image-meta-{k}'] = v
         if not img_props:
             return False
-        self.put('/images/{id}'.format(id=image.id), headers=img_props)
+        self.put(f'/images/{image.id}', headers=img_props)
         return True

     def update_image_properties(
@@ -469,7 +469,7 @@ class Proxy(proxy.Proxy):
         for k, v in iter(kwargs.items()):
             if v and k in ['ramdisk', 'kernel']:
                 v = self._connection.get_image_id(v)
-                k = '{0}_id'.format(k)
+                k = f'{k}_id'
                 img_props[k] = v

         return self._update_image_properties(image, meta, img_props)
@@ -135,5 +135,5 @@ class Image(resource.Resource, _download.DownloadMixin):
         if ignore_missing:
             return None
         raise exceptions.ResourceNotFound(
-            "No %s found for %s" % (cls.__name__, name_or_id)
+            f"No {cls.__name__} found for {name_or_id}"
         )
@@ -598,9 +598,7 @@ class Proxy(proxy.Proxy):
             self.log.debug("Image creation failed", exc_info=True)
             raise
         except Exception as e:
-            raise exceptions.SDKException(
-                "Image creation failed: {message}".format(message=str(e))
-            )
+            raise exceptions.SDKException(f"Image creation failed: {str(e)}")

     def _make_v2_image_params(self, meta, properties):
         ret: ty.Dict = {}
@@ -949,7 +947,7 @@ class Proxy(proxy.Proxy):
         for k, v in iter(kwargs.items()):
             if v and k in ['ramdisk', 'kernel']:
                 v = self._connection.get_image_id(v)
-                k = '{0}_id'.format(k)
+                k = f'{k}_id'
                 properties[k] = v

         img_props = image.properties.copy()
@@ -1840,7 +1838,7 @@ class Proxy(proxy.Proxy):
             if task.status.lower() == status.lower():
                 return task

-        name = "{res}:{id}".format(res=task.__class__.__name__, id=task.id)
+        name = f"{task.__class__.__name__}:{task.id}"
         msg = "Timeout waiting for {name} to transition to {status}".format(
             name=name, status=status
         )
@@ -385,7 +385,7 @@ class Image(resource.Resource, tag.TagMixin, _download.DownloadMixin):
         base_path=None,
         **kwargs,
     ):
-        request = super(Image, self)._prepare_request(
+        request = super()._prepare_request(
             requires_id=requires_id,
             prepend_key=prepend_key,
             patch=patch,
@@ -403,7 +403,7 @@ class Image(resource.Resource, tag.TagMixin, _download.DownloadMixin):
     @classmethod
     def find(cls, session, name_or_id, ignore_missing=True, **params):
         # Do a regular search first (ignoring missing)
-        result = super(Image, cls).find(session, name_or_id, True, **params)
+        result = super().find(session, name_or_id, True, **params)

         if result:
             return result
@@ -419,5 +419,5 @@ class Image(resource.Resource, tag.TagMixin, _download.DownloadMixin):
         if ignore_missing:
             return None
         raise exceptions.ResourceNotFound(
-            "No %s found for %s" % (cls.__name__, name_or_id)
+            f"No {cls.__name__} found for {name_or_id}"
         )
@@ -114,9 +114,7 @@ class AmphoraConfig(resource.Resource):
     # The default _update code path also has no
     # way to pass has_body into this function, so overriding the method here.
     def commit(self, session, base_path=None):
-        return super(AmphoraConfig, self).commit(
-            session, base_path=base_path, has_body=False
-        )
+        return super().commit(session, base_path=base_path, has_body=False)


 class AmphoraFailover(resource.Resource):
@@ -139,6 +137,4 @@ class AmphoraFailover(resource.Resource):
     # The default _update code path also has no
     # way to pass has_body into this function, so overriding the method here.
     def commit(self, session, base_path=None):
-        return super(AmphoraFailover, self).commit(
-            session, base_path=base_path, has_body=False
-        )
+        return super().commit(session, base_path=base_path, has_body=False)
@@ -146,6 +146,4 @@ class LoadBalancerFailover(resource.Resource):
     # The default _update code path also has no
     # way to pass has_body into this function, so overriding the method here.
     def commit(self, session, base_path=None):
-        return super(LoadBalancerFailover, self).commit(
-            session, base_path=base_path, has_body=False
-        )
+        return super().commit(session, base_path=base_path, has_body=False)
@@ -44,7 +44,7 @@ class Quota(resource.Resource):
     def _prepare_request(
         self, requires_id=True, base_path=None, prepend_key=False, **kwargs
     ):
-        _request = super(Quota, self)._prepare_request(
+        _request = super()._prepare_request(
             requires_id, prepend_key, base_path=base_path
         )
         if self.resource_key in _request.body:
@@ -56,7 +56,7 @@ class Claim(resource.Resource):
     project_id = resource.Header("X-PROJECT-ID")

     def _translate_response(self, response, has_body=True):
-        super(Claim, self)._translate_response(response, has_body=has_body)
+        super()._translate_response(response, has_body=has_body)
         if has_body and self.location:
             # Extract claim ID from location
             self.id = self.location.split("claims/")[1]
@@ -28,7 +28,7 @@ class NetworkResource(resource.Resource):
         if_revision=None,
         **kwargs
     ):
-        req = super(NetworkResource, self)._prepare_request(
+        req = super()._prepare_request(
             requires_id=requires_id,
             prepend_key=prepend_key,
             patch=patch,
@@ -63,9 +63,7 @@ class Quota(resource.Resource):
     def _prepare_request(
         self, requires_id=True, prepend_key=False, base_path=None, **kwargs
     ):
-        _request = super(Quota, self)._prepare_request(
-            requires_id, prepend_key
-        )
+        _request = super()._prepare_request(requires_id, prepend_key)
         if self.resource_key in _request.body:
             _body = _request.body[self.resource_key]
         else:
@@ -96,9 +96,7 @@ class SecurityGroupRule(_base.NetworkResource, tag.TagMixin):
     updated_at = resource.Body('updated_at')

     def _prepare_request(self, *args, **kwargs):
-        _request = super(SecurityGroupRule, self)._prepare_request(
-            *args, **kwargs
-        )
+        _request = super()._prepare_request(*args, **kwargs)
         # Old versions of Neutron do not handle being passed a
         # remote_address_group_id and raise and error. Remove it from
         # the body if it is blank.
@@ -97,7 +97,7 @@ class BaseResource(resource.Resource):
         # This must happen before invoking parent _translate_response, cause it
         # pops known headers.
         self._last_headers = response.headers.copy()
-        super(BaseResource, self)._translate_response(
+        super()._translate_response(
             response, has_body=has_body, error_message=error_message
         )
         self._set_metadata(response.headers)
@@ -652,7 +652,7 @@ class Proxy(proxy.Proxy):
                     # While Object Storage usually expects the name to be
                     # urlencoded in most requests, the SLO manifest requires
                     # plain object names instead.
-                    path='/{name}'.format(name=parse.unquote(name)),
+                    path=f'/{parse.unquote(name)}',
                     size_bytes=segment.length,
                 )
             )
@@ -808,7 +808,7 @@ class Proxy(proxy.Proxy):
                 continue
             name = self._object_name_from_url(result.url)
             for entry in manifest:
-                if entry['path'] == '/{name}'.format(name=parse.unquote(name)):
+                if entry['path'] == f'/{parse.unquote(name)}':
                     entry['etag'] = result.headers['Etag']

     def get_info(self):
@@ -931,7 +931,7 @@ class Proxy(proxy.Proxy):
         endpoint = parse.urlparse(self.get_endpoint())
         path = '/'.join([endpoint.path, res.name, object_prefix])

-        data = '%s\n%s\n%s\n%s\n%s' % (
+        data = '{}\n{}\n{}\n{}\n{}'.format(
             path,
             redirect_url,
             max_file_size,
@@ -1067,7 +1067,7 @@ class Proxy(proxy.Proxy):
                 raise ValueError('ip_range must be representable as UTF-8')
             hmac_parts.insert(0, "ip=%s" % ip_range)

-        hmac_body = u'\n'.join(hmac_parts)
+        hmac_body = '\n'.join(hmac_parts)

         temp_url_key = self._check_temp_url_key(temp_url_key=temp_url_key)

@@ -1082,17 +1082,17 @@ class Proxy(proxy.Proxy):
         else:
             exp = str(expiration)

-        temp_url = u'{path}?temp_url_sig={sig}&temp_url_expires={exp}'.format(
+        temp_url = '{path}?temp_url_sig={sig}&temp_url_expires={exp}'.format(
             path=path_for_body,
             sig=sig,
             exp=exp,
         )

         if ip_range:
-            temp_url += u'&temp_url_ip_range={}'.format(ip_range)
+            temp_url += f'&temp_url_ip_range={ip_range}'

         if prefix:
-            temp_url += u'&temp_url_prefix={}'.format(parts[4])
+            temp_url += f'&temp_url_prefix={parts[4]}'
         # Have return type match path from caller
         if isinstance(path, bytes):
             return temp_url.encode('utf-8')
@@ -72,10 +72,10 @@ def poll_for_events(
             return False

         phys_id = event.get('physical_resource_id', '')
-        links = dict(
-            (link.get('rel'), link.get('href'))
+        links = {
+            link.get('rel'): link.get('href')
             for link in event.get('links', [])
-        )
+        }
         stack_id = links.get('stack', phys_id).rsplit('/', 1)[-1]
         return stack_id == phys_id

@@ -30,13 +30,13 @@ def _construct_yaml_str(self, node):
     return self.construct_scalar(node)


-HeatYamlLoader.add_constructor(u'tag:yaml.org,2002:str', _construct_yaml_str)
+HeatYamlLoader.add_constructor('tag:yaml.org,2002:str', _construct_yaml_str)
 # Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type
 # datetime.data which causes problems in API layer when being processed by
 # openstack.common.jsonutils. Therefore, make unicode string out of timestamps
 # until jsonutils can handle dates.
 HeatYamlLoader.add_constructor(
-    u'tag:yaml.org,2002:timestamp', _construct_yaml_str
+    'tag:yaml.org,2002:timestamp', _construct_yaml_str
 )

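The u'' prefixes dropped above are Python 2 compatibility markers; on Python 3 every bare string literal is already text, so the prefix is a no-op:

# Both spellings produce the same str type and equal values.
assert u'tag:yaml.org,2002:str' == 'tag:yaml.org,2002:str'
assert type(u'x') is type('x') is str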
@@ -58,7 +58,7 @@ class Proxy(proxy.Proxy):
             # (/stacks/name/id/everything_else), so if on third position we
             # have not a known part - discard it, not to brake further logic
             del url_parts[2]
-        return super(Proxy, self)._extract_name_consume_url_parts(url_parts)
+        return super()._extract_name_consume_url_parts(url_parts)

     def read_env_and_templates(
         self,
@@ -48,6 +48,4 @@ class SoftwareConfig(resource.Resource):
     def create(self, session, base_path=None):
         # This overrides the default behavior of resource creation because
         # heat doesn't accept resource_key in its request.
-        return super(SoftwareConfig, self).create(
-            session, prepend_key=False, base_path=base_path
-        )
+        return super().create(session, prepend_key=False, base_path=base_path)
@@ -52,13 +52,9 @@ class SoftwareDeployment(resource.Resource):
     def create(self, session, base_path=None):
         # This overrides the default behavior of resource creation because
         # heat doesn't accept resource_key in its request.
-        return super(SoftwareDeployment, self).create(
-            session, prepend_key=False, base_path=base_path
-        )
+        return super().create(session, prepend_key=False, base_path=base_path)

     def commit(self, session, base_path=None):
         # This overrides the default behavior of resource creation because
         # heat doesn't accept resource_key in its request.
-        return super(SoftwareDeployment, self).commit(
-            session, prepend_key=False, base_path=base_path
-        )
+        return super().commit(session, prepend_key=False, base_path=base_path)
@@ -36,7 +36,7 @@ class Stack(resource.Resource):
         'owner_id',
         'username',
         project_id='tenant_id',
-        **tag.TagMixin._tag_query_parameters
+        **tag.TagMixin._tag_query_parameters,
     )

     # Properties
@@ -115,14 +115,12 @@ class Stack(resource.Resource):
     def create(self, session, base_path=None):
         # This overrides the default behavior of resource creation because
         # heat doesn't accept resource_key in its request.
-        return super(Stack, self).create(
-            session, prepend_key=False, base_path=base_path
-        )
+        return super().create(session, prepend_key=False, base_path=base_path)

     def commit(self, session, base_path=None):
         # This overrides the default behavior of resource creation because
         # heat doesn't accept resource_key in its request.
-        return super(Stack, self).commit(
+        return super().commit(
             session, prepend_key=False, has_body=False, base_path=None
         )

@@ -131,16 +129,16 @@ class Stack(resource.Resource):
         # we need to use other endpoint for update preview.
         base_path = None
         if self.name and self.id:
-            base_path = '/stacks/%(stack_name)s/%(stack_id)s' % {
-                'stack_name': self.name,
-                'stack_id': self.id,
-            }
+            base_path = '/stacks/{stack_name}/{stack_id}'.format(
+                stack_name=self.name,
+                stack_id=self.id,
+            )
         elif self.name or self.id:
             # We have only one of name/id. Do not try to build a stacks/NAME/ID
             # path
-            base_path = '/stacks/%(stack_identity)s' % {
-                'stack_identity': self.name or self.id
-            }
+            base_path = '/stacks/{stack_identity}'.format(
+                stack_identity=self.name or self.id
+            )
         request = self._prepare_request(
             prepend_key=False, requires_id=False, base_path=base_path
         )
@@ -290,7 +288,7 @@ class Stack(resource.Resource):
         if ignore_missing:
             return None
         raise exceptions.ResourceNotFound(
-            "No %s found for %s" % (cls.__name__, name_or_id)
+            f"No {cls.__name__} found for {name_or_id}"
         )


@@ -112,7 +112,7 @@ class Proxy(adapter.Adapter):
         self._influxdb_client = influxdb_client
         self._influxdb_config = influxdb_config
         if self.service_type:
-            log_name = 'openstack.{0}'.format(self.service_type)
+            log_name = f'openstack.{self.service_type}'
         else:
             log_name = 'openstack'
         self.log = _log.setup_logging(log_name)
@@ -333,7 +333,9 @@ class Proxy(adapter.Adapter):
         with self._statsd_client.pipeline() as pipe:
             if response is not None:
                 duration = int(response.elapsed.total_seconds() * 1000)
-                metric_name = '%s.%s' % (key, str(response.status_code))
+                metric_name = '{}.{}'.format(
+                    key, str(response.status_code)
+                )
                 pipe.timing(metric_name, duration)
                 pipe.incr(metric_name)
                 if duration > 1000:
@@ -396,7 +398,7 @@ class Proxy(adapter.Adapter):
             tags['status_code'] = str(response.status_code)
             # Note(gtema): emit also status_code as a value (counter)
             fields[str(response.status_code)] = 1
-            fields['%s.%s' % (method, response.status_code)] = 1
+            fields[f'{method}.{response.status_code}'] = 1
             # Note(gtema): status_code field itself is also very helpful on the
             # graphs to show what was the code, instead of counting its
             # occurences
@@ -411,7 +413,7 @@ class Proxy(adapter.Adapter):
             else 'openstack_api'
         )
         # Note(gtema) append service name into the measurement name
-        measurement = '%s.%s' % (measurement, self.service_type)
+        measurement = f'{measurement}.{self.service_type}'
         data = [dict(measurement=measurement, tags=tags, fields=fields)]
         try:
             self._influxdb_client.write_points(data)
@@ -306,9 +306,7 @@ class _ComponentManager(collections.abc.MutableMapping):
     @property
     def dirty(self):
         """Return a dict of modified attributes"""
-        return dict(
-            (key, self.attributes.get(key, None)) for key in self._dirty
-        )
+        return {key: self.attributes.get(key, None) for key in self._dirty}

     def clean(self, only=None):
         """Signal that the resource no longer has modified attributes.
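Above, dict() over a generator of (key, value) tuples becomes a dict comprehension, which reads more directly and skips the tuple packing. A quick equivalence check with placeholder data:

attributes = {'a': 1, 'b': 2}
_dirty = {'a', 'b'}

# before: dict((key, attributes.get(key, None)) for key in _dirty)
result = {key: attributes.get(key, None) for key in _dirty}

assert result == dict((key, attributes.get(key, None)) for key in _dirty)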
@@ -610,7 +608,7 @@ class Resource(dict):

     def __repr__(self):
         pairs = [
-            "%s=%s" % (k, v if v is not None else 'None')
+            "{}={}".format(k, v if v is not None else 'None')
             for k, v in dict(
                 itertools.chain(
                     self._body.attributes.items(),
@@ -622,7 +620,9 @@ class Resource(dict):
         ]
         args = ", ".join(pairs)

-        return "%s.%s(%s)" % (self.__module__, self.__class__.__name__, args)
+        return "{}.{}({})".format(
+            self.__module__, self.__class__.__name__, args
+        )

     def __eq__(self, comparand):
         """Return True if another resource has the same contents"""
@@ -1406,7 +1406,7 @@ class Resource(dict):
     def _raise(message):
         if error_message:
             error_message.rstrip('.')
-            message = '%s. %s' % (error_message, message)
+            message = f'{error_message}. {message}'

         raise exceptions.NotSupported(message)

@@ -1868,7 +1868,7 @@ class Resource(dict):
             server_field = component.name

             if len(parts) > 1:
-                new_path = '/%s/%s' % (server_field, parts[1])
+                new_path = f'/{server_field}/{parts[1]}'
             else:
                 new_path = '/%s' % server_field
             converted.append(dict(item, path=new_path))
@@ -2172,7 +2172,7 @@ class Resource(dict):

         if not pagination_key and cls.resources_key:
             # Nova has a {key}_links dict in the main body
-            pagination_key = '{key}_links'.format(key=cls.resources_key)
+            pagination_key = f'{cls.resources_key}_links'

         if pagination_key:
             links = data.get(pagination_key, {})
@@ -2371,7 +2371,7 @@ class Resource(dict):
             return None

         raise exceptions.ResourceNotFound(
-            "No %s found for %s" % (cls.__name__, name_or_id)
+            f"No {cls.__name__} found for {name_or_id}"
        )


@@ -2427,7 +2427,7 @@ def wait_for_status(
         failures = ['ERROR']

     failures = [f.lower() for f in failures]
-    name = "{res}:{id}".format(res=resource.__class__.__name__, id=resource.id)
+    name = f"{resource.__class__.__name__}:{resource.id}"
     msg = "Timeout waiting for {name} to transition to {status}".format(
         name=name, status=status
     )
@@ -594,7 +594,7 @@ class Proxy(proxy.Proxy):
         return self._create(
             _share_network_subnet.ShareNetworkSubnet,
             **attrs,
-            share_network_id=share_network_id
+            share_network_id=share_network_id,
         )

     def delete_share_network_subnet(
@@ -654,7 +654,7 @@ class Proxy(proxy.Proxy):
         return self._list(
             _share_snapshot_instance.ShareSnapshotInstance,
             base_path=base_path,
-            **query
+            **query,
         )

     def get_share_snapshot_instance(self, snapshot_instance_id):
@@ -857,7 +857,7 @@ class Proxy(proxy.Proxy):
         :rtype: :class:`~openstack.shared_file_system.v2.
             share_access_rules.ShareAccessRules`
         """
-        base_path = "/shares/%s/action" % (share_id,)
+        base_path = f"/shares/{share_id}/action"
         return self._create(
             _share_access_rule.ShareAccessRule, base_path=base_path, **attrs
         )
@@ -911,7 +911,7 @@ class Proxy(proxy.Proxy):
         return self._list(
             _share_group_snapshot.ShareGroupSnapshot,
             base_path=base_path,
-            **query
+            **query,
         )

     def share_group_snapshot_members(self, group_snapshot_id):
@@ -956,7 +956,7 @@ class Proxy(proxy.Proxy):
         return self._create(
             _share_group_snapshot.ShareGroupSnapshot,
             share_group_id=share_group_id,
-            **attrs
+            **attrs,
         )

     def reset_share_group_snapshot_status(self, group_snapshot_id, status):
@@ -985,7 +985,7 @@ class Proxy(proxy.Proxy):
         return self._update(
             _share_group_snapshot.ShareGroupSnapshot,
             group_snapshot_id,
-            **attrs
+            **attrs,
         )

     def delete_share_group_snapshot(
|
@ -103,7 +103,7 @@ def generate_fake_resource(
|
||||
base_attrs[name] = [uuid.uuid4().hex]
|
||||
else:
|
||||
# Everything else
|
||||
msg = "Fake value for %s.%s can not be generated" % (
|
||||
msg = "Fake value for {}.{} can not be generated".format(
|
||||
resource_type.__name__,
|
||||
name,
|
||||
)
|
||||
@ -130,7 +130,7 @@ def generate_fake_resource(
|
||||
base_attrs[name] = dict()
|
||||
else:
|
||||
# Everything else
|
||||
msg = "Fake value for %s.%s can not be generated" % (
|
||||
msg = "Fake value for {}.{} can not be generated".format(
|
||||
resource_type.__name__,
|
||||
name,
|
||||
)
|
||||
|
@ -96,9 +96,7 @@ class TestCase(base.BaseTestCase):
|
||||
first = first.toDict()
|
||||
if isinstance(second, utils.Munch):
|
||||
second = second.toDict()
|
||||
return super(TestCase, self).assertEqual(
|
||||
first, second, *args, **kwargs
|
||||
)
|
||||
return super().assertEqual(first, second, *args, **kwargs)
|
||||
|
||||
def printLogs(self, *args):
|
||||
self._log_stream.seek(0)
|
||||
@ -135,7 +133,9 @@ class TestCase(base.BaseTestCase):
|
||||
missing_keys.append(key)
|
||||
if missing_keys:
|
||||
self.fail(
|
||||
"Keys %s are in %s but not in %s" % (missing_keys, part, whole)
|
||||
"Keys {} are in {} but not in {}".format(
|
||||
missing_keys, part, whole
|
||||
)
|
||||
)
|
||||
wrong_values = [
|
||||
(key, part[key], whole[key])
|
||||
|
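The TestCase hunks show the other recurring rewrite in this commit: the two-argument super(TestCase, self) form collapses to the zero-argument super() that Python 3 resolves implicitly. A minimal sketch:

# Zero-argument super() is equivalent to super(Derived, self) here.
class Base:
    def setUp(self):
        self.ready = True

class Derived(Base):
    def setUp(self):
        super().setUp()  # no need to repeat the class name

d = Derived()
d.setUp()
assert d.ready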
@ -27,9 +27,9 @@ from openstack.orchestration.util import template_format
from openstack import utils

PROJECT_ID = '1c36b64c840a42cd9e9b931a369337f0'
FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8dddd'
CHOCOLATE_FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8ddde'
STRAWBERRY_FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8dddf'
FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddd'
CHOCOLATE_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8ddde'
STRAWBERRY_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddf'
COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1'
ORCHESTRATION_ENDPOINT = 'https://orchestration.example.com/v1/{p}'.format(
    p=PROJECT_ID
@ -48,30 +48,30 @@ FAKE_PUBLIC_KEY = (

def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24):
    return {
        u'OS-FLV-DISABLED:disabled': False,
        u'OS-FLV-EXT-DATA:ephemeral': 0,
        u'disk': disk,
        u'id': flavor_id,
        u'links': [
        'OS-FLV-DISABLED:disabled': False,
        'OS-FLV-EXT-DATA:ephemeral': 0,
        'disk': disk,
        'id': flavor_id,
        'links': [
            {
                u'href': u'{endpoint}/flavors/{id}'.format(
                'href': '{endpoint}/flavors/{id}'.format(
                    endpoint=COMPUTE_ENDPOINT, id=flavor_id
                ),
                u'rel': u'self',
                'rel': 'self',
            },
            {
                u'href': u'{endpoint}/flavors/{id}'.format(
                'href': '{endpoint}/flavors/{id}'.format(
                    endpoint=COMPUTE_ENDPOINT, id=flavor_id
                ),
                u'rel': u'bookmark',
                'rel': 'bookmark',
            },
        ],
        u'name': name,
        u'os-flavor-access:is_public': True,
        u'ram': ram,
        u'rxtx_factor': 1.0,
        u'swap': 0,
        u'vcpus': vcpus,
        'name': name,
        'os-flavor-access:is_public': True,
        'ram': ram,
        'rxtx_factor': 1.0,
        'swap': 0,
        'vcpus': vcpus,
    }


@ -251,9 +251,9 @@ def make_fake_image(
    md5=NO_MD5,
    sha256=NO_SHA256,
    status='active',
    image_name=u'fake_image',
    image_name='fake_image',
    data=None,
    checksum=u'ee36e35a297980dee1b514de9803ec6d',
    checksum='ee36e35a297980dee1b514de9803ec6d',
):
    if data:
        md5 = utils.md5(usedforsecurity=False)
@ -265,34 +265,34 @@ def make_fake_image(
        md5 = md5.hexdigest()
        sha256 = sha256.hexdigest()
    return {
        u'image_state': u'available',
        u'container_format': u'bare',
        u'min_ram': 0,
        u'ramdisk_id': 'fake_ramdisk_id',
        u'updated_at': u'2016-02-10T05:05:02Z',
        u'file': '/v2/images/' + image_id + '/file',
        u'size': 3402170368,
        u'image_type': u'snapshot',
        u'disk_format': u'qcow2',
        u'id': image_id,
        u'schema': u'/v2/schemas/image',
        u'status': status,
        u'tags': [],
        u'visibility': u'private',
        u'locations': [
            {u'url': u'http://127.0.0.1/images/' + image_id, u'metadata': {}}
        'image_state': 'available',
        'container_format': 'bare',
        'min_ram': 0,
        'ramdisk_id': 'fake_ramdisk_id',
        'updated_at': '2016-02-10T05:05:02Z',
        'file': '/v2/images/' + image_id + '/file',
        'size': 3402170368,
        'image_type': 'snapshot',
        'disk_format': 'qcow2',
        'id': image_id,
        'schema': '/v2/schemas/image',
        'status': status,
        'tags': [],
        'visibility': 'private',
        'locations': [
            {'url': 'http://127.0.0.1/images/' + image_id, 'metadata': {}}
        ],
        u'min_disk': 40,
        u'virtual_size': None,
        u'name': image_name,
        u'checksum': md5 or checksum,
        u'created_at': u'2016-02-10T05:03:11Z',
        u'owner_specified.openstack.md5': md5 or NO_MD5,
        u'owner_specified.openstack.sha256': sha256 or NO_SHA256,
        u'owner_specified.openstack.object': 'images/{name}'.format(
        'min_disk': 40,
        'virtual_size': None,
        'name': image_name,
        'checksum': md5 or checksum,
        'created_at': '2016-02-10T05:03:11Z',
        'owner_specified.openstack.md5': md5 or NO_MD5,
        'owner_specified.openstack.sha256': sha256 or NO_SHA256,
        'owner_specified.openstack.object': 'images/{name}'.format(
            name=image_name
        ),
        u'protected': False,
        'protected': False,
    }
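The fixture hunks above drop Python 2-era u'' prefixes; on Python 3 every string literal is already unicode, so the prefix is a no-op:

# u'...' and '...' are the same str type on Python 3.
assert u'fake_image' == 'fake_image'
assert type(u'fake_image') is str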
@ -18,7 +18,7 @@ class BaseBaremetalTest(base.BaseFunctionalTest):
    node_id = None

    def setUp(self):
        super(BaseBaremetalTest, self).setUp()
        super().setUp()
        self.require_service(
            'baremetal', min_microversion=self.min_microversion
        )
@ -18,7 +18,7 @@ from openstack.tests.functional.baremetal import base

class Base(base.BaseBaremetalTest):
    def setUp(self):
        super(Base, self).setUp()
        super().setUp()
        # NOTE(dtantsur): generate a unique resource class to prevent parallel
        # tests from clashing.
        self.resource_class = 'baremetal-%d' % random.randrange(1024)
@ -18,7 +18,7 @@ class TestBareMetalDeployTemplate(base.BaseBaremetalTest):
    min_microversion = '1.55'

    def setUp(self):
        super(TestBareMetalDeployTemplate, self).setUp()
        super().setUp()

    def test_baremetal_deploy_create_get_delete(self):
        steps = [
@ -405,7 +405,7 @@ class TestBareMetalVif(base.BaseBaremetalTest):
    min_microversion = '1.28'

    def setUp(self):
        super(TestBareMetalVif, self).setUp()
        super().setUp()
        self.node = self.create_node(network_interface='noop')
        self.vif_id = "200712fc-fdfb-47da-89a6-2d19f76c7618"

@ -445,7 +445,7 @@ class TestTraits(base.BaseBaremetalTest):
    min_microversion = '1.37'

    def setUp(self):
        super(TestTraits, self).setUp()
        super().setUp()
        self.node = self.create_node()

    def test_add_remove_node_trait(self):
@ -17,7 +17,7 @@ from openstack.tests.functional.baremetal import base

class TestBareMetalPort(base.BaseBaremetalTest):
    def setUp(self):
        super(TestBareMetalPort, self).setUp()
        super().setUp()
        self.node = self.create_node()

    def test_port_create_get_delete(self):
@ -19,7 +19,7 @@ class TestBareMetalPortGroup(base.BaseBaremetalTest):
    min_microversion = '1.23'

    def setUp(self):
        super(TestBareMetalPortGroup, self).setUp()
        super().setUp()
        self.node = self.create_node()

    def test_port_group_create_get_delete(self):
@ -19,7 +19,7 @@ class TestBareMetalVolumeconnector(base.BaseBaremetalTest):
    min_microversion = '1.32'

    def setUp(self):
        super(TestBareMetalVolumeconnector, self).setUp()
        super().setUp()
        self.node = self.create_node(provision_state='enroll')

    def test_volume_connector_create_get_delete(self):
@ -19,7 +19,7 @@ class TestBareMetalVolumetarget(base.BaseBaremetalTest):
    min_microversion = '1.32'

    def setUp(self):
        super(TestBareMetalVolumetarget, self).setUp()
        super().setUp()
        self.node = self.create_node(provision_state='enroll')

    def test_volume_target_create_get_delete(self):
@ -44,7 +44,7 @@ class BaseFunctionalTest(base.TestCase):
    _wait_for_timeout_key = ''

    def setUp(self):
        super(BaseFunctionalTest, self).setUp()
        super().setUp()
        self.conn = connection.Connection(config=TEST_CLOUD_REGION)
        _disable_keep_alive(self.conn)

@ -249,7 +249,7 @@ class BaseFunctionalTest(base.TestCase):

class KeystoneBaseFunctionalTest(BaseFunctionalTest):
    def setUp(self):
        super(KeystoneBaseFunctionalTest, self).setUp()
        super().setUp()

        use_keystone_v2 = os.environ.get('OPENSTACKSDK_USE_KEYSTONE_V2', False)
        if use_keystone_v2:
@ -17,7 +17,7 @@ class BaseBlockStorageTest(base.BaseFunctionalTest):
    _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE'

    def setUp(self):
        super(BaseBlockStorageTest, self).setUp()
        super().setUp()
        self._set_user_cloud(block_storage_api_version='2')
        self._set_operator_cloud(block_storage_api_version='2')

@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v2 import base

class TestBackup(base.BaseBlockStorageTest):
    def setUp(self):
        super(TestBackup, self).setUp()
        super().setUp()

        if not self.user_cloud.has_service('object-store'):
            self.skipTest('Object service is required, but not available')
@ -62,7 +62,7 @@ class TestBackup(base.BaseBlockStorageTest):
            self.VOLUME_ID, ignore_missing=False
        )
        self.assertIsNone(sot)
        super(TestBackup, self).tearDown()
        super().tearDown()

    def test_get(self):
        sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID)
@ -18,7 +18,7 @@ from openstack.tests.functional.block_storage.v2 import base

class TestSnapshot(base.BaseBlockStorageTest):
    def setUp(self):
        super(TestSnapshot, self).setUp()
        super().setUp()

        self.SNAPSHOT_NAME = self.getUniqueString()
        self.SNAPSHOT_ID = None
@ -65,7 +65,7 @@ class TestSnapshot(base.BaseBlockStorageTest):
            self.VOLUME_ID, ignore_missing=False
        )
        self.assertIsNone(sot)
        super(TestSnapshot, self).tearDown()
        super().tearDown()

    def test_get(self):
        sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID)
@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v2 import base

class TestStats(base.BaseBlockStorageTest):
    def setUp(self):
        super(TestStats, self).setUp()
        super().setUp()

        sot = self.operator_cloud.block_storage.backend_pools()
        for pool in sot:
@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v2 import base

class TestType(base.BaseBlockStorageTest):
    def setUp(self):
        super(TestType, self).setUp()
        super().setUp()

        self.TYPE_NAME = self.getUniqueString()
        self.TYPE_ID = None
@ -34,7 +34,7 @@ class TestType(base.BaseBlockStorageTest):
            self.TYPE_ID, ignore_missing=False
        )
        self.assertIsNone(sot)
        super(TestType, self).tearDown()
        super().tearDown()

    def test_get(self):
        sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID)
@ -16,7 +16,7 @@ from openstack.tests.functional.block_storage.v2 import base

class TestVolume(base.BaseBlockStorageTest):
    def setUp(self):
        super(TestVolume, self).setUp()
        super().setUp()

        if not self.user_cloud.has_service('block-storage'):
            self.skipTest('block-storage service not supported by cloud')
@ -43,7 +43,7 @@ class TestVolume(base.BaseBlockStorageTest):
            self.VOLUME_ID, ignore_missing=False
        )
        self.assertIsNone(sot)
        super(TestVolume, self).tearDown()
        super().tearDown()

    def test_get(self):
        sot = self.user_cloud.block_storage.get_volume(self.VOLUME_ID)
@ -17,7 +17,7 @@ class BaseBlockStorageTest(base.BaseFunctionalTest):
    _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE'

    def setUp(self):
        super(BaseBlockStorageTest, self).setUp()
        super().setUp()
        self._set_user_cloud(block_storage_api_version='3')
        if not self.user_cloud.has_service('block-storage', '3'):
            self.skipTest('block-storage service not supported by cloud')
@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v3 import base

class TestBackup(base.BaseBlockStorageTest):
    def setUp(self):
        super(TestBackup, self).setUp()
        super().setUp()

        if not self.user_cloud.has_service('object-store'):
            self.skipTest('Object service is required, but not available')
@ -62,7 +62,7 @@ class TestBackup(base.BaseBlockStorageTest):
            self.VOLUME_ID, ignore_missing=False
        )
        self.assertIsNone(sot)
        super(TestBackup, self).tearDown()
        super().tearDown()

    def test_get(self):
        sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID)
@ -18,7 +18,7 @@ from openstack.tests.functional.block_storage.v3 import base

class TestSnapshot(base.BaseBlockStorageTest):
    def setUp(self):
        super(TestSnapshot, self).setUp()
        super().setUp()

        self.SNAPSHOT_NAME = self.getUniqueString()
        self.SNAPSHOT_ID = None
@ -65,7 +65,7 @@ class TestSnapshot(base.BaseBlockStorageTest):
            self.VOLUME_ID, ignore_missing=False
        )
        self.assertIsNone(sot)
        super(TestSnapshot, self).tearDown()
        super().tearDown()

    def test_get(self):
        sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID)
@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v3 import base

class TestType(base.BaseBlockStorageTest):
    def setUp(self):
        super(TestType, self).setUp()
        super().setUp()

        self.TYPE_NAME = self.getUniqueString()
        self.TYPE_ID = None
@ -36,7 +36,7 @@ class TestType(base.BaseBlockStorageTest):
            self.TYPE_ID, ignore_missing=False
        )
        self.assertIsNone(sot)
        super(TestType, self).tearDown()
        super().tearDown()

    def test_get(self):
        sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID)
@ -27,7 +27,7 @@ from openstack.tests.functional import base

class TestClusterTemplate(base.BaseFunctionalTest):
    def setUp(self):
        super(TestClusterTemplate, self).setUp()
        super().setUp()
        if not self.user_cloud.has_service(
            'container-infrastructure-management'
        ):
@ -108,7 +108,7 @@ def wait_for_delete(client, client_args, check_interval=1, timeout=60):

class TestClustering(base.BaseFunctionalTest):
    def setUp(self):
        super(TestClustering, self).setUp()
        super().setUp()
        self.skipTest('clustering service not supported by cloud')

    def test_create_profile(self):
@ -32,7 +32,7 @@ class TestCompute(base.BaseFunctionalTest):
        # but on a bad day, test_attach_detach_volume can take more time.
        self.TIMEOUT_SCALING_FACTOR = 1.5

        super(TestCompute, self).setUp()
        super().setUp()
        self.server_name = self.getUniqueString()

    def _cleanup_servers_and_volumes(self, server_name):
@ -522,7 +522,7 @@ class TestCompute(base.BaseFunctionalTest):

        self.user_cloud.delete_server_metadata(self.server_name, ['key1'])
        updated_server = self.user_cloud.get_server(self.server_name)
        self.assertEqual(set(updated_server.metadata.items()), set([]))
        self.assertEqual(set(updated_server.metadata.items()), set())

        self.assertRaises(
            exceptions.NotFoundException,
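The second TestCompute hunk swaps set([]) for the plain set() constructor; both produce an empty set, but the rewrite avoids building a throwaway list first:

# set([]) and set() are equivalent empty sets.
assert set([]) == set()
assert len(set()) == 0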
@ -39,8 +39,5 @@ class TestDevstack(base.BaseFunctionalTest):
    ]

    def test_has_service(self):
        if (
            os.environ.get('OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0')
            == '1'
        ):
        if os.environ.get(f'OPENSTACKSDK_HAS_{self.env}', '0') == '1':
            self.assertTrue(self.user_cloud.has_service(self.service))
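Here the f-string makes the environment lookup short enough for one line, so the parenthesized multi-line condition collapses. A sketch with hypothetical values:

# Hypothetical env/service values for illustration.
import os

env, service = 'HEAT', 'orchestration'
os.environ[f'OPENSTACKSDK_HAS_{env}'] = '1'

if os.environ.get(f'OPENSTACKSDK_HAS_{env}', '0') == '1':
    print(f'{service} should be available')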
@ -23,7 +23,7 @@ from openstack.tests.functional import base

class TestDomain(base.BaseFunctionalTest):
    def setUp(self):
        super(TestDomain, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")
        i_ver = self.operator_cloud.config.get_api_version('identity')
@ -38,7 +38,7 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
    ]

    def setUp(self):
        super(TestEndpoints, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")

@ -25,7 +25,7 @@ from openstack.tests.functional import base

class TestFlavor(base.BaseFunctionalTest):
    def setUp(self):
        super(TestFlavor, self).setUp()
        super().setUp()

        # Generate a random name for flavors in this test
        self.new_item_name = self.getUniqueString('flavor')
@ -33,7 +33,7 @@ from openstack.tests.functional import base

class TestFloatingIPPool(base.BaseFunctionalTest):
    def setUp(self):
        super(TestFloatingIPPool, self).setUp()
        super().setUp()

        if not self.user_cloud._has_nova_extension('os-floating-ip-pools'):
            # Skipping this test if floating-ip-pool extension is not
@ -23,7 +23,7 @@ from openstack.tests.functional import base

class TestGroup(base.BaseFunctionalTest):
    def setUp(self):
        super(TestGroup, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")

@ -26,7 +26,7 @@ from openstack.tests.functional import base

class TestIdentity(base.KeystoneBaseFunctionalTest):
    def setUp(self):
        super(TestIdentity, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")
        self.role_prefix = 'test_role' + ''.join(
@ -22,7 +22,7 @@ from openstack.tests.functional import base

class TestMagnumServices(base.BaseFunctionalTest):
    def setUp(self):
        super(TestMagnumServices, self).setUp()
        super().setUp()
        if not self.user_cloud.has_service(
            'container-infrastructure-management'
        ):
@ -23,7 +23,7 @@ from openstack.tests.functional import base

class TestNetwork(base.BaseFunctionalTest):
    def setUp(self):
        super(TestNetwork, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")

@ -29,7 +29,7 @@ from openstack.tests.functional import base

class TestObject(base.BaseFunctionalTest):
    def setUp(self):
        super(TestObject, self).setUp()
        super().setUp()
        if not self.user_cloud.has_service('object-store'):
            self.skipTest('Object service not supported by cloud')

@ -28,7 +28,7 @@ from openstack.tests.functional import base

class TestPort(base.BaseFunctionalTest):
    def setUp(self):
        super(TestPort, self).setUp()
        super().setUp()
        # Skip Neutron tests if neutron is not present
        if not self.user_cloud.has_service('network'):
            self.skipTest('Network service not supported by cloud')
@ -118,13 +118,13 @@ class TestPort(base.BaseFunctionalTest):
        updated_port = self.user_cloud.get_port(name_or_id=port['id'])
        self.assertEqual(port.get('name'), new_port_name)
        port.pop('revision_number', None)
        port.pop(u'revision_number', None)
        port.pop('revision_number', None)
        port.pop('updated_at', None)
        port.pop('updated_at', None)
        port.pop(u'updated_at', None)
        updated_port.pop('revision_number', None)
        updated_port.pop(u'revision_number', None)
        updated_port.pop('revision_number', None)
        updated_port.pop('updated_at', None)
        updated_port.pop('updated_at', None)
        updated_port.pop(u'updated_at', None)

        self.assertEqual(port, updated_port)

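Note that stripping the u prefixes in the TestPort hunk leaves pairs of literally identical pop calls; that is harmless, because dict.pop with a default is a no-op once the key is gone:

# The second pop returns the default instead of raising KeyError.
port = {'revision_number': 7, 'name': 'demo'}
port.pop('revision_number', None)
port.pop('revision_number', None)  # key already removed; returns None
assert port == {'name': 'demo'}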
@ -26,7 +26,7 @@ from openstack.tests.functional import base

class TestProject(base.KeystoneBaseFunctionalTest):
    def setUp(self):
        super(TestProject, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")

@ -25,7 +25,7 @@ class TestProjectCleanup(base.BaseFunctionalTest):
    _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_CLEANUP'

    def setUp(self):
        super(TestProjectCleanup, self).setUp()
        super().setUp()
        if not self.user_cloud_alt:
            self.skipTest("Alternate demo cloud is required for this test")

@ -24,7 +24,7 @@ from openstack.tests.functional import base

class TestQosBandwidthLimitRule(base.BaseFunctionalTest):
    def setUp(self):
        super(TestQosBandwidthLimitRule, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")
        if not self.operator_cloud.has_service('network'):
@ -24,7 +24,7 @@ from openstack.tests.functional import base

class TestQosDscpMarkingRule(base.BaseFunctionalTest):
    def setUp(self):
        super(TestQosDscpMarkingRule, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")
        if not self.operator_cloud.has_service('network'):
@ -24,7 +24,7 @@ from openstack.tests.functional import base

class TestQosMinimumBandwidthRule(base.BaseFunctionalTest):
    def setUp(self):
        super(TestQosMinimumBandwidthRule, self).setUp()
        super().setUp()
        if not self.operator_cloud:
            self.skipTest("Operator cloud is required for this test")
        if not self.operator_cloud.has_service('network'):
Some files were not shown because too many files have changed in this diff.