Merge "trivial: Prepare for pyupgrade pre-commit hook"

Zuul 2024-05-03 19:54:49 +00:00 committed by Gerrit Code Review
commit 4a5701d874
333 changed files with 990 additions and 1087 deletions

@ -60,7 +60,7 @@ SSH_DIR = _get_resource_value(
 )
 PRIVATE_KEYPAIR_FILE = _get_resource_value(
 'private_keypair_file',
-'{ssh_dir}/id_rsa.{key}'.format(ssh_dir=SSH_DIR, key=KEYPAIR_NAME),
+f'{SSH_DIR}/id_rsa.{KEYPAIR_NAME}',
 )
 EXAMPLE_IMAGE_NAME = 'openstacksdk-example-public-image'
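For context on this kind of rewrite: an f-string renders the same text as the older str.format() call it replaces. A minimal standalone check, using placeholder values rather than the module's real SSH_DIR and KEYPAIR_NAME:

# Placeholder values; the real module derives these from its config helpers.
SSH_DIR = '/home/demo/.ssh'
KEYPAIR_NAME = 'openstacksdk-example'

old_style = '{ssh_dir}/id_rsa.{key}'.format(ssh_dir=SSH_DIR, key=KEYPAIR_NAME)
new_style = f'{SSH_DIR}/id_rsa.{KEYPAIR_NAME}'

# Both spellings produce the identical path.
assert old_style == new_style == '/home/demo/.ssh/id_rsa.openstacksdk-example'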

@ -52,7 +52,7 @@ class AcceleratorRequest(resource.Resource):
 # and its value is an ordinary JSON patch. spec:
 # https://specs.openstack.org/openstack/cyborg-specs/specs/train/implemented/cyborg-api
-converted = super(AcceleratorRequest, self)._convert_patch(patch)
+converted = super()._convert_patch(patch)
 converted = {self.id: converted}
 return converted
@ -102,11 +102,9 @@ class AcceleratorRequest(resource.Resource):
 if isinstance(self, AcceleratorRequest):
 if self.resources_key in attrs:
 attrs = attrs[self.resources_key][0]
-return super(AcceleratorRequest, self)._consume_attrs(mapping, attrs)
+return super()._consume_attrs(mapping, attrs)
 def create(self, session, base_path=None):
 # This overrides the default behavior of resource creation because
 # cyborg doesn't accept resource_key in its request.
-return super(AcceleratorRequest, self).create(
-session, prepend_key=False, base_path=base_path
-)
+return super().create(session, prepend_key=False, base_path=base_path)
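Aside on the super() rewrites that recur throughout this commit (illustrative only, with toy classes rather than the SDK's resource hierarchy): inside a method defined in a class body, Python 3's zero-argument super() resolves to the same bound proxy as the explicit two-argument form, which is why these changes are behavior-preserving:

class Base:
    def create(self, prepend_key=True):
        return f'created(prepend_key={prepend_key})'

class Child(Base):
    def create(self):
        # Equivalent to super(Child, self).create(...) inside a class body.
        return super().create(prepend_key=False)

assert Child().create() == Base().create(prepend_key=False)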

@ -39,14 +39,10 @@ class DeviceProfile(resource.Resource):
 # TODO(s_shogo): This implementation only treat [ DeviceProfile ], and
 # cannot treat multiple DeviceProfiles in list.
 def _prepare_request_body(self, patch, prepend_key):
-body = super(DeviceProfile, self)._prepare_request_body(
-patch, prepend_key
-)
+body = super()._prepare_request_body(patch, prepend_key)
 return [body]
 def create(self, session, base_path=None):
 # This overrides the default behavior of resource creation because
 # cyborg doesn't accept resource_key in its request.
-return super(DeviceProfile, self).create(
-session, prepend_key=False, base_path=base_path
-)
+return super().create(session, prepend_key=False, base_path=base_path)

@ -173,7 +173,7 @@ class Driver(resource.Resource):
 :returns: response of method call.
 """
 if verb.upper() not in ['GET', 'PUT', 'POST', 'DELETE']:
-raise ValueError('Invalid verb: {}'.format(verb))
+raise ValueError(f'Invalid verb: {verb}')
 session = self._get_session(session)
 request = self._prepare_request()

@ -275,7 +275,7 @@ class Node(_common.Resource):
 # API version 1.1 uses None instead of "available". Make it
 # consistent.
 attrs['provision_state'] = 'available'
-return super(Node, self)._consume_body_attrs(attrs)
+return super()._consume_body_attrs(attrs)
 def create(self, session, *args, **kwargs):
 """Create a remote resource based on this instance.
@ -346,9 +346,7 @@ class Node(_common.Resource):
 # Ironic cannot set provision_state itself, so marking it as unchanged
 self._clean_body_attrs({'provision_state'})
-super(Node, self).create(
-session, *args, microversion=microversion, **kwargs
-)
+super().create(session, *args, microversion=microversion, **kwargs)
 if (
 expected_provision_state == 'manageable'
@ -395,7 +393,7 @@ class Node(_common.Resource):
 # the new status.
 return self.fetch(session)
-return super(Node, self).commit(session, *args, **kwargs)
+return super().commit(session, *args, **kwargs)
 def set_provision_state(
 self,
@ -724,7 +722,7 @@ class Node(_common.Resource):
 retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
 )
-msg = "Failed to inject NMI to node {node}".format(node=self.id)
+msg = f"Failed to inject NMI to node {self.id}"
 exceptions.raise_from_response(response, error_message=msg)
 def set_power_state(self, session, target, wait=False, timeout=None):
@ -934,13 +932,13 @@ class Node(_common.Resource):
 request.url, headers=request.headers, microversion=version
 )
-msg = "Failed to validate node {node}".format(node=self.id)
+msg = f"Failed to validate node {self.id}"
 exceptions.raise_from_response(response, error_message=msg)
 result = response.json()
 if required:
 failed = [
-'%s (%s)' % (key, value.get('reason', 'no reason'))
+'{} ({})'.format(key, value.get('reason', 'no reason'))
 for key, value in result.items()
 if key in required and not value.get('result')
 ]
@ -1044,7 +1042,7 @@ class Node(_common.Resource):
 retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
 )
-msg = "Failed to set boot device for node {node}".format(node=self.id)
+msg = f"Failed to set boot device for node {self.id}"
 exceptions.raise_from_response(response, error_message=msg)
 def get_supported_boot_devices(self, session):
@ -1109,7 +1107,7 @@ class Node(_common.Resource):
 retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
 )
-msg = "Failed to change boot mode for node {node}".format(node=self.id)
+msg = f"Failed to change boot mode for node {self.id}"
 exceptions.raise_from_response(response, error_message=msg)
 def set_secure_boot(self, session, target):
@ -1243,7 +1241,7 @@ class Node(_common.Resource):
 retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
 )
-msg = "Failed to set traits for node {node}".format(node=self.id)
+msg = f"Failed to set traits for node {self.id}"
 exceptions.raise_from_response(response, error_message=msg)
 self.traits = traits
@ -1261,7 +1259,7 @@ class Node(_common.Resource):
 version = self._get_microversion(session, action='commit')
 request = self._prepare_request(requires_id=True)
 request.url = utils.urljoin(
-request.url, 'vendor_passthru?method={}'.format(method)
+request.url, f'vendor_passthru?method={method}'
 )
 call = getattr(session, verb.lower())
@ -1439,7 +1437,7 @@ class Node(_common.Resource):
 )
 else:
-return super(Node, self).patch(
+return super().patch(
 session, patch=patch, retry_on_conflict=retry_on_conflict
 )

@ -71,7 +71,7 @@ class Introspection(resource.Resource):
 microversion=version,
 retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
 )
-msg = "Failed to abort introspection for node {id}".format(id=self.id)
+msg = f"Failed to abort introspection for node {self.id}"
 exceptions.raise_from_response(response, error_message=msg)
 def get_data(self, session, processed=True):

@ -43,7 +43,7 @@ class BaseBlockStorageProxy(proxy.Proxy, metaclass=abc.ABCMeta):
 )
 volume_id = volume_obj['id']
 data = self.post(
-'/volumes/{id}/action'.format(id=volume_id),
+f'/volumes/{volume_id}/action',
 json={
 'os-volume_upload_image': {
 'force': allow_duplicates,

@ -87,7 +87,7 @@ class Service(resource.Resource):
 if ignore_missing:
 return None
 raise exceptions.ResourceNotFound(
-"No %s found for %s" % (cls.__name__, name_or_id)
+f"No {cls.__name__} found for {name_or_id}"
 )
 def commit(self, session, prepend_key=False, **kwargs):

@ -51,7 +51,7 @@ class Type(resource.Resource):
 for k, v in extra_specs.items():
 if not isinstance(v, str):
 raise ValueError(
-"The value for %s (%s) must be a text string" % (k, v)
+f"The value for {k} ({v}) must be a text string"
 )
 if key is not None:

@ -117,7 +117,7 @@ class ComputeCloudMixin:
 )
 def _nova_extensions(self):
-extensions = set([e.alias for e in self.compute.extensions()])
+extensions = {e.alias for e in self.compute.extensions()}
 return extensions
 def _has_nova_extension(self, extension_name):
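For the _nova_extensions rewrite above (a toy stand-in for the Compute extensions call, not the real proxy): a set comprehension builds the same set as wrapping a list comprehension in set(), just without the intermediate list:

class FakeExtension:
    def __init__(self, alias):
        self.alias = alias

# Hypothetical stand-in for self.compute.extensions().
fake_extensions = [FakeExtension('os-keypairs'), FakeExtension('os-networks')]

old_style = set([e.alias for e in fake_extensions])
new_style = {e.alias for e in fake_extensions}
assert old_style == new_style == {'os-keypairs', 'os-networks'}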
@ -1229,7 +1229,7 @@ class ComputeCloudMixin:
 raise exceptions.SDKException(
 'Server reached ACTIVE state without being'
 ' allocated an IP address AND then could not'
-' be deleted: {0}'.format(e),
+' be deleted: {}'.format(e),
 extra_data=dict(server=server),
 )
 raise exceptions.SDKException(
@ -1291,9 +1291,7 @@ class ComputeCloudMixin:
""" """
server = self.get_server(name_or_id, bare=True) server = self.get_server(name_or_id, bare=True)
if not server: if not server:
raise exceptions.SDKException( raise exceptions.SDKException(f'Invalid Server {name_or_id}')
'Invalid Server {server}'.format(server=name_or_id)
)
self.compute.set_server_metadata(server=server.id, **metadata) self.compute.set_server_metadata(server=server.id, **metadata)
@ -1311,9 +1309,7 @@ class ComputeCloudMixin:
""" """
server = self.get_server(name_or_id, bare=True) server = self.get_server(name_or_id, bare=True)
if not server: if not server:
raise exceptions.SDKException( raise exceptions.SDKException(f'Invalid Server {name_or_id}')
'Invalid Server {server}'.format(server=name_or_id)
)
self.compute.delete_server_metadata( self.compute.delete_server_metadata(
server=server.id, keys=metadata_keys server=server.id, keys=metadata_keys
@ -1545,7 +1541,7 @@ class ComputeCloudMixin:
 return True
 except exceptions.SDKException:
 raise exceptions.SDKException(
-"Unable to delete flavor {name}".format(name=name_or_id)
+f"Unable to delete flavor {name_or_id}"
 )
 def set_flavor_specs(self, flavor_id, extra_specs):

@ -104,9 +104,7 @@ class DnsCloudMixin:
 try:
 return self.dns.create_zone(**zone)
 except exceptions.SDKException:
-raise exceptions.SDKException(
-"Unable to create zone {name}".format(name=name)
-)
+raise exceptions.SDKException(f"Unable to create zone {name}")
 @_utils.valid_kwargs('email', 'description', 'ttl', 'masters')
 def update_zone(self, name_or_id, **kwargs):

@ -180,14 +180,14 @@ class FloatingIPCloudMixin:
 :returns: A floating ip
 `:class:`~openstack.network.v2.floating_ip.FloatingIP`.
 """
-error_message = "Error getting floating ip with ID {id}".format(id=id)
+error_message = f"Error getting floating ip with ID {id}"
 if self._use_neutron_floating():
 fip = self.network.get_ip(id)
 return fip
 else:
 data = proxy._json_response(
-self.compute.get('/os-floating-ips/{id}'.format(id=id)),
+self.compute.get(f'/os-floating-ips/{id}'),
 error_message=error_message,
 )
 return self._normalize_floating_ip(
@ -230,7 +230,7 @@ class FloatingIPCloudMixin:
 if floating_network_id is None:
 raise exceptions.NotFoundException(
-"unable to find external network {net}".format(net=network)
+f"unable to find external network {network}"
 )
 else:
 floating_network_id = self._get_floating_network_id()
@ -270,7 +270,7 @@ class FloatingIPCloudMixin:
""" """
with _utils.openstacksdk_exceptions( with _utils.openstacksdk_exceptions(
"Unable to create floating IP in pool {pool}".format(pool=pool) f"Unable to create floating IP in pool {pool}"
): ):
if pool is None: if pool is None:
pools = self.list_floating_ip_pools() pools = self.list_floating_ip_pools()
@ -442,7 +442,7 @@ class FloatingIPCloudMixin:
 except exceptions.ResourceNotFound:
 raise exceptions.NotFoundException(
 "unable to find network for floating ips with ID "
-"{0}".format(network_name_or_id)
+"{}".format(network_name_or_id)
 )
 network_id = network['id']
 else:
@ -516,7 +516,7 @@ class FloatingIPCloudMixin:
 def _nova_create_floating_ip(self, pool=None):
 with _utils.openstacksdk_exceptions(
-"Unable to create floating IP in pool {pool}".format(pool=pool)
+f"Unable to create floating IP in pool {pool}"
 ):
 if pool is None:
 pools = self.list_floating_ip_pools()
@ -599,9 +599,7 @@ class FloatingIPCloudMixin:
 def _nova_delete_floating_ip(self, floating_ip_id):
 try:
 proxy._json_response(
-self.compute.delete(
-'/os-floating-ips/{id}'.format(id=floating_ip_id)
-),
+self.compute.delete(f'/os-floating-ips/{floating_ip_id}'),
 error_message='Unable to delete floating IP {fip_id}'.format(
 fip_id=floating_ip_id
 ),
@ -738,7 +736,7 @@ class FloatingIPCloudMixin:
 )
 if not port:
 raise exceptions.SDKException(
-"unable to find a port for server {0}".format(server['id'])
+"unable to find a port for server {}".format(server['id'])
 )
 floating_ip_args = {'port_id': port['id']}
@ -753,7 +751,7 @@ class FloatingIPCloudMixin:
 f_ip = self.get_floating_ip(id=floating_ip_id)
 if f_ip is None:
 raise exceptions.SDKException(
-"unable to find floating IP {0}".format(floating_ip_id)
+f"unable to find floating IP {floating_ip_id}"
 )
 error_message = "Error attaching IP {ip} to instance {id}".format(
 ip=floating_ip_id, id=server_id
@ -763,7 +761,7 @@ class FloatingIPCloudMixin:
 body['fixed_address'] = fixed_address
 return proxy._json_response(
 self.compute.post(
-'/servers/{server_id}/action'.format(server_id=server_id),
+f'/servers/{server_id}/action',
 json=dict(addFloatingIp=body),
 ),
 error_message=error_message,
@ -806,13 +804,11 @@ class FloatingIPCloudMixin:
 self.network.update_ip(floating_ip_id, port_id=None)
 except exceptions.SDKException:
 raise exceptions.SDKException(
-(
 "Error detaching IP {ip} from "
 "server {server_id}".format(
 ip=floating_ip_id, server_id=server_id
 )
 )
-)
 return True
@ -820,14 +816,14 @@ class FloatingIPCloudMixin:
 f_ip = self.get_floating_ip(id=floating_ip_id)
 if f_ip is None:
 raise exceptions.SDKException(
-"unable to find floating IP {0}".format(floating_ip_id)
+f"unable to find floating IP {floating_ip_id}"
 )
 error_message = "Error detaching IP {ip} from instance {id}".format(
 ip=floating_ip_id, id=server_id
 )
 return proxy._json_response(
 self.compute.post(
-'/servers/{server_id}/action'.format(server_id=server_id),
+f'/servers/{server_id}/action',
 json=dict(
 removeFloatingIp=dict(address=f_ip['floating_ip_address'])
 ),
@ -1222,7 +1218,7 @@ class FloatingIPCloudMixin:
 return port, fixed_address
 raise exceptions.SDKException(
 "unable to find a free fixed IPv4 address for server "
-"{0}".format(server['id'])
+"{}".format(server['id'])
 )
 # unfortunately a port can have more than one fixed IP:
 # we can't use the search_ports filtering for fixed_address as

@ -384,31 +384,25 @@ class IdentityCloudMixin:
 try:
 user = self.get_user(name_or_id, **kwargs)
 if not user:
-self.log.debug(
-"User {0} not found for deleting".format(name_or_id)
-)
+self.log.debug(f"User {name_or_id} not found for deleting")
 return False
 self.identity.delete_user(user)
 return True
 except exceptions.SDKException:
-self.log.exception(
-"Error in deleting user {user}".format(user=name_or_id)
-)
+self.log.exception(f"Error in deleting user {name_or_id}")
 return False
 def _get_user_and_group(self, user_name_or_id, group_name_or_id):
 user = self.get_user(user_name_or_id)
 if not user:
-raise exceptions.SDKException(
-'User {user} not found'.format(user=user_name_or_id)
-)
+raise exceptions.SDKException(f'User {user_name_or_id} not found')
 group = self.get_group(group_name_or_id)
 if not group:
 raise exceptions.SDKException(
-'Group {user} not found'.format(user=group_name_or_id)
+f'Group {group_name_or_id} not found'
 )
 return (user, group)
@ -731,7 +725,7 @@ class IdentityCloudMixin:
 self.identity.delete_endpoint(id)
 return True
 except exceptions.SDKException:
-self.log.exception("Failed to delete endpoint {id}".format(id=id))
+self.log.exception(f"Failed to delete endpoint {id}")
 return False
 def create_domain(self, name, description=None, enabled=True):
@ -778,7 +772,7 @@ class IdentityCloudMixin:
 dom = self.get_domain(None, name_or_id)
 if dom is None:
 raise exceptions.SDKException(
-"Domain {0} not found for updating".format(name_or_id)
+f"Domain {name_or_id} not found for updating"
 )
 domain_id = dom['id']
@ -1006,7 +1000,7 @@ class IdentityCloudMixin:
 group = self.identity.find_group(name_or_id, **kwargs)
 if group is None:
 raise exceptions.SDKException(
-"Group {0} not found for updating".format(name_or_id)
+f"Group {name_or_id} not found for updating"
 )
 group_ref = {}
@ -1039,9 +1033,7 @@ class IdentityCloudMixin:
 return True
 except exceptions.SDKException:
-self.log.exception(
-"Unable to delete group {name}".format(name=name_or_id)
-)
+self.log.exception(f"Unable to delete group {name_or_id}")
 return False
 def list_roles(self, **kwargs):
@ -1235,9 +1227,7 @@ class IdentityCloudMixin:
 self.identity.delete_role(role)
 return True
 except exceptions.SDKExceptions:
-self.log.exception(
-"Unable to delete role {name}".format(name=name_or_id)
-)
+self.log.exception(f"Unable to delete role {name_or_id}")
 raise
 def _get_grant_revoke_params(
@ -1261,7 +1251,7 @@ class IdentityCloudMixin:
 data['role'] = self.identity.find_role(name_or_id=role)
 if not data['role']:
-raise exceptions.SDKException('Role {0} not found.'.format(role))
+raise exceptions.SDKException(f'Role {role} not found.')
 if user:
 # use cloud.get_user to save us from bad searching by name

@ -170,7 +170,7 @@ class ImageCloudMixin:
 return image
 elif image['status'] == 'error':
 raise exceptions.SDKException(
-'Image {image} hit error state'.format(image=image_id)
+f'Image {image_id} hit error state'
 )
 def delete_image(

@ -2772,7 +2772,7 @@ class NetworkCloudMixin:
 port = self.get_port(name_or_id=name_or_id)
 if port is None:
 raise exceptions.SDKException(
-"failed to find port '{port}'".format(port=name_or_id)
+f"failed to find port '{name_or_id}'"
 )
 return self.network.update_port(port, **kwargs)
@ -2813,7 +2813,7 @@ class NetworkCloudMixin:
 port = self.get_port(name_or_id, filters)
 if not port:
 raise exceptions.ResourceNotFound(
-'Port {id} not found'.format(id=name_or_id)
+f'Port {name_or_id} not found'
 )
 ids_list.append(port['id'])
 return ids_list

@ -453,10 +453,9 @@ class ObjectStoreCloudMixin:
 error.
 """
 try:
-for ret in self.object_store.stream_object(
+yield from self.object_store.stream_object(
 obj, container, chunk_size=resp_chunk_size
-):
-yield ret
+)
 except exceptions.ResourceNotFound:
 return
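Background on the stream_object change (a sketch with an in-memory generator instead of the object-store API): yield from delegates to an iterable and yields exactly the same items as the explicit for/yield loop it replaces:

def _chunks():
    # Stand-in for object_store.stream_object(...).
    yield b'first'
    yield b'second'

def stream_old():
    for ret in _chunks():
        yield ret

def stream_new():
    yield from _chunks()

assert list(stream_old()) == list(stream_new()) == [b'first', b'second']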

@ -419,9 +419,7 @@ class SecurityGroupCloudMixin:
 else:
 try:
 exceptions.raise_from_response(
-self.compute.delete(
-'/os-security-group-rules/{id}'.format(id=rule_id)
-)
+self.compute.delete(f'/os-security-group-rules/{rule_id}')
 )
 except exceptions.NotFoundException:
 return False

@ -389,9 +389,7 @@ def range_filter(data, key, range_exp):
 # If parsing the range fails, it must be a bad value.
 if val_range is None:
-raise exceptions.SDKException(
-"Invalid range value: {value}".format(value=range_exp)
-)
+raise exceptions.SDKException(f"Invalid range value: {range_exp}")
 op = val_range[0]
 if op:

@ -28,7 +28,7 @@ class OpenStackCloudUnavailableFeature(OpenStackCloudException):
 # Backwards compat. These are deprecated and should not be used in new code.
 class OpenStackCloudCreateException(OpenStackCloudException):
 def __init__(self, resource, resource_id, extra_data=None, **kwargs):
-super(OpenStackCloudCreateException, self).__init__(
+super().__init__(
 message="Error creating {resource}: {resource_id}".format(
 resource=resource, resource_id=resource_id
 ),

@ -265,7 +265,7 @@ def find_best_address(addresses, public=False, cloud_public=True):
 connect_socket.settimeout(1)
 connect_socket.connect(sa)
 return address
-except socket.error:
+except OSError:
 # Sometimes a "no route to address" type error
 # will fail fast, but can often come alive
 # when retried.
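Background on the except clause above: since Python 3.3, socket.error is simply an alias of OSError, and the connection failures this code retries on are OSError subclasses, so the caught exception set is unchanged. A quick standalone check:

import socket

# socket.error has been an alias of OSError since Python 3.3.
assert socket.error is OSError
# Typical "no route" / "refused" / timeout failures are OSError subclasses.
assert issubclass(ConnectionRefusedError, OSError)
assert issubclass(socket.timeout, OSError)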
@ -370,7 +370,7 @@ def get_groups_from_server(cloud, server, server_vars):
 groups.append(region)
 # And one by cloud_region
-groups.append("%s_%s" % (cloud_name, region))
+groups.append(f"{cloud_name}_{region}")
 # Check if group metadata key in servers' metadata
 group = server['metadata'].get('group')
@ -385,17 +385,17 @@ def get_groups_from_server(cloud, server, server_vars):
 for key in ('flavor', 'image'):
 if 'name' in server_vars[key]:
-groups.append('%s-%s' % (key, server_vars[key]['name']))
+groups.append('{}-{}'.format(key, server_vars[key]['name']))
 for key, value in iter(server['metadata'].items()):
-groups.append('meta-%s_%s' % (key, value))
+groups.append(f'meta-{key}_{value}')
 az = server_vars.get('az', None)
 if az:
 # Make groups for az, region_az and cloud_region_az
 groups.append(az)
-groups.append('%s_%s' % (region, az))
-groups.append('%s_%s_%s' % (cloud.name, region, az))
+groups.append(f'{region}_{az}')
+groups.append(f'{cloud.name}_{region}_{az}')
 return groups

@ -64,7 +64,7 @@ class _OpenStackCloudMixin:
 _SHADE_OBJECT_AUTOCREATE_KEY = 'x-object-meta-x-shade-autocreated'
 def __init__(self):
-super(_OpenStackCloudMixin, self).__init__()
+super().__init__()
 self.log = _log.setup_logging('openstack')
@ -172,10 +172,10 @@ class _OpenStackCloudMixin:
 name_key = 'username'
 else:
 name_key = 'project_name'
-id_key = '{prefix}_id'.format(prefix=prefix)
+id_key = f'{prefix}_id'
 pop_keys(params, kwargs, name_key, id_key)
-id_key = '{prefix}_domain_id'.format(prefix=prefix)
-name_key = '{prefix}_domain_name'.format(prefix=prefix)
+id_key = f'{prefix}_domain_id'
+name_key = f'{prefix}_domain_name'
 pop_keys(params, kwargs, name_key, id_key)
 for key, value in kwargs.items():
@ -282,14 +282,14 @@ class _OpenStackCloudMixin:
 if namespace is None:
 name_key = self.name
 else:
-name_key = '%s:%s' % (self.name, namespace)
+name_key = f'{self.name}:{namespace}'
 def generate_key(*args, **kwargs):
 # TODO(frickler): make handling arg keys actually work
 arg_key = ''
 kw_keys = sorted(kwargs.keys())
 kwargs_key = ','.join(
-['%s:%s' % (k, kwargs[k]) for k in kw_keys if k != 'cache']
+[f'{k}:{kwargs[k]}' for k in kw_keys if k != 'cache']
 )
 ans = "_".join([str(name_key), fname, arg_key, kwargs_key])
 return ans

@ -57,7 +57,7 @@ class QuotaSet(resource.Resource):
 error_message=None,
 **params
 ):
-return super(QuotaSet, self).fetch(
+return super().fetch(
 session,
 requires_id=False,
 base_path=base_path,

@ -103,7 +103,7 @@ class Flavor(resource.Resource):
 # is_public is ternary - None means give all flavors.
 # Force it to string to avoid requests skipping it.
 params['is_public'] = 'None'
-return super(Flavor, cls).list(
+return super().list(
 session, paginated=paginated, base_path=base_path, **params
 )

@ -60,7 +60,7 @@ class Keypair(resource.Resource):
 # it **SOMETIMES** keypair picks up id and not name. This is a hammer.
 if 'id' in attrs:
 attrs.setdefault('name', attrs.pop('id'))
-return super(Keypair, self)._consume_attrs(mapping, attrs)
+return super()._consume_attrs(mapping, attrs)
 @classmethod
 def existing(cls, connection=None, **kwargs):

@ -113,7 +113,7 @@ class Limits(resource.Resource):
""" """
# TODO(mordred) We shouldn't have to subclass just to declare # TODO(mordred) We shouldn't have to subclass just to declare
# requires_id = False. # requires_id = False.
return super(Limits, self).fetch( return super().fetch(
session=session, session=session,
requires_id=requires_id, requires_id=requires_id,
error_message=error_message, error_message=error_message,

@ -268,7 +268,7 @@ class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin):
 base_path=None,
 **kwargs,
 ):
-request = super(Server, self)._prepare_request(
+request = super()._prepare_request(
 requires_id=requires_id,
 prepend_key=prepend_key,
 base_path=base_path,

@ -55,6 +55,6 @@ class ServerRemoteConsole(resource.Resource):
 raise ValueError(
 'Console type webmks is not supported on server side'
 )
-return super(ServerRemoteConsole, self).create(
+return super().create(
 session, prepend_key=prepend_key, base_path=base_path, **params
 )

@ -84,12 +84,12 @@ class Service(resource.Resource):
 if ignore_missing:
 return None
 raise exceptions.ResourceNotFound(
-"No %s found for %s" % (cls.__name__, name_or_id)
+f"No {cls.__name__} found for {name_or_id}"
 )
 def commit(self, session, prepend_key=False, **kwargs):
 # we need to set prepend_key to false
-return super(Service, self).commit(
+return super().commit(
 session,
 prepend_key=prepend_key,
 **kwargs,

@ -19,5 +19,5 @@ from openstack.config import cloud_region
 class CloudConfig(cloud_region.CloudRegion):
 def __init__(self, name, region, config, **kwargs):
-super(CloudConfig, self).__init__(name, region, config, **kwargs)
+super().__init__(name, region, config, **kwargs)
 self.region = region

@ -76,7 +76,7 @@ def _make_key(key, service_type):
 def _disable_service(config, service_type, reason=None):
 service_type = service_type.lower().replace('-', '_')
-key = 'has_{service_type}'.format(service_type=service_type)
+key = f'has_{service_type}'
 config[key] = False
 if reason:
 d_key = _make_key('disabled_reason', service_type)
@ -1217,7 +1217,7 @@ class CloudRegion:
 def has_service(self, service_type):
 service_type = service_type.lower().replace('-', '_')
-key = 'has_{service_type}'.format(service_type=service_type)
+key = f'has_{service_type}'
 return self.config.get(
 key, self._service_type_manager.is_official(service_type)
 )
@ -1227,7 +1227,7 @@ class CloudRegion:
 def enable_service(self, service_type):
 service_type = service_type.lower().replace('-', '_')
-key = 'has_{service_type}'.format(service_type=service_type)
+key = f'has_{service_type}'
 self.config[key] = True
 def get_disabled_reason(self, service_type):

@ -46,7 +46,7 @@ def get_defaults(json_path=_json_path):
 cert=None,
 key=None,
 )
-with open(json_path, 'r') as json_file:
+with open(json_path) as json_file:
 updates = json.load(json_file)
 if updates is not None:
 tmp_defaults.update(updates)
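On dropping the explicit 'r': text-read mode is open()'s default, so open(json_path) behaves identically to open(json_path, 'r'). A self-contained illustration using a temporary file rather than the packaged defaults JSON:

import json
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
    json.dump({'interface': 'public'}, f)
    path = f.name

# 'r' (text read) is the default mode, so both spellings read the same data.
with open(path) as json_file:
    assert json.load(json_file) == {'interface': 'public'}

os.remove(path)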

@ -411,15 +411,13 @@ class OpenStackConfig:
 ret[newkey] = os.environ[k]
 # If the only environ keys are selectors or behavior modification,
 # don't return anything
-selectors = set(
-[
+selectors = {
 'OS_CLOUD',
 'OS_REGION_NAME',
 'OS_CLIENT_CONFIG_FILE',
 'OS_CLIENT_SECURE_FILE',
 'OS_CLOUD_NAME',
-]
-)
+}
 if set(environkeys) - selectors:
 return ret
 return None
@ -456,12 +454,12 @@ class OpenStackConfig:
 for path in filelist:
 if os.path.exists(path):
 try:
-with open(path, 'r') as f:
+with open(path) as f:
 if path.endswith('json'):
 return path, json.load(f)
 else:
 return path, yaml.safe_load(f)
-except IOError as e:
+except OSError as e:
 if e.errno == errno.EACCES:
 # Can't access file so let's continue to the next
 # file
@ -560,9 +558,7 @@ class OpenStackConfig:
 # Only validate cloud name if one was given
 if name and name not in self.cloud_config['clouds']:
-raise exceptions.ConfigException(
-"Cloud {name} was not found.".format(name=name)
-)
+raise exceptions.ConfigException(f"Cloud {name} was not found.")
 our_cloud = self.cloud_config['clouds'].get(name, dict())
 if profile:
@ -1440,7 +1436,7 @@ class OpenStackConfig:
 try:
 with open(config_file) as fh:
 cur_config = yaml.safe_load(fh)
-except IOError as e:
+except OSError as e:
 # Not no such file
 if e.errno != 2:
 raise
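The IOError changes in this file work the same way as the socket.error one: IOError has been an alias of OSError since Python 3.3, and the errno attribute is unchanged, so checks like e.errno == errno.EACCES (or != 2 above) keep behaving as before. A minimal sketch using an invented path:

import errno

assert IOError is OSError

try:
    open('/no-such-directory-demo/clouds.yaml')  # hypothetical missing path
except OSError as e:
    # The errno attribute survives the rename; ENOENT is errno 2.
    assert e.errno == errno.ENOENT == 2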

@ -33,11 +33,11 @@ def _get_vendor_defaults():
 global _VENDOR_DEFAULTS
 if not _VENDOR_DEFAULTS:
 for vendor in glob.glob(os.path.join(_VENDORS_PATH, '*.yaml')):
-with open(vendor, 'r') as f:
+with open(vendor) as f:
 vendor_data = yaml.safe_load(f)
 _VENDOR_DEFAULTS[vendor_data['name']] = vendor_data['profile']
 for vendor in glob.glob(os.path.join(_VENDORS_PATH, '*.json')):
-with open(vendor, 'r') as f:
+with open(vendor) as f:
 vendor_data = json.load(f)
 _VENDOR_DEFAULTS[vendor_data['name']] = vendor_data['profile']
 return _VENDOR_DEFAULTS

@ -69,7 +69,7 @@ class Resource(resource.Resource):
 if ignore_missing:
 return None
 raise exceptions.ResourceNotFound(
-"No %s found for %s" % (cls.__name__, name_or_id)
+f"No {cls.__name__} found for {name_or_id}"
 )
 @classmethod

@ -29,21 +29,21 @@ class SDKException(Exception):
 def __init__(self, message=None, extra_data=None):
 self.message = self.__class__.__name__ if message is None else message
 self.extra_data = extra_data
-super(SDKException, self).__init__(self.message)
+super().__init__(self.message)
 class EndpointNotFound(SDKException):
 """A mismatch occurred between what the client and server expect."""
 def __init__(self, message=None):
-super(EndpointNotFound, self).__init__(message)
+super().__init__(message)
 class InvalidResponse(SDKException):
 """The response from the server is not valid for this request."""
 def __init__(self, response):
-super(InvalidResponse, self).__init__()
+super().__init__()
 self.response = response
@ -51,7 +51,7 @@ class InvalidRequest(SDKException):
"""The request to the server is not valid.""" """The request to the server is not valid."""
def __init__(self, message=None): def __init__(self, message=None):
super(InvalidRequest, self).__init__(message) super().__init__(message)
class HttpException(SDKException, _rex.HTTPError): class HttpException(SDKException, _rex.HTTPError):
@ -111,7 +111,7 @@ class HttpException(SDKException, _rex.HTTPError):
 remote_error += str(self.details)
 return "{message}: {remote_error}".format(
-message=super(HttpException, self).__str__(),
+message=super().__str__(),
 remote_error=remote_error,
 )
@ -142,12 +142,12 @@ class MethodNotSupported(SDKException):
 except AttributeError:
 name = resource.__class__.__name__
-message = 'The %s method is not supported for %s.%s' % (
+message = 'The {} method is not supported for {}.{}'.format(
 method,
 resource.__module__,
 name,
 )
-super(MethodNotSupported, self).__init__(message=message)
+super().__init__(message=message)
 class DuplicateResource(SDKException):
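A note on the '%s' to '{}' rewrites such as the one above (a standalone sketch with literal sample values, not the SDK's exception machinery): positional '{}' fields in str.format() fill in left to right exactly like '%s', so the rendered message is unchanged:

method, module, name = 'create', 'openstack.compute.v2.server', 'Server'

old_msg = 'The %s method is not supported for %s.%s' % (method, module, name)
new_msg = 'The {} method is not supported for {}.{}'.format(method, module, name)

assert old_msg == new_msg == (
    'The create method is not supported for openstack.compute.v2.server.Server'
)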

@ -46,7 +46,7 @@ class ConnectionFixture(fixtures.Fixture):
 }
 def __init__(self, suburl=False, project_id=None, *args, **kwargs):
-super(ConnectionFixture, self).__init__(*args, **kwargs)
+super().__init__(*args, **kwargs)
 self._endpoint_templates = _ENDPOINT_TEMPLATES
 if suburl:
 self.use_suburl()

@ -22,7 +22,7 @@ def _verify_checksum(md5, checksum):
 digest = md5.hexdigest()
 if digest != checksum:
 raise exceptions.InvalidResponse(
-"checksum mismatch: %s != %s" % (checksum, digest)
+f"checksum mismatch: {checksum} != {digest}"
 )

@ -295,7 +295,7 @@ class Proxy(proxy.Proxy):
 image = self._connection._get_and_munchify(
 'image',
 self.put(
-'/images/{id}'.format(id=image.id),
+f'/images/{image.id}',
 headers=headers,
 data=image_data,
 ),
@ -303,7 +303,7 @@ class Proxy(proxy.Proxy):
 except exc.HttpException:
 self.log.debug("Deleting failed upload of image %s", name)
 try:
-self.delete('/images/{id}'.format(id=image.id))
+self.delete(f'/images/{image.id}')
 except exc.HttpException:
 # We're just trying to clean up - if it doesn't work - shrug
 self.log.warning(
@ -434,10 +434,10 @@ class Proxy(proxy.Proxy):
 img_props = {}
 for k, v in iter(properties.items()):
 if image.properties.get(k, None) != v:
-img_props['x-image-meta-{key}'.format(key=k)] = v
+img_props[f'x-image-meta-{k}'] = v
 if not img_props:
 return False
-self.put('/images/{id}'.format(id=image.id), headers=img_props)
+self.put(f'/images/{image.id}', headers=img_props)
 return True
 def update_image_properties(
@ -469,7 +469,7 @@ class Proxy(proxy.Proxy):
 for k, v in iter(kwargs.items()):
 if v and k in ['ramdisk', 'kernel']:
 v = self._connection.get_image_id(v)
-k = '{0}_id'.format(k)
+k = f'{k}_id'
 img_props[k] = v
 return self._update_image_properties(image, meta, img_props)

@ -135,5 +135,5 @@ class Image(resource.Resource, _download.DownloadMixin):
 if ignore_missing:
 return None
 raise exceptions.ResourceNotFound(
-"No %s found for %s" % (cls.__name__, name_or_id)
+f"No {cls.__name__} found for {name_or_id}"
 )

@ -598,9 +598,7 @@ class Proxy(proxy.Proxy):
 self.log.debug("Image creation failed", exc_info=True)
 raise
 except Exception as e:
-raise exceptions.SDKException(
-"Image creation failed: {message}".format(message=str(e))
-)
+raise exceptions.SDKException(f"Image creation failed: {str(e)}")
 def _make_v2_image_params(self, meta, properties):
 ret: ty.Dict = {}
@ -949,7 +947,7 @@ class Proxy(proxy.Proxy):
 for k, v in iter(kwargs.items()):
 if v and k in ['ramdisk', 'kernel']:
 v = self._connection.get_image_id(v)
-k = '{0}_id'.format(k)
+k = f'{k}_id'
 properties[k] = v
 img_props = image.properties.copy()
@ -1840,7 +1838,7 @@ class Proxy(proxy.Proxy):
 if task.status.lower() == status.lower():
 return task
-name = "{res}:{id}".format(res=task.__class__.__name__, id=task.id)
+name = f"{task.__class__.__name__}:{task.id}"
 msg = "Timeout waiting for {name} to transition to {status}".format(
 name=name, status=status
 )

@ -385,7 +385,7 @@ class Image(resource.Resource, tag.TagMixin, _download.DownloadMixin):
 base_path=None,
 **kwargs,
 ):
-request = super(Image, self)._prepare_request(
+request = super()._prepare_request(
 requires_id=requires_id,
 prepend_key=prepend_key,
 patch=patch,
@ -403,7 +403,7 @@ class Image(resource.Resource, tag.TagMixin, _download.DownloadMixin):
 @classmethod
 def find(cls, session, name_or_id, ignore_missing=True, **params):
 # Do a regular search first (ignoring missing)
-result = super(Image, cls).find(session, name_or_id, True, **params)
+result = super().find(session, name_or_id, True, **params)
 if result:
 return result
@ -419,5 +419,5 @@ class Image(resource.Resource, tag.TagMixin, _download.DownloadMixin):
 if ignore_missing:
 return None
 raise exceptions.ResourceNotFound(
-"No %s found for %s" % (cls.__name__, name_or_id)
+f"No {cls.__name__} found for {name_or_id}"
 )

@ -114,9 +114,7 @@ class AmphoraConfig(resource.Resource):
 # The default _update code path also has no
 # way to pass has_body into this function, so overriding the method here.
 def commit(self, session, base_path=None):
-return super(AmphoraConfig, self).commit(
-session, base_path=base_path, has_body=False
-)
+return super().commit(session, base_path=base_path, has_body=False)
 class AmphoraFailover(resource.Resource):
@ -139,6 +137,4 @@ class AmphoraFailover(resource.Resource):
 # The default _update code path also has no
 # way to pass has_body into this function, so overriding the method here.
 def commit(self, session, base_path=None):
-return super(AmphoraFailover, self).commit(
-session, base_path=base_path, has_body=False
-)
+return super().commit(session, base_path=base_path, has_body=False)

@ -146,6 +146,4 @@ class LoadBalancerFailover(resource.Resource):
 # The default _update code path also has no
 # way to pass has_body into this function, so overriding the method here.
 def commit(self, session, base_path=None):
-return super(LoadBalancerFailover, self).commit(
-session, base_path=base_path, has_body=False
-)
+return super().commit(session, base_path=base_path, has_body=False)

@ -44,7 +44,7 @@ class Quota(resource.Resource):
 def _prepare_request(
 self, requires_id=True, base_path=None, prepend_key=False, **kwargs
 ):
-_request = super(Quota, self)._prepare_request(
+_request = super()._prepare_request(
 requires_id, prepend_key, base_path=base_path
 )
 if self.resource_key in _request.body:

@ -56,7 +56,7 @@ class Claim(resource.Resource):
 project_id = resource.Header("X-PROJECT-ID")
 def _translate_response(self, response, has_body=True):
-super(Claim, self)._translate_response(response, has_body=has_body)
+super()._translate_response(response, has_body=has_body)
 if has_body and self.location:
 # Extract claim ID from location
 self.id = self.location.split("claims/")[1]

@ -28,7 +28,7 @@ class NetworkResource(resource.Resource):
 if_revision=None,
 **kwargs
 ):
-req = super(NetworkResource, self)._prepare_request(
+req = super()._prepare_request(
 requires_id=requires_id,
 prepend_key=prepend_key,
 patch=patch,

@ -63,9 +63,7 @@ class Quota(resource.Resource):
 def _prepare_request(
 self, requires_id=True, prepend_key=False, base_path=None, **kwargs
 ):
-_request = super(Quota, self)._prepare_request(
-requires_id, prepend_key
-)
+_request = super()._prepare_request(requires_id, prepend_key)
 if self.resource_key in _request.body:
 _body = _request.body[self.resource_key]
 else:

@ -96,9 +96,7 @@ class SecurityGroupRule(_base.NetworkResource, tag.TagMixin):
 updated_at = resource.Body('updated_at')
 def _prepare_request(self, *args, **kwargs):
-_request = super(SecurityGroupRule, self)._prepare_request(
-*args, **kwargs
-)
+_request = super()._prepare_request(*args, **kwargs)
 # Old versions of Neutron do not handle being passed a
 # remote_address_group_id and raise and error. Remove it from
 # the body if it is blank.

@ -97,7 +97,7 @@ class BaseResource(resource.Resource):
 # This must happen before invoking parent _translate_response, cause it
 # pops known headers.
 self._last_headers = response.headers.copy()
-super(BaseResource, self)._translate_response(
+super()._translate_response(
 response, has_body=has_body, error_message=error_message
 )
 self._set_metadata(response.headers)

@ -652,7 +652,7 @@ class Proxy(proxy.Proxy):
 # While Object Storage usually expects the name to be
 # urlencoded in most requests, the SLO manifest requires
 # plain object names instead.
-path='/{name}'.format(name=parse.unquote(name)),
+path=f'/{parse.unquote(name)}',
 size_bytes=segment.length,
 )
 )
@ -808,7 +808,7 @@ class Proxy(proxy.Proxy):
 continue
 name = self._object_name_from_url(result.url)
 for entry in manifest:
-if entry['path'] == '/{name}'.format(name=parse.unquote(name)):
+if entry['path'] == f'/{parse.unquote(name)}':
 entry['etag'] = result.headers['Etag']
 def get_info(self):
@ -931,7 +931,7 @@ class Proxy(proxy.Proxy):
 endpoint = parse.urlparse(self.get_endpoint())
 path = '/'.join([endpoint.path, res.name, object_prefix])
-data = '%s\n%s\n%s\n%s\n%s' % (
+data = '{}\n{}\n{}\n{}\n{}'.format(
 path,
 redirect_url,
 max_file_size,
@ -1067,7 +1067,7 @@ class Proxy(proxy.Proxy):
 raise ValueError('ip_range must be representable as UTF-8')
 hmac_parts.insert(0, "ip=%s" % ip_range)
-hmac_body = u'\n'.join(hmac_parts)
+hmac_body = '\n'.join(hmac_parts)
 temp_url_key = self._check_temp_url_key(temp_url_key=temp_url_key)
@ -1082,17 +1082,17 @@ class Proxy(proxy.Proxy):
 else:
 exp = str(expiration)
-temp_url = u'{path}?temp_url_sig={sig}&temp_url_expires={exp}'.format(
+temp_url = '{path}?temp_url_sig={sig}&temp_url_expires={exp}'.format(
 path=path_for_body,
 sig=sig,
 exp=exp,
 )
 if ip_range:
-temp_url += u'&temp_url_ip_range={}'.format(ip_range)
+temp_url += f'&temp_url_ip_range={ip_range}'
 if prefix:
-temp_url += u'&temp_url_prefix={}'.format(parts[4])
+temp_url += f'&temp_url_prefix={parts[4]}'
 # Have return type match path from caller
 if isinstance(path, bytes):
 return temp_url.encode('utf-8')

@ -72,10 +72,10 @@ def poll_for_events(
 return False
 phys_id = event.get('physical_resource_id', '')
-links = dict(
-(link.get('rel'), link.get('href'))
+links = {
+link.get('rel'): link.get('href')
 for link in event.get('links', [])
-)
+}
 stack_id = links.get('stack', phys_id).rsplit('/', 1)[-1]
 return stack_id == phys_id
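For the links rewrite above (standalone, with a made-up event payload rather than a real Heat event): a dict comprehension produces the same mapping as dict() over a generator of key/value pairs:

event = {'links': [{'rel': 'self', 'href': 'https://example.com/events/1'},
                   {'rel': 'stack', 'href': 'https://example.com/stacks/demo/123'}]}

links = {link.get('rel'): link.get('href') for link in event.get('links', [])}

assert links == dict(
    (link.get('rel'), link.get('href')) for link in event.get('links', [])
)
assert links.get('stack', '').rsplit('/', 1)[-1] == '123'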

@ -30,13 +30,13 @@ def _construct_yaml_str(self, node):
 return self.construct_scalar(node)
-HeatYamlLoader.add_constructor(u'tag:yaml.org,2002:str', _construct_yaml_str)
+HeatYamlLoader.add_constructor('tag:yaml.org,2002:str', _construct_yaml_str)
 # Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type
 # datetime.data which causes problems in API layer when being processed by
 # openstack.common.jsonutils. Therefore, make unicode string out of timestamps
 # until jsonutils can handle dates.
 HeatYamlLoader.add_constructor(
-u'tag:yaml.org,2002:timestamp', _construct_yaml_str
+'tag:yaml.org,2002:timestamp', _construct_yaml_str
 )
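On dropping the u'' prefixes here and in the object-store proxy: in Python 3 every string literal is already text, so the prefixed and unprefixed spellings are identical in type and value; the prefix only mattered under Python 2:

# The u prefix is accepted for backwards compatibility but changes nothing.
assert u'tag:yaml.org,2002:timestamp' == 'tag:yaml.org,2002:timestamp'
assert type(u'x') is type('x') is str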

@ -58,7 +58,7 @@ class Proxy(proxy.Proxy):
 # (/stacks/name/id/everything_else), so if on third position we
 # have not a known part - discard it, not to brake further logic
 del url_parts[2]
-return super(Proxy, self)._extract_name_consume_url_parts(url_parts)
+return super()._extract_name_consume_url_parts(url_parts)
 def read_env_and_templates(
 self,

@ -48,6 +48,4 @@ class SoftwareConfig(resource.Resource):
 def create(self, session, base_path=None):
 # This overrides the default behavior of resource creation because
 # heat doesn't accept resource_key in its request.
-return super(SoftwareConfig, self).create(
-session, prepend_key=False, base_path=base_path
-)
+return super().create(session, prepend_key=False, base_path=base_path)

View File

@ -52,13 +52,9 @@ class SoftwareDeployment(resource.Resource):
def create(self, session, base_path=None): def create(self, session, base_path=None):
# This overrides the default behavior of resource creation because # This overrides the default behavior of resource creation because
# heat doesn't accept resource_key in its request. # heat doesn't accept resource_key in its request.
return super(SoftwareDeployment, self).create( return super().create(session, prepend_key=False, base_path=base_path)
session, prepend_key=False, base_path=base_path
)
def commit(self, session, base_path=None): def commit(self, session, base_path=None):
# This overrides the default behavior of resource creation because # This overrides the default behavior of resource creation because
# heat doesn't accept resource_key in its request. # heat doesn't accept resource_key in its request.
return super(SoftwareDeployment, self).commit( return super().commit(session, prepend_key=False, base_path=base_path)
session, prepend_key=False, base_path=base_path
)
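Aside: the SoftwareConfig and SoftwareDeployment hunks rewrite super(Class, self) as the zero-argument super(), which is behavior-preserving inside a method on Python 3. A minimal sketch with invented classes, not the SDK's resource hierarchy, showing both spellings dispatch to the same parent implementation:

class Resource:
    def create(self, session, prepend_key=True, base_path=None):
        return ('created', prepend_key, base_path)

class LegacyConfig(Resource):
    def create(self, session, base_path=None):
        # Old spelling: explicit class and instance arguments.
        return super(LegacyConfig, self).create(
            session, prepend_key=False, base_path=base_path
        )

class ModernConfig(Resource):
    def create(self, session, base_path=None):
        # New spelling: zero-argument super(), filled in by the compiler.
        return super().create(session, prepend_key=False, base_path=base_path)

assert LegacyConfig().create(None) == ModernConfig().create(None)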

View File

@ -36,7 +36,7 @@ class Stack(resource.Resource):
'owner_id', 'owner_id',
'username', 'username',
project_id='tenant_id', project_id='tenant_id',
**tag.TagMixin._tag_query_parameters **tag.TagMixin._tag_query_parameters,
) )
# Properties # Properties
@ -115,14 +115,12 @@ class Stack(resource.Resource):
def create(self, session, base_path=None): def create(self, session, base_path=None):
# This overrides the default behavior of resource creation because # This overrides the default behavior of resource creation because
# heat doesn't accept resource_key in its request. # heat doesn't accept resource_key in its request.
return super(Stack, self).create( return super().create(session, prepend_key=False, base_path=base_path)
session, prepend_key=False, base_path=base_path
)
def commit(self, session, base_path=None): def commit(self, session, base_path=None):
# This overrides the default behavior of resource creation because # This overrides the default behavior of resource creation because
# heat doesn't accept resource_key in its request. # heat doesn't accept resource_key in its request.
return super(Stack, self).commit( return super().commit(
session, prepend_key=False, has_body=False, base_path=None session, prepend_key=False, has_body=False, base_path=None
) )
@ -131,16 +129,16 @@ class Stack(resource.Resource):
# we need to use a different endpoint for update preview. # we need to use a different endpoint for update preview.
base_path = None base_path = None
if self.name and self.id: if self.name and self.id:
base_path = '/stacks/%(stack_name)s/%(stack_id)s' % { base_path = '/stacks/{stack_name}/{stack_id}'.format(
'stack_name': self.name, stack_name=self.name,
'stack_id': self.id, stack_id=self.id,
} )
elif self.name or self.id: elif self.name or self.id:
# We have only one of name/id. Do not try to build a stacks/NAME/ID # We have only one of name/id. Do not try to build a stacks/NAME/ID
# path # path
base_path = '/stacks/%(stack_identity)s' % { base_path = '/stacks/{stack_identity}'.format(
'stack_identity': self.name or self.id stack_identity=self.name or self.id
} )
request = self._prepare_request( request = self._prepare_request(
prepend_key=False, requires_id=False, base_path=base_path prepend_key=False, requires_id=False, base_path=base_path
) )
@ -290,7 +288,7 @@ class Stack(resource.Resource):
if ignore_missing: if ignore_missing:
return None return None
raise exceptions.ResourceNotFound( raise exceptions.ResourceNotFound(
"No %s found for %s" % (cls.__name__, name_or_id) f"No {cls.__name__} found for {name_or_id}"
) )
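Aside: f-strings accept arbitrary expressions inside the braces, including attribute lookups such as cls.__name__, so the rewritten ResourceNotFound message above is byte-for-byte the same as the old %-interpolation. A tiny sketch with placeholder values:

class Stack:
    pass

cls, name_or_id = Stack, 'demo-stack'  # stand-ins for the real arguments

old_msg = "No %s found for %s" % (cls.__name__, name_or_id)
new_msg = f"No {cls.__name__} found for {name_or_id}"
assert old_msg == new_msg == "No Stack found for demo-stack"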

View File

@ -112,7 +112,7 @@ class Proxy(adapter.Adapter):
self._influxdb_client = influxdb_client self._influxdb_client = influxdb_client
self._influxdb_config = influxdb_config self._influxdb_config = influxdb_config
if self.service_type: if self.service_type:
log_name = 'openstack.{0}'.format(self.service_type) log_name = f'openstack.{self.service_type}'
else: else:
log_name = 'openstack' log_name = 'openstack'
self.log = _log.setup_logging(log_name) self.log = _log.setup_logging(log_name)
@ -333,7 +333,9 @@ class Proxy(adapter.Adapter):
with self._statsd_client.pipeline() as pipe: with self._statsd_client.pipeline() as pipe:
if response is not None: if response is not None:
duration = int(response.elapsed.total_seconds() * 1000) duration = int(response.elapsed.total_seconds() * 1000)
metric_name = '%s.%s' % (key, str(response.status_code)) metric_name = '{}.{}'.format(
key, str(response.status_code)
)
pipe.timing(metric_name, duration) pipe.timing(metric_name, duration)
pipe.incr(metric_name) pipe.incr(metric_name)
if duration > 1000: if duration > 1000:
@ -396,7 +398,7 @@ class Proxy(adapter.Adapter):
tags['status_code'] = str(response.status_code) tags['status_code'] = str(response.status_code)
# Note(gtema): also emit status_code as a value (counter) # Note(gtema): also emit status_code as a value (counter)
fields[str(response.status_code)] = 1 fields[str(response.status_code)] = 1
fields['%s.%s' % (method, response.status_code)] = 1 fields[f'{method}.{response.status_code}'] = 1
# Note(gtema): status_code field itself is also very helpful on the # Note(gtema): status_code field itself is also very helpful on the
# graphs to show which code was returned, instead of counting its # graphs to show which code was returned, instead of counting its
# occurrences # occurrences
@ -411,7 +413,7 @@ class Proxy(adapter.Adapter):
else 'openstack_api' else 'openstack_api'
) )
# Note(gtema): append the service name to the measurement name # Note(gtema): append the service name to the measurement name
measurement = '%s.%s' % (measurement, self.service_type) measurement = f'{measurement}.{self.service_type}'
data = [dict(measurement=measurement, tags=tags, fields=fields)] data = [dict(measurement=measurement, tags=tags, fields=fields)]
try: try:
self._influxdb_client.write_points(data) self._influxdb_client.write_points(data)
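Aside: the statsd and InfluxDB hunks only change how metric and measurement names are assembled. A standalone sketch with invented key, method, and service values, showing the %-interpolation, str.format(), and f-string forms render identically:

key = 'openstack.api.compute.GET.servers'  # fabricated example values
status_code = 200
method = 'GET'
service_type = 'compute'

assert '%s.%s' % (key, str(status_code)) == f'{key}.{status_code}'
assert '{}.{}'.format(key, str(status_code)) == f'{key}.{status_code}'
assert '%s.%s' % (method, status_code) == f'{method}.{status_code}'
assert '%s.%s' % ('openstack_api', service_type) == f'openstack_api.{service_type}'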

View File

@ -306,9 +306,7 @@ class _ComponentManager(collections.abc.MutableMapping):
@property @property
def dirty(self): def dirty(self):
"""Return a dict of modified attributes""" """Return a dict of modified attributes"""
return dict( return {key: self.attributes.get(key, None) for key in self._dirty}
(key, self.attributes.get(key, None)) for key in self._dirty
)
def clean(self, only=None): def clean(self, only=None):
"""Signal that the resource no longer has modified attributes. """Signal that the resource no longer has modified attributes.
@ -610,7 +608,7 @@ class Resource(dict):
def __repr__(self): def __repr__(self):
pairs = [ pairs = [
"%s=%s" % (k, v if v is not None else 'None') "{}={}".format(k, v if v is not None else 'None')
for k, v in dict( for k, v in dict(
itertools.chain( itertools.chain(
self._body.attributes.items(), self._body.attributes.items(),
@ -622,7 +620,9 @@ class Resource(dict):
] ]
args = ", ".join(pairs) args = ", ".join(pairs)
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__, args) return "{}.{}({})".format(
self.__module__, self.__class__.__name__, args
)
def __eq__(self, comparand): def __eq__(self, comparand):
"""Return True if another resource has the same contents""" """Return True if another resource has the same contents"""
@ -1406,7 +1406,7 @@ class Resource(dict):
def _raise(message): def _raise(message):
if error_message: if error_message:
error_message.rstrip('.') error_message.rstrip('.')
message = '%s. %s' % (error_message, message) message = f'{error_message}. {message}'
raise exceptions.NotSupported(message) raise exceptions.NotSupported(message)
@ -1868,7 +1868,7 @@ class Resource(dict):
server_field = component.name server_field = component.name
if len(parts) > 1: if len(parts) > 1:
new_path = '/%s/%s' % (server_field, parts[1]) new_path = f'/{server_field}/{parts[1]}'
else: else:
new_path = '/%s' % server_field new_path = '/%s' % server_field
converted.append(dict(item, path=new_path)) converted.append(dict(item, path=new_path))
@ -2172,7 +2172,7 @@ class Resource(dict):
if not pagination_key and cls.resources_key: if not pagination_key and cls.resources_key:
# Nova has a {key}_links dict in the main body # Nova has a {key}_links dict in the main body
pagination_key = '{key}_links'.format(key=cls.resources_key) pagination_key = f'{cls.resources_key}_links'
if pagination_key: if pagination_key:
links = data.get(pagination_key, {}) links = data.get(pagination_key, {})
@ -2371,7 +2371,7 @@ class Resource(dict):
return None return None
raise exceptions.ResourceNotFound( raise exceptions.ResourceNotFound(
"No %s found for %s" % (cls.__name__, name_or_id) f"No {cls.__name__} found for {name_or_id}"
) )
@ -2427,7 +2427,7 @@ def wait_for_status(
failures = ['ERROR'] failures = ['ERROR']
failures = [f.lower() for f in failures] failures = [f.lower() for f in failures]
name = "{res}:{id}".format(res=resource.__class__.__name__, id=resource.id) name = f"{resource.__class__.__name__}:{resource.id}"
msg = "Timeout waiting for {name} to transition to {status}".format( msg = "Timeout waiting for {name} to transition to {status}".format(
name=name, status=status name=name, status=status
) )
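Aside: the dirty-attributes change above swaps dict() over a generator for a comprehension; the keys and values are unchanged. A sketch using made-up attribute data rather than a real _ComponentManager:

attributes = {'name': 'node-1', 'status': 'active'}  # invented sample state
_dirty = {'name', 'description'}  # 'description' was touched but never stored

dirty_old = dict((key, attributes.get(key, None)) for key in _dirty)
dirty_new = {key: attributes.get(key, None) for key in _dirty}

assert dirty_old == dirty_new == {'name': 'node-1', 'description': None}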

View File

@ -596,7 +596,7 @@ class Proxy(proxy.Proxy):
return self._create( return self._create(
_share_network_subnet.ShareNetworkSubnet, _share_network_subnet.ShareNetworkSubnet,
**attrs, **attrs,
share_network_id=share_network_id share_network_id=share_network_id,
) )
def delete_share_network_subnet( def delete_share_network_subnet(
@ -656,7 +656,7 @@ class Proxy(proxy.Proxy):
return self._list( return self._list(
_share_snapshot_instance.ShareSnapshotInstance, _share_snapshot_instance.ShareSnapshotInstance,
base_path=base_path, base_path=base_path,
**query **query,
) )
def get_share_snapshot_instance(self, snapshot_instance_id): def get_share_snapshot_instance(self, snapshot_instance_id):
@ -859,7 +859,7 @@ class Proxy(proxy.Proxy):
:rtype: :class:`~openstack.shared_file_system.v2. :rtype: :class:`~openstack.shared_file_system.v2.
share_access_rules.ShareAccessRules` share_access_rules.ShareAccessRules`
""" """
base_path = "/shares/%s/action" % (share_id,) base_path = f"/shares/{share_id}/action"
return self._create( return self._create(
_share_access_rule.ShareAccessRule, base_path=base_path, **attrs _share_access_rule.ShareAccessRule, base_path=base_path, **attrs
) )
@ -913,7 +913,7 @@ class Proxy(proxy.Proxy):
return self._list( return self._list(
_share_group_snapshot.ShareGroupSnapshot, _share_group_snapshot.ShareGroupSnapshot,
base_path=base_path, base_path=base_path,
**query **query,
) )
def share_group_snapshot_members(self, group_snapshot_id): def share_group_snapshot_members(self, group_snapshot_id):
@ -958,7 +958,7 @@ class Proxy(proxy.Proxy):
return self._create( return self._create(
_share_group_snapshot.ShareGroupSnapshot, _share_group_snapshot.ShareGroupSnapshot,
share_group_id=share_group_id, share_group_id=share_group_id,
**attrs **attrs,
) )
def reset_share_group_snapshot_status(self, group_snapshot_id, status): def reset_share_group_snapshot_status(self, group_snapshot_id, status):
@ -987,7 +987,7 @@ class Proxy(proxy.Proxy):
return self._update( return self._update(
_share_group_snapshot.ShareGroupSnapshot, _share_group_snapshot.ShareGroupSnapshot,
group_snapshot_id, group_snapshot_id,
**attrs **attrs,
) )
def delete_share_group_snapshot( def delete_share_group_snapshot(
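Aside: the trailing commas added after **attrs and **query in the proxy calls above are purely stylistic; Python 3.6 and newer accept a trailing comma after **kwargs in a call. A small sketch where _create is an invented stand-in for the proxy helper, not the SDK method itself:

def _create(resource_type, **kwargs):
    return resource_type, kwargs

attrs = {'name': 'snap-1'}

without_comma = _create(dict, share_group_id='sg-1', **attrs)
with_comma = _create(
    dict,
    share_group_id='sg-1',
    **attrs,  # trailing comma after **kwargs: accepted on Python 3.6+
)

assert without_comma == with_comma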

View File

@ -103,7 +103,7 @@ def generate_fake_resource(
base_attrs[name] = [uuid.uuid4().hex] base_attrs[name] = [uuid.uuid4().hex]
else: else:
# Everything else # Everything else
msg = "Fake value for %s.%s can not be generated" % ( msg = "Fake value for {}.{} can not be generated".format(
resource_type.__name__, resource_type.__name__,
name, name,
) )
@ -130,7 +130,7 @@ def generate_fake_resource(
base_attrs[name] = dict() base_attrs[name] = dict()
else: else:
# Everything else # Everything else
msg = "Fake value for %s.%s can not be generated" % ( msg = "Fake value for {}.{} can not be generated".format(
resource_type.__name__, resource_type.__name__,
name, name,
) )

View File

@ -96,9 +96,7 @@ class TestCase(base.BaseTestCase):
first = first.toDict() first = first.toDict()
if isinstance(second, utils.Munch): if isinstance(second, utils.Munch):
second = second.toDict() second = second.toDict()
return super(TestCase, self).assertEqual( return super().assertEqual(first, second, *args, **kwargs)
first, second, *args, **kwargs
)
def printLogs(self, *args): def printLogs(self, *args):
self._log_stream.seek(0) self._log_stream.seek(0)
@ -135,7 +133,9 @@ class TestCase(base.BaseTestCase):
missing_keys.append(key) missing_keys.append(key)
if missing_keys: if missing_keys:
self.fail( self.fail(
"Keys %s are in %s but not in %s" % (missing_keys, part, whole) "Keys {} are in {} but not in {}".format(
missing_keys, part, whole
)
) )
wrong_values = [ wrong_values = [
(key, part[key], whole[key]) (key, part[key], whole[key])
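Aside: the failure message above moves from %-interpolation to str.format() with positional placeholders, which stringifies its arguments the same way. A sketch with fabricated test data:

missing_keys = ['id']
part = {'name': 'a'}
whole = {'name': 'a', 'extra': 1}

old = "Keys %s are in %s but not in %s" % (missing_keys, part, whole)
new = "Keys {} are in {} but not in {}".format(missing_keys, part, whole)
assert old == new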

View File

@ -27,9 +27,9 @@ from openstack.orchestration.util import template_format
from openstack import utils from openstack import utils
PROJECT_ID = '1c36b64c840a42cd9e9b931a369337f0' PROJECT_ID = '1c36b64c840a42cd9e9b931a369337f0'
FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8dddd' FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddd'
CHOCOLATE_FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8ddde' CHOCOLATE_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8ddde'
STRAWBERRY_FLAVOR_ID = u'0c1d9008-f546-4608-9e8f-f8bdaec8dddf' STRAWBERRY_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddf'
COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1' COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1'
ORCHESTRATION_ENDPOINT = 'https://orchestration.example.com/v1/{p}'.format( ORCHESTRATION_ENDPOINT = 'https://orchestration.example.com/v1/{p}'.format(
p=PROJECT_ID p=PROJECT_ID
@ -48,30 +48,30 @@ FAKE_PUBLIC_KEY = (
def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24): def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24):
return { return {
u'OS-FLV-DISABLED:disabled': False, 'OS-FLV-DISABLED:disabled': False,
u'OS-FLV-EXT-DATA:ephemeral': 0, 'OS-FLV-EXT-DATA:ephemeral': 0,
u'disk': disk, 'disk': disk,
u'id': flavor_id, 'id': flavor_id,
u'links': [ 'links': [
{ {
u'href': u'{endpoint}/flavors/{id}'.format( 'href': '{endpoint}/flavors/{id}'.format(
endpoint=COMPUTE_ENDPOINT, id=flavor_id endpoint=COMPUTE_ENDPOINT, id=flavor_id
), ),
u'rel': u'self', 'rel': 'self',
}, },
{ {
u'href': u'{endpoint}/flavors/{id}'.format( 'href': '{endpoint}/flavors/{id}'.format(
endpoint=COMPUTE_ENDPOINT, id=flavor_id endpoint=COMPUTE_ENDPOINT, id=flavor_id
), ),
u'rel': u'bookmark', 'rel': 'bookmark',
}, },
], ],
u'name': name, 'name': name,
u'os-flavor-access:is_public': True, 'os-flavor-access:is_public': True,
u'ram': ram, 'ram': ram,
u'rxtx_factor': 1.0, 'rxtx_factor': 1.0,
u'swap': 0, 'swap': 0,
u'vcpus': vcpus, 'vcpus': vcpus,
} }
@ -251,9 +251,9 @@ def make_fake_image(
md5=NO_MD5, md5=NO_MD5,
sha256=NO_SHA256, sha256=NO_SHA256,
status='active', status='active',
image_name=u'fake_image', image_name='fake_image',
data=None, data=None,
checksum=u'ee36e35a297980dee1b514de9803ec6d', checksum='ee36e35a297980dee1b514de9803ec6d',
): ):
if data: if data:
md5 = utils.md5(usedforsecurity=False) md5 = utils.md5(usedforsecurity=False)
@ -265,34 +265,34 @@ def make_fake_image(
md5 = md5.hexdigest() md5 = md5.hexdigest()
sha256 = sha256.hexdigest() sha256 = sha256.hexdigest()
return { return {
u'image_state': u'available', 'image_state': 'available',
u'container_format': u'bare', 'container_format': 'bare',
u'min_ram': 0, 'min_ram': 0,
u'ramdisk_id': 'fake_ramdisk_id', 'ramdisk_id': 'fake_ramdisk_id',
u'updated_at': u'2016-02-10T05:05:02Z', 'updated_at': '2016-02-10T05:05:02Z',
u'file': '/v2/images/' + image_id + '/file', 'file': '/v2/images/' + image_id + '/file',
u'size': 3402170368, 'size': 3402170368,
u'image_type': u'snapshot', 'image_type': 'snapshot',
u'disk_format': u'qcow2', 'disk_format': 'qcow2',
u'id': image_id, 'id': image_id,
u'schema': u'/v2/schemas/image', 'schema': '/v2/schemas/image',
u'status': status, 'status': status,
u'tags': [], 'tags': [],
u'visibility': u'private', 'visibility': 'private',
u'locations': [ 'locations': [
{u'url': u'http://127.0.0.1/images/' + image_id, u'metadata': {}} {'url': 'http://127.0.0.1/images/' + image_id, 'metadata': {}}
], ],
u'min_disk': 40, 'min_disk': 40,
u'virtual_size': None, 'virtual_size': None,
u'name': image_name, 'name': image_name,
u'checksum': md5 or checksum, 'checksum': md5 or checksum,
u'created_at': u'2016-02-10T05:03:11Z', 'created_at': '2016-02-10T05:03:11Z',
u'owner_specified.openstack.md5': md5 or NO_MD5, 'owner_specified.openstack.md5': md5 or NO_MD5,
u'owner_specified.openstack.sha256': sha256 or NO_SHA256, 'owner_specified.openstack.sha256': sha256 or NO_SHA256,
u'owner_specified.openstack.object': 'images/{name}'.format( 'owner_specified.openstack.object': 'images/{name}'.format(
name=image_name name=image_name
), ),
u'protected': False, 'protected': False,
} }
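Aside: almost every line of the fixture above only drops a u'' prefix. A quick standalone check, with a key borrowed from the fixture style, that the prefix is a no-op on Python 3 and the resulting dicts compare equal:

assert u'fake_image' == 'fake_image'
assert type(u'fake_image') is str  # u'' literals are ordinary str on Python 3

old_entry = {u'container_format': u'bare'}
new_entry = {'container_format': 'bare'}
assert old_entry == new_entry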

View File

@ -18,7 +18,7 @@ class BaseBaremetalTest(base.BaseFunctionalTest):
node_id = None node_id = None
def setUp(self): def setUp(self):
super(BaseBaremetalTest, self).setUp() super().setUp()
self.require_service( self.require_service(
'baremetal', min_microversion=self.min_microversion 'baremetal', min_microversion=self.min_microversion
) )

View File

@ -18,7 +18,7 @@ from openstack.tests.functional.baremetal import base
class Base(base.BaseBaremetalTest): class Base(base.BaseBaremetalTest):
def setUp(self): def setUp(self):
super(Base, self).setUp() super().setUp()
# NOTE(dtantsur): generate a unique resource class to prevent parallel # NOTE(dtantsur): generate a unique resource class to prevent parallel
# tests from clashing. # tests from clashing.
self.resource_class = 'baremetal-%d' % random.randrange(1024) self.resource_class = 'baremetal-%d' % random.randrange(1024)

View File

@ -18,7 +18,7 @@ class TestBareMetalDeployTemplate(base.BaseBaremetalTest):
min_microversion = '1.55' min_microversion = '1.55'
def setUp(self): def setUp(self):
super(TestBareMetalDeployTemplate, self).setUp() super().setUp()
def test_baremetal_deploy_create_get_delete(self): def test_baremetal_deploy_create_get_delete(self):
steps = [ steps = [

View File

@ -405,7 +405,7 @@ class TestBareMetalVif(base.BaseBaremetalTest):
min_microversion = '1.28' min_microversion = '1.28'
def setUp(self): def setUp(self):
super(TestBareMetalVif, self).setUp() super().setUp()
self.node = self.create_node(network_interface='noop') self.node = self.create_node(network_interface='noop')
self.vif_id = "200712fc-fdfb-47da-89a6-2d19f76c7618" self.vif_id = "200712fc-fdfb-47da-89a6-2d19f76c7618"
@ -445,7 +445,7 @@ class TestTraits(base.BaseBaremetalTest):
min_microversion = '1.37' min_microversion = '1.37'
def setUp(self): def setUp(self):
super(TestTraits, self).setUp() super().setUp()
self.node = self.create_node() self.node = self.create_node()
def test_add_remove_node_trait(self): def test_add_remove_node_trait(self):

View File

@ -17,7 +17,7 @@ from openstack.tests.functional.baremetal import base
class TestBareMetalPort(base.BaseBaremetalTest): class TestBareMetalPort(base.BaseBaremetalTest):
def setUp(self): def setUp(self):
super(TestBareMetalPort, self).setUp() super().setUp()
self.node = self.create_node() self.node = self.create_node()
def test_port_create_get_delete(self): def test_port_create_get_delete(self):

View File

@ -19,7 +19,7 @@ class TestBareMetalPortGroup(base.BaseBaremetalTest):
min_microversion = '1.23' min_microversion = '1.23'
def setUp(self): def setUp(self):
super(TestBareMetalPortGroup, self).setUp() super().setUp()
self.node = self.create_node() self.node = self.create_node()
def test_port_group_create_get_delete(self): def test_port_group_create_get_delete(self):

View File

@ -19,7 +19,7 @@ class TestBareMetalVolumeconnector(base.BaseBaremetalTest):
min_microversion = '1.32' min_microversion = '1.32'
def setUp(self): def setUp(self):
super(TestBareMetalVolumeconnector, self).setUp() super().setUp()
self.node = self.create_node(provision_state='enroll') self.node = self.create_node(provision_state='enroll')
def test_volume_connector_create_get_delete(self): def test_volume_connector_create_get_delete(self):

View File

@ -19,7 +19,7 @@ class TestBareMetalVolumetarget(base.BaseBaremetalTest):
min_microversion = '1.32' min_microversion = '1.32'
def setUp(self): def setUp(self):
super(TestBareMetalVolumetarget, self).setUp() super().setUp()
self.node = self.create_node(provision_state='enroll') self.node = self.create_node(provision_state='enroll')
def test_volume_target_create_get_delete(self): def test_volume_target_create_get_delete(self):

View File

@ -44,7 +44,7 @@ class BaseFunctionalTest(base.TestCase):
_wait_for_timeout_key = '' _wait_for_timeout_key = ''
def setUp(self): def setUp(self):
super(BaseFunctionalTest, self).setUp() super().setUp()
self.conn = connection.Connection(config=TEST_CLOUD_REGION) self.conn = connection.Connection(config=TEST_CLOUD_REGION)
_disable_keep_alive(self.conn) _disable_keep_alive(self.conn)
@ -249,7 +249,7 @@ class BaseFunctionalTest(base.TestCase):
class KeystoneBaseFunctionalTest(BaseFunctionalTest): class KeystoneBaseFunctionalTest(BaseFunctionalTest):
def setUp(self): def setUp(self):
super(KeystoneBaseFunctionalTest, self).setUp() super().setUp()
use_keystone_v2 = os.environ.get('OPENSTACKSDK_USE_KEYSTONE_V2', False) use_keystone_v2 = os.environ.get('OPENSTACKSDK_USE_KEYSTONE_V2', False)
if use_keystone_v2: if use_keystone_v2:

View File

@ -17,7 +17,7 @@ class BaseBlockStorageTest(base.BaseFunctionalTest):
_wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE' _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE'
def setUp(self): def setUp(self):
super(BaseBlockStorageTest, self).setUp() super().setUp()
self._set_user_cloud(block_storage_api_version='2') self._set_user_cloud(block_storage_api_version='2')
self._set_operator_cloud(block_storage_api_version='2') self._set_operator_cloud(block_storage_api_version='2')

View File

@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v2 import base
class TestBackup(base.BaseBlockStorageTest): class TestBackup(base.BaseBlockStorageTest):
def setUp(self): def setUp(self):
super(TestBackup, self).setUp() super().setUp()
if not self.user_cloud.has_service('object-store'): if not self.user_cloud.has_service('object-store'):
self.skipTest('Object service is required, but not available') self.skipTest('Object service is required, but not available')
@ -62,7 +62,7 @@ class TestBackup(base.BaseBlockStorageTest):
self.VOLUME_ID, ignore_missing=False self.VOLUME_ID, ignore_missing=False
) )
self.assertIsNone(sot) self.assertIsNone(sot)
super(TestBackup, self).tearDown() super().tearDown()
def test_get(self): def test_get(self):
sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID) sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID)

View File

@ -18,7 +18,7 @@ from openstack.tests.functional.block_storage.v2 import base
class TestSnapshot(base.BaseBlockStorageTest): class TestSnapshot(base.BaseBlockStorageTest):
def setUp(self): def setUp(self):
super(TestSnapshot, self).setUp() super().setUp()
self.SNAPSHOT_NAME = self.getUniqueString() self.SNAPSHOT_NAME = self.getUniqueString()
self.SNAPSHOT_ID = None self.SNAPSHOT_ID = None
@ -65,7 +65,7 @@ class TestSnapshot(base.BaseBlockStorageTest):
self.VOLUME_ID, ignore_missing=False self.VOLUME_ID, ignore_missing=False
) )
self.assertIsNone(sot) self.assertIsNone(sot)
super(TestSnapshot, self).tearDown() super().tearDown()
def test_get(self): def test_get(self):
sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID)

View File

@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v2 import base
class TestStats(base.BaseBlockStorageTest): class TestStats(base.BaseBlockStorageTest):
def setUp(self): def setUp(self):
super(TestStats, self).setUp() super().setUp()
sot = self.operator_cloud.block_storage.backend_pools() sot = self.operator_cloud.block_storage.backend_pools()
for pool in sot: for pool in sot:

View File

@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v2 import base
class TestType(base.BaseBlockStorageTest): class TestType(base.BaseBlockStorageTest):
def setUp(self): def setUp(self):
super(TestType, self).setUp() super().setUp()
self.TYPE_NAME = self.getUniqueString() self.TYPE_NAME = self.getUniqueString()
self.TYPE_ID = None self.TYPE_ID = None
@ -34,7 +34,7 @@ class TestType(base.BaseBlockStorageTest):
self.TYPE_ID, ignore_missing=False self.TYPE_ID, ignore_missing=False
) )
self.assertIsNone(sot) self.assertIsNone(sot)
super(TestType, self).tearDown() super().tearDown()
def test_get(self): def test_get(self):
sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID) sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID)

View File

@ -16,7 +16,7 @@ from openstack.tests.functional.block_storage.v2 import base
class TestVolume(base.BaseBlockStorageTest): class TestVolume(base.BaseBlockStorageTest):
def setUp(self): def setUp(self):
super(TestVolume, self).setUp() super().setUp()
if not self.user_cloud.has_service('block-storage'): if not self.user_cloud.has_service('block-storage'):
self.skipTest('block-storage service not supported by cloud') self.skipTest('block-storage service not supported by cloud')
@ -43,7 +43,7 @@ class TestVolume(base.BaseBlockStorageTest):
self.VOLUME_ID, ignore_missing=False self.VOLUME_ID, ignore_missing=False
) )
self.assertIsNone(sot) self.assertIsNone(sot)
super(TestVolume, self).tearDown() super().tearDown()
def test_get(self): def test_get(self):
sot = self.user_cloud.block_storage.get_volume(self.VOLUME_ID) sot = self.user_cloud.block_storage.get_volume(self.VOLUME_ID)

View File

@ -17,7 +17,7 @@ class BaseBlockStorageTest(base.BaseFunctionalTest):
_wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE' _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE'
def setUp(self): def setUp(self):
super(BaseBlockStorageTest, self).setUp() super().setUp()
self._set_user_cloud(block_storage_api_version='3') self._set_user_cloud(block_storage_api_version='3')
if not self.user_cloud.has_service('block-storage', '3'): if not self.user_cloud.has_service('block-storage', '3'):
self.skipTest('block-storage service not supported by cloud') self.skipTest('block-storage service not supported by cloud')

View File

@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v3 import base
class TestBackup(base.BaseBlockStorageTest): class TestBackup(base.BaseBlockStorageTest):
def setUp(self): def setUp(self):
super(TestBackup, self).setUp() super().setUp()
if not self.user_cloud.has_service('object-store'): if not self.user_cloud.has_service('object-store'):
self.skipTest('Object service is required, but not available') self.skipTest('Object service is required, but not available')
@ -62,7 +62,7 @@ class TestBackup(base.BaseBlockStorageTest):
self.VOLUME_ID, ignore_missing=False self.VOLUME_ID, ignore_missing=False
) )
self.assertIsNone(sot) self.assertIsNone(sot)
super(TestBackup, self).tearDown() super().tearDown()
def test_get(self): def test_get(self):
sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID) sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID)

View File

@ -18,7 +18,7 @@ from openstack.tests.functional.block_storage.v3 import base
class TestSnapshot(base.BaseBlockStorageTest): class TestSnapshot(base.BaseBlockStorageTest):
def setUp(self): def setUp(self):
super(TestSnapshot, self).setUp() super().setUp()
self.SNAPSHOT_NAME = self.getUniqueString() self.SNAPSHOT_NAME = self.getUniqueString()
self.SNAPSHOT_ID = None self.SNAPSHOT_ID = None
@ -65,7 +65,7 @@ class TestSnapshot(base.BaseBlockStorageTest):
self.VOLUME_ID, ignore_missing=False self.VOLUME_ID, ignore_missing=False
) )
self.assertIsNone(sot) self.assertIsNone(sot)
super(TestSnapshot, self).tearDown() super().tearDown()
def test_get(self): def test_get(self):
sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID)

View File

@ -17,7 +17,7 @@ from openstack.tests.functional.block_storage.v3 import base
class TestType(base.BaseBlockStorageTest): class TestType(base.BaseBlockStorageTest):
def setUp(self): def setUp(self):
super(TestType, self).setUp() super().setUp()
self.TYPE_NAME = self.getUniqueString() self.TYPE_NAME = self.getUniqueString()
self.TYPE_ID = None self.TYPE_ID = None
@ -36,7 +36,7 @@ class TestType(base.BaseBlockStorageTest):
self.TYPE_ID, ignore_missing=False self.TYPE_ID, ignore_missing=False
) )
self.assertIsNone(sot) self.assertIsNone(sot)
super(TestType, self).tearDown() super().tearDown()
def test_get(self): def test_get(self):
sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID) sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID)

View File

@ -27,7 +27,7 @@ from openstack.tests.functional import base
class TestClusterTemplate(base.BaseFunctionalTest): class TestClusterTemplate(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestClusterTemplate, self).setUp() super().setUp()
if not self.user_cloud.has_service( if not self.user_cloud.has_service(
'container-infrastructure-management' 'container-infrastructure-management'
): ):

View File

@ -108,7 +108,7 @@ def wait_for_delete(client, client_args, check_interval=1, timeout=60):
class TestClustering(base.BaseFunctionalTest): class TestClustering(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestClustering, self).setUp() super().setUp()
self.skipTest('clustering service not supported by cloud') self.skipTest('clustering service not supported by cloud')
def test_create_profile(self): def test_create_profile(self):

View File

@ -32,7 +32,7 @@ class TestCompute(base.BaseFunctionalTest):
# but on a bad day, test_attach_detach_volume can take more time. # but on a bad day, test_attach_detach_volume can take more time.
self.TIMEOUT_SCALING_FACTOR = 1.5 self.TIMEOUT_SCALING_FACTOR = 1.5
super(TestCompute, self).setUp() super().setUp()
self.server_name = self.getUniqueString() self.server_name = self.getUniqueString()
def _cleanup_servers_and_volumes(self, server_name): def _cleanup_servers_and_volumes(self, server_name):
@ -522,7 +522,7 @@ class TestCompute(base.BaseFunctionalTest):
self.user_cloud.delete_server_metadata(self.server_name, ['key1']) self.user_cloud.delete_server_metadata(self.server_name, ['key1'])
updated_server = self.user_cloud.get_server(self.server_name) updated_server = self.user_cloud.get_server(self.server_name)
self.assertEqual(set(updated_server.metadata.items()), set([])) self.assertEqual(set(updated_server.metadata.items()), set())
self.assertRaises( self.assertRaises(
exceptions.NotFoundException, exceptions.NotFoundException,
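Aside: the metadata assertion above replaces set([]) with the plain set() constructor; both denote the same empty set. A one-line sketch with an invented empty metadata dict:

metadata = {}
assert set(metadata.items()) == set([]) == set()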

View File

@ -39,8 +39,5 @@ class TestDevstack(base.BaseFunctionalTest):
] ]
def test_has_service(self): def test_has_service(self):
if ( if os.environ.get(f'OPENSTACKSDK_HAS_{self.env}', '0') == '1':
os.environ.get('OPENSTACKSDK_HAS_{env}'.format(env=self.env), '0')
== '1'
):
self.assertTrue(self.user_cloud.has_service(self.service)) self.assertTrue(self.user_cloud.has_service(self.service))

View File

@ -23,7 +23,7 @@ from openstack.tests.functional import base
class TestDomain(base.BaseFunctionalTest): class TestDomain(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestDomain, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
i_ver = self.operator_cloud.config.get_api_version('identity') i_ver = self.operator_cloud.config.get_api_version('identity')

View File

@ -38,7 +38,7 @@ class TestEndpoints(base.KeystoneBaseFunctionalTest):
] ]
def setUp(self): def setUp(self):
super(TestEndpoints, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")

View File

@ -25,7 +25,7 @@ from openstack.tests.functional import base
class TestFlavor(base.BaseFunctionalTest): class TestFlavor(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestFlavor, self).setUp() super().setUp()
# Generate a random name for flavors in this test # Generate a random name for flavors in this test
self.new_item_name = self.getUniqueString('flavor') self.new_item_name = self.getUniqueString('flavor')

View File

@ -33,7 +33,7 @@ from openstack.tests.functional import base
class TestFloatingIPPool(base.BaseFunctionalTest): class TestFloatingIPPool(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestFloatingIPPool, self).setUp() super().setUp()
if not self.user_cloud._has_nova_extension('os-floating-ip-pools'): if not self.user_cloud._has_nova_extension('os-floating-ip-pools'):
# Skipping this test if floating-ip-pool extension is not # Skipping this test if floating-ip-pool extension is not

View File

@ -23,7 +23,7 @@ from openstack.tests.functional import base
class TestGroup(base.BaseFunctionalTest): class TestGroup(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestGroup, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")

View File

@ -26,7 +26,7 @@ from openstack.tests.functional import base
class TestIdentity(base.KeystoneBaseFunctionalTest): class TestIdentity(base.KeystoneBaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestIdentity, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
self.role_prefix = 'test_role' + ''.join( self.role_prefix = 'test_role' + ''.join(

View File

@ -22,7 +22,7 @@ from openstack.tests.functional import base
class TestMagnumServices(base.BaseFunctionalTest): class TestMagnumServices(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestMagnumServices, self).setUp() super().setUp()
if not self.user_cloud.has_service( if not self.user_cloud.has_service(
'container-infrastructure-management' 'container-infrastructure-management'
): ):

View File

@ -23,7 +23,7 @@ from openstack.tests.functional import base
class TestNetwork(base.BaseFunctionalTest): class TestNetwork(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestNetwork, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")

View File

@ -29,7 +29,7 @@ from openstack.tests.functional import base
class TestObject(base.BaseFunctionalTest): class TestObject(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestObject, self).setUp() super().setUp()
if not self.user_cloud.has_service('object-store'): if not self.user_cloud.has_service('object-store'):
self.skipTest('Object service not supported by cloud') self.skipTest('Object service not supported by cloud')

View File

@ -28,7 +28,7 @@ from openstack.tests.functional import base
class TestPort(base.BaseFunctionalTest): class TestPort(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestPort, self).setUp() super().setUp()
# Skip Neutron tests if neutron is not present # Skip Neutron tests if neutron is not present
if not self.user_cloud.has_service('network'): if not self.user_cloud.has_service('network'):
self.skipTest('Network service not supported by cloud') self.skipTest('Network service not supported by cloud')
@ -118,13 +118,13 @@ class TestPort(base.BaseFunctionalTest):
updated_port = self.user_cloud.get_port(name_or_id=port['id']) updated_port = self.user_cloud.get_port(name_or_id=port['id'])
self.assertEqual(port.get('name'), new_port_name) self.assertEqual(port.get('name'), new_port_name)
port.pop('revision_number', None) port.pop('revision_number', None)
port.pop(u'revision_number', None) port.pop('revision_number', None)
port.pop('updated_at', None) port.pop('updated_at', None)
port.pop(u'updated_at', None) port.pop('updated_at', None)
updated_port.pop('revision_number', None) updated_port.pop('revision_number', None)
updated_port.pop(u'revision_number', None) updated_port.pop('revision_number', None)
updated_port.pop('updated_at', None) updated_port.pop('updated_at', None)
updated_port.pop(u'updated_at', None) updated_port.pop('updated_at', None)
self.assertEqual(port, updated_port) self.assertEqual(port, updated_port)

View File

@ -26,7 +26,7 @@ from openstack.tests.functional import base
class TestProject(base.KeystoneBaseFunctionalTest): class TestProject(base.KeystoneBaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestProject, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")

View File

@ -25,7 +25,7 @@ class TestProjectCleanup(base.BaseFunctionalTest):
_wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_CLEANUP' _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_CLEANUP'
def setUp(self): def setUp(self):
super(TestProjectCleanup, self).setUp() super().setUp()
if not self.user_cloud_alt: if not self.user_cloud_alt:
self.skipTest("Alternate demo cloud is required for this test") self.skipTest("Alternate demo cloud is required for this test")

View File

@ -24,7 +24,7 @@ from openstack.tests.functional import base
class TestQosBandwidthLimitRule(base.BaseFunctionalTest): class TestQosBandwidthLimitRule(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestQosBandwidthLimitRule, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
if not self.operator_cloud.has_service('network'): if not self.operator_cloud.has_service('network'):

View File

@ -24,7 +24,7 @@ from openstack.tests.functional import base
class TestQosDscpMarkingRule(base.BaseFunctionalTest): class TestQosDscpMarkingRule(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestQosDscpMarkingRule, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
if not self.operator_cloud.has_service('network'): if not self.operator_cloud.has_service('network'):

View File

@ -24,7 +24,7 @@ from openstack.tests.functional import base
class TestQosMinimumBandwidthRule(base.BaseFunctionalTest): class TestQosMinimumBandwidthRule(base.BaseFunctionalTest):
def setUp(self): def setUp(self):
super(TestQosMinimumBandwidthRule, self).setUp() super().setUp()
if not self.operator_cloud: if not self.operator_cloud:
self.skipTest("Operator cloud is required for this test") self.skipTest("Operator cloud is required for this test")
if not self.operator_cloud.has_service('network'): if not self.operator_cloud.has_service('network'):

Some files were not shown because too many files have changed in this diff