pyupgrade changes for Python3.8+
Result of running:

    $ pyupgrade --py38-plus $(git ls-files | grep ".py$")

This was inspired by Nova [1] and Octavia [2].

Fixed PEP8 errors introduced by pyupgrade by running:

    $ autopep8 --select=E127,E128,E501 --max-line-length 79 -r \
        --in-place designate

and manual updates.

[1]: https://review.opendev.org/c/openstack/nova/+/896986
[2]: https://review.opendev.org/c/openstack/octavia/+/899263

Change-Id: Idfa757d7ba238012db116fdb3e98cc7c5ff4b169
parent 7ef4c01e02
commit 68fc28527a
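The hunks below are almost entirely mechanical and fall into a handful of patterns: `super(Class, self)` to `super()`, `class Foo(object)` to `class Foo:`, `%`-formatting to `str.format()` or f-strings, `dict()`/`set()` wrapped around comprehensions to dict/set comprehensions, `IOError`/`socket.error` to `OSError`, redundant `open(path, 'r')` modes dropped, and `for ... yield` loops to `yield from`. As a rough before/after illustration (the names here are invented for the example, not taken from Designate):

```python
# Illustrative sketch only: "Greeter" and "load_motd" are made-up names,
# not Designate code. Comments show the pre-pyupgrade spelling.


class Greeter:                               # was: class Greeter(object):
    def __init__(self, name):
        super().__init__()                   # was: super(Greeter, self).__init__()
        self.name = name

    def labels(self, items):
        # was: set([i.lower() for i in items])
        return {i.lower() for i in items}

    def greeting(self):
        return f'hello {self.name}'          # was: 'hello %s' % self.name


def load_motd(path):
    try:
        with open(path) as f:                # was: open(path, 'r')
            yield from f                     # was: for line in f: yield line
    except OSError:                          # was: except (IOError, socket.error):
        return
```

The follow-up autopep8 pass (E127, E128, E501) only re-wraps or re-aligns continuation lines, which is presumably why a few hunks below appear to change nothing but the indentation of a wrapped argument.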
@@ -60,7 +60,7 @@ class SampleHandler(NotificationHandler):
         zone_id = cfg.CONF[self.name].zone_id
         zone_name = cfg.CONF[self.name].zone_name
 
-        record_name = '%s.%s' % (payload['instance_id'], zone_name)
+        record_name = '{}.{}'.format(payload['instance_id'], zone_name)
 
         context = DesignateContext().elevated()
         context.all_tenants = True
@@ -200,7 +200,7 @@ def main():
             zone.to_file(args.write)
         else:
             zone.to_stdout()
-    except IOError as e:
+    except OSError as e:
         LOG.error(e)
 
 
@@ -27,6 +27,6 @@ class ZonesController(rest.RestController):
         return '.zones'
 
     def __init__(self):
-        super(ZonesController, self).__init__()
+        super().__init__()
 
         export = export.ExportController()
@@ -22,7 +22,7 @@ from designate.api.v2.controllers import errors
 LOG = logging.getLogger(__name__)
 
 
-class RootController(object):
+class RootController:
     """
     This is /admin/ Controller. Pecan will find all controllers via the object
     properties attached to this.
@@ -26,7 +26,7 @@ LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
 
 
-class BaseView(object):
+class BaseView:
     """
     The Views are responsible for converting to/from the "internal" and
     "external" representations of collections and resources. This includes
@@ -40,7 +40,7 @@ class BaseView(object):
     _collection_name = None
 
     def __init__(self):
-        super(BaseView, self).__init__()
+        super().__init__()
 
         self.base_uri = CONF['service:api']['api_base_uri'].rstrip('/')
 
@@ -160,13 +160,13 @@ class BaseView(object):
         return links
 
     def _get_base_href(self, parents=None):
-        href = "%s/v2/%s" % (self.base_uri, self._collection_name)
+        href = f"{self.base_uri}/v2/{self._collection_name}"
 
         return href.rstrip('?')
 
     def _get_resource_href(self, request, item, parents=None):
         base_href = self._get_base_href(parents)
-        href = "%s/%s" % (base_href, item['id'])
+        href = "{}/{}".format(base_href, item['id'])
 
         return href.rstrip('?')
 
@@ -178,7 +178,7 @@ class BaseView(object):
 
         base_href = self._get_base_href(parents)
 
-        href = "%s?%s" % (base_href, parse.urlencode(params))
+        href = f"{base_href}?{parse.urlencode(params)}"
 
         return href.rstrip('?')
 
@@ -130,7 +130,7 @@ class ContextMiddleware(base.Middleware):
 
 class KeystoneContextMiddleware(ContextMiddleware):
     def __init__(self, application):
-        super(KeystoneContextMiddleware, self).__init__(application)
+        super().__init__(application)
 
         LOG.info('Starting designate keystonecontext middleware')
 
@@ -170,7 +170,7 @@ class KeystoneContextMiddleware(ContextMiddleware):
 
 class NoAuthContextMiddleware(ContextMiddleware):
     def __init__(self, application):
-        super(NoAuthContextMiddleware, self).__init__(application)
+        super().__init__(application)
 
         LOG.info('Starting designate noauthcontext middleware')
 
@@ -188,7 +188,7 @@ class NoAuthContextMiddleware(ContextMiddleware):
 
 class TestContextMiddleware(ContextMiddleware):
     def __init__(self, application, tenant_id=None, user_id=None):
-        super(TestContextMiddleware, self).__init__(application)
+        super().__init__(application)
 
         LOG.critical('Starting designate testcontext middleware')
         LOG.critical('**** DO NOT USE IN PRODUCTION ****')
@@ -223,7 +223,7 @@ class TestContextMiddleware(ContextMiddleware):
 
 class MaintenanceMiddleware(base.Middleware):
     def __init__(self, application):
-        super(MaintenanceMiddleware, self).__init__(application)
+        super().__init__(application)
 
         LOG.info('Starting designate maintenance middleware')
 
@@ -257,7 +257,7 @@ class NormalizeURIMiddleware(base.Middleware):
 
 class FaultWrapperMiddleware(base.Middleware):
     def __init__(self, application):
-        super(FaultWrapperMiddleware, self).__init__(application)
+        super().__init__(application)
 
         LOG.info('Starting designate faultwrapper middleware')
 
@@ -338,7 +338,7 @@ class FaultWrapperMiddleware(base.Middleware):
 class APIv2ValidationErrorMiddleware(base.Middleware):
 
     def __init__(self, application):
-        super(APIv2ValidationErrorMiddleware, self).__init__(application)
+        super().__init__(application)
         self.api_version = 'API_v2'
         LOG.info('Starting designate validation middleware')
 
@@ -26,17 +26,17 @@ LOG = logging.getLogger(__name__)
 
 class Service(service.WSGIService):
     def __init__(self):
-        super(Service, self).__init__(
+        super().__init__(
             self.wsgi_application,
             self.service_name,
             cfg.CONF['service:api'].listen,
         )
 
     def start(self):
-        super(Service, self).start()
+        super().start()
 
     def stop(self, graceful=True):
-        super(Service, self).stop(graceful)
+        super().stop(graceful)
 
     @property
     def service_name(self):
@@ -19,7 +19,7 @@ from pecan import expose
 from designate import exceptions
 
 
-class ErrorsController(object):
+class ErrorsController:
 
     @expose(content_type='text/plain')
     @expose(content_type='text/dns')
@@ -29,7 +29,7 @@ from designate.api.v2.controllers import tsigkeys
 from designate.api.v2.controllers import zones
 
 
-class RootController(object):
+class RootController:
     """
     This is /v2/ Controller. Pecan will find all controllers via the object
     properties attached to this.
@@ -31,7 +31,7 @@ from designate.api.v2.controllers.zones.tasks.xfr import XfrController
 LOG = logging.getLogger(__name__)
 
 
-class TasksController(object):
+class TasksController:
 
     transfer_accepts = TRA()
     transfer_requests = TRC()
@@ -27,7 +27,7 @@ GOOD_STATUSES = [
 def get_backend(target):
     cls = base.Backend.get_driver(target.type)
 
-    message = "Backend Driver '%s' loaded. Has status of '%s'" % (
+    message = "Backend Driver '{}' loaded. Has status of '{}'".format(
         target.type, cls.__backend_status__
     )
 
@@ -33,7 +33,7 @@ class Backend(DriverPlugin):
     __backend_status__ = 'untested'
 
     def __init__(self, target):
-        super(Backend, self).__init__()
+        super().__init__()
 
         self.target = target
         self.options = target.options
@@ -28,7 +28,7 @@ from designate import exceptions
 LOG = logging.getLogger(__name__)
 
 
-class AkamaiClient(object):
+class AkamaiClient:
     def __init__(self, client_token=None, client_secret=None,
                  access_token=None, host=None):
         session = requests.Session()
@@ -164,7 +164,7 @@ class AkamaiBackend(base.Backend):
     __backend_status__ = 'untested'
 
     def __init__(self, target):
-        super(AkamaiBackend, self).__init__(target)
+        super().__init__(target)
 
         self._host = self.options.get('host', '127.0.0.1')
         self._port = int(self.options.get('port', 53))
@@ -40,7 +40,7 @@ class Bind9Backend(base.Backend):
     __backend_status__ = 'integrated'
 
     def __init__(self, target):
-        super(Bind9Backend, self).__init__(target)
+        super().__init__(target)
 
         self._host = self.options.get('host', '127.0.0.1')
         self._port = int(self.options.get('port', 53))
@@ -83,7 +83,7 @@ class Bind9Backend(base.Backend):
         for master in self.masters:
             host = master['host']
             port = master['port']
-            masters.append('%s port %s' % (host, port))
+            masters.append(f'{host} port {port}')
 
         # Ensure different MiniDNS instances are targeted for AXFRs
         random.shuffle(masters)
@@ -113,7 +113,7 @@ class Bind9Backend(base.Backend):
 
         rndc_op = [
             'showzone',
-            '%s %s' % (zone['name'].rstrip('.'), view),
+            '{} {}'.format(zone['name'].rstrip('.'), view),
         ]
         try:
             self._execute_rndc(rndc_op)
@@ -137,10 +137,10 @@ class Bind9Backend(base.Backend):
 
         rndc_op = [
             'delzone',
-            '%s %s' % (zone['name'].rstrip('.'), view),
+            '{} {}'.format(zone['name'].rstrip('.'), view),
         ]
         if (self._clean_zonefile or (zone_params and
-                zone_params.get('hard_delete'))):
+                                     zone_params.get('hard_delete'))):
             rndc_op.insert(1, '-clean')
 
         try:
@@ -174,7 +174,7 @@ class Bind9Backend(base.Backend):
         for master in self.masters:
             host = master['host']
             port = master['port']
-            masters.append('%s port %s' % (host, port))
+            masters.append(f'{host} port {port}')
 
         # Ensure different MiniDNS instances are targeted for AXFRs
         random.shuffle(masters)
@@ -206,7 +206,7 @@ class Bind9Backend(base.Backend):
         try:
            rndc_call = self._rndc_call_base + rndc_op
            LOG.debug('Executing RNDC call: %r with timeout %s',
-                rndc_call, self._rndc_timeout)
+                      rndc_call, self._rndc_timeout)
            utils.execute(*rndc_call, timeout=self._rndc_timeout)
        except (utils.processutils.ProcessExecutionError,
                subprocess.TimeoutExpired) as e:
@@ -34,7 +34,7 @@ class DesignateBackend(base.Backend):
     __backend_status__ = 'untested'
 
     def __init__(self, target):
-        super(DesignateBackend, self).__init__(target)
+        super().__init__(target)
 
         self.auth_url = self.options.get('auth_url')
         self.username = self.options.get('username')
@@ -76,7 +76,7 @@ class DesignateBackend(base.Backend):
         LOG.info('Creating zone %(d_id)s / %(d_name)s',
                  {'d_id': zone['id'], 'd_name': zone['name']})
 
-        masters = ["%s:%s" % (i.host, i.port) for i in self.masters]
+        masters = [f'{i.host}:{i.port}' for i in self.masters]
         self.client.zones.create(
             zone.name, 'SECONDARY', masters=masters)
 
@@ -34,6 +34,7 @@ CFG_GROUP_NAME = 'backend:dynect'
 class DynClientError(exceptions.Backend):
     """The base exception class for all HTTP exceptions.
     """
 
     def __init__(self, data=None, job_id=None, msgs=None,
                  http_status=None, url=None, method=None, details=None):
         self.data = data
@@ -44,14 +45,16 @@ class DynClientError(exceptions.Backend):
         self.url = url
         self.method = method
         self.details = details
-        formatted_string = "%s (HTTP %s to %s - %s) - %s" % (self.msgs,
-                                                             self.method,
-                                                             self.url,
-                                                             self.http_status,
-                                                             self.details)
+        formatted_string = '{} (HTTP {} to {} - {}) - {}'.format(
+            self.msgs,
+            self.method,
+            self.url,
+            self.http_status,
+            self.details
+        )
         if job_id:
-            formatted_string += " (Job-ID: %s)" % job_id
-        super(DynClientError, self).__init__(formatted_string)
+            formatted_string += f' (Job-ID: {job_id})'
+        super().__init__(formatted_string)
 
     @staticmethod
     def from_response(response, details=None):
@@ -90,12 +93,13 @@ class DynClientOperationBlocked(exceptions.BadRequest, DynClientError):
     error_type = 'operation_blocked'
 
 
-class DynClient(object):
+class DynClient:
     """
     DynECT service client.
 
     https://help.dynect.net/rest/
     """
 
     def __init__(self, customer_name, user_name, password,
                  endpoint="https://api.dynect.net:443",
                  api_version='3.5.6', headers=None, verify=True, retries=1,
@@ -141,7 +145,7 @@ class DynClient(object):
         ]
 
         for element in kwargs['headers']:
-            header = "-H '%s: %s'" % (element, kwargs['headers'][element])
+            header = "-H '{}: {}'".format(element, kwargs['headers'][element])
             string_parts.append(header)
 
         LOG.debug("REQ: %s", " ".join(string_parts))
@@ -205,7 +209,7 @@ class DynClient(object):
         start_time = time.monotonic()
         resp = self.http.request(method, url, **kwargs)
         if self.timings:
-            self.times.append(("%s %s" % (method, url),
+            self.times.append((f"{method} {url}",
                                start_time, time.monotonic()))
         self._http_log_resp(resp)
 
@@ -303,7 +307,7 @@ class DynECTBackend(base.Backend):
     __backend_status__ = 'untested'
 
     def __init__(self, target):
-        super(DynECTBackend, self).__init__(target)
+        super().__init__(target)
 
         self.customer_name = self.options.get('customer_name')
         self.username = self.options.get('username')
@@ -31,7 +31,7 @@ class InfobloxBackend(base.Backend):
     __plugin_name__ = 'infoblox'
 
     def __init__(self, *args, **kwargs):
-        super(InfobloxBackend, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
 
         self.infoblox = object_manipulator.InfobloxObjectManipulator(
             connector.Infoblox(self.options))
@@ -27,7 +27,7 @@ CONF = cfg.CONF
 LOG = log.getLogger(__name__)
 
 
-class Infoblox(object):
+class Infoblox:
     """Infoblox class
 
     Defines methods for getting, creating, updating and
@@ -27,14 +27,14 @@ class InfobloxExceptionBase(exceptions.Backend):
 
     def __init__(self, **kwargs):
         try:
-            super(InfobloxExceptionBase, self).__init__(self.message % kwargs)
+            super().__init__(self.message % kwargs)
             self.msg = self.message % kwargs
         except Exception:
             if self.use_fatal_exceptions():
                 raise
             else:
                 # at least get the core message out if something happened
-                super(InfobloxExceptionBase, self).__init__(self.message)
+                super().__init__(self.message)
 
     def __unicode__(self):
         return str(self.msg)
@@ -55,7 +55,7 @@ class InfobloxException(InfobloxExceptionBase):
     """Generic Infoblox Exception."""
     def __init__(self, response, **kwargs):
         self.response = response
-        super(InfobloxException, self).__init__(**kwargs)
+        super().__init__(**kwargs)
 
 
 class InfobloxIsMisconfigured(InfobloxExceptionBase):
@@ -20,7 +20,7 @@ from designate.i18n import _
 LOG = log.getLogger(__name__)
 
 
-class InfobloxObjectManipulator(object):
+class InfobloxObjectManipulator:
     FIELDS = ['ttl', 'use_ttl']
 
     def __init__(self, connector):
@@ -73,8 +73,8 @@ class InfobloxObjectManipulator(object):
 
     def create_multi_tenant_dns_view(self, net_view, tenant):
         if not net_view:
-            net_view = "%s.%s" % (self.connector.network_view, tenant)
-            dns_view = "%s.%s" % (self.connector.dns_view, net_view)
+            net_view = f"{self.connector.network_view}.{tenant}"
+            dns_view = f"{self.connector.dns_view}.{net_view}"
 
         try:
             self.create_network_view(
@@ -30,7 +30,7 @@ class NS1Backend(base.Backend):
     __backend_status__ = 'untested'
 
     def __init__(self, target):
-        super(NS1Backend, self).__init__(target)
+        super().__init__(target)
 
         self.api_endpoint = "https://" + self.options.get('api_endpoint')
         self.api_token = self.options.get('api_token')
@@ -43,7 +43,9 @@ class NS1Backend(base.Backend):
         }
 
     def _build_url(self, zone):
-        return "%s/v1/zones/%s" % (self.api_endpoint, zone.name.rstrip('.'))
+        return '{}/v1/zones/{}'.format(
+            self.api_endpoint, zone.name.rstrip('.')
+        )
 
     def _get_master(self):
         try:
@@ -103,16 +105,16 @@ class NS1Backend(base.Backend):
             # check if the zone was actually created
             if self._check_zone_exists(zone):
                 LOG.info("%s was created with an error. Deleting zone",
-                    zone.name)
+                         zone.name)
                 try:
                     self.delete_zone(context, zone)
                 except exceptions.Backend:
                     LOG.error('Could not delete errored zone %s',
-                        zone.name)
+                              zone.name)
                 raise exceptions.Backend(e)
         else:
             LOG.info("Can't create zone %s because it already exists",
-                zone.name)
+                     zone.name)
 
     def delete_zone(self, context, zone, zone_params=None):
         """Delete a DNS zone"""
@@ -19,7 +19,6 @@
 # under the License.
 
 import random
-import socket
 import ssl
 
 import eventlet
@@ -40,7 +39,7 @@ class NSD4Backend(base.Backend):
     NSDCT_VERSION = 'NSDCT1'
 
     def __init__(self, target):
-        super(NSD4Backend, self).__init__(target)
+        super().__init__(target)
 
         self.host = self.options.get('host', '127.0.0.1')
         self.port = int(self.options.get('port', 8952))
@@ -56,7 +55,7 @@ class NSD4Backend(base.Backend):
                                keyfile=self.keyfile,
                                certfile=self.certfile)
         stream = sock.makefile()
-        stream.write('%s %s\n' % (self.NSDCT_VERSION, command))
+        stream.write(f'{self.NSDCT_VERSION} {command}\n')
         stream.flush()
         result = stream.read()
         stream.close()
@@ -68,7 +67,7 @@ class NSD4Backend(base.Backend):
             LOG.debug('Executing NSD4 control call: %s on %s',
                       command, self.host)
             result = self._command(command)
-        except (ssl.SSLError, socket.error) as e:
+        except (ssl.SSLError, OSError) as e:
             LOG.debug('NSD4 control call failure: %s' % e)
             raise exceptions.Backend(e)
         if result.rstrip("\n") != 'ok':
@@ -80,12 +79,12 @@ class NSD4Backend(base.Backend):
         for master in self.masters:
             host = master['host']
             port = master['port']
-            masters.append('%s port %s' % (host, port))
+            masters.append(f'{host} port {port}')
 
         # Ensure different MiniDNS instances are targeted for AXFRs
         random.shuffle(masters)
 
-        command = 'addzone %s %s' % (zone['name'], self.pattern)
+        command = 'addzone {} {}'.format(zone['name'], self.pattern)
 
         try:
             self._execute_nsd4(command)
@@ -32,7 +32,7 @@ class PDNS4Backend(base.Backend):
     __backend_status__ = 'integrated'
 
     def __init__(self, target):
-        super(PDNS4Backend, self).__init__(target)
+        super().__init__(target)
 
         self.api_endpoint = self.options.get('api_endpoint')
         self.api_token = self.options.get('api_token')
@@ -45,7 +45,7 @@ class PDNS4Backend(base.Backend):
 
     def _build_url(self, zone=''):
         r_url = urllib.parse.urlparse(self.api_endpoint)
-        return "%s://%s/api/v1/servers/localhost/zones%s%s" % (
+        return "{}://{}/api/v1/servers/localhost/zones{}{}".format(
             r_url.scheme, r_url.netloc, '/' if zone else '', zone)
 
     def _check_zone_exists(self, zone):
@@ -34,7 +34,7 @@ def reset():
 
 @profiler.trace_cls("rpc")
 @rpc_logging(LOG, 'central')
-class CentralAPI(object):
+class CentralAPI:
     """
     Client side of the central RPC API.
 
@@ -64,7 +64,7 @@ class Service(service.RPCService):
         self._storage = None
         self._quota = None
 
-        super(Service, self).__init__(
+        super().__init__(
             self.service_name, cfg.CONF['service:central'].topic,
             threads=cfg.CONF['service:central'].threads,
         )
@@ -103,12 +103,12 @@ class Service(service.RPCService):
             LOG.warning("Managed Resource Tenant ID is not properly "
                         "configured")
 
-        super(Service, self).start()
+        super().start()
         self.coordination.start()
 
     def stop(self, graceful=True):
         self.coordination.stop()
-        super(Service, self).stop(graceful)
+        super().stop(graceful)
 
     @property
     def worker_api(self):
@@ -233,8 +233,10 @@ class Service(service.RPCService):
             except Exception:
                 continue
             else:
-                msg = ('RecordSet belongs in a child zone: %s' %
-                       child_zone['name'])
+                msg = (
+                    'RecordSet belongs in a child zone: {}'
+                    .format(child_zone['name'])
+                )
                 raise exceptions.InvalidRecordSetLocation(msg)
 
     def _is_valid_recordset_records(self, recordset):
@@ -1812,7 +1814,7 @@ class Service(service.RPCService):
         }
 
         records = self.find_records(elevated_context, criterion)
-        records = dict([(r['managed_extra'], r) for r in records])
+        records = {r['managed_extra']: r for r in records}
 
         invalid = []
         data = {}
@@ -1873,7 +1875,8 @@ class Service(service.RPCService):
     def _get_floatingip(self, context, region, floatingip_id, fips):
         if (region, floatingip_id) not in fips:
             raise exceptions.NotFound(
-                'FloatingIP %s in %s is not associated for project "%s"' % (
+                'FloatingIP {} in {} is not associated for project '
+                '"{}"'.format(
                     floatingip_id, region, context.project_id
                 )
             )
@@ -2023,7 +2026,7 @@ class Service(service.RPCService):
                 elevated_context, criterion=criterion
             )
         except exceptions.RecordNotFound:
-            msg = 'No such FloatingIP %s:%s' % (region, floatingip_id)
+            msg = f'No such FloatingIP {region}:{floatingip_id}'
             raise exceptions.NotFound(msg)
 
         self._delete_or_update_managed_recordset(
@@ -2309,10 +2312,10 @@ class Service(service.RPCService):
             return updated_pool
 
         # Find the current NS hostnames
-        existing_ns = set([n.hostname for n in original_pool_ns_records])
+        existing_ns = {n.hostname for n in original_pool_ns_records}
 
         # Find the desired NS hostnames
-        request_ns = set([n.hostname for n in pool.ns_records])
+        request_ns = {n.hostname for n in pool.ns_records}
 
         # Get the NS's to be created and deleted, ignoring the ones that
         # are in both sets, as those haven't changed.
@@ -2790,7 +2793,7 @@ class Service(service.RPCService):
             zone_import.status = 'COMPLETE'
             zone_import.zone_id = zone.id
             zone_import.message = (
-                '%(name)s imported' % {'name': zone.name}
+                f'{zone.name} imported'
             )
         except exceptions.DuplicateZone:
             zone_import.status = 'ERROR'
@@ -2842,7 +2845,7 @@ class Service(service.RPCService):
         criterion['task_type'] = 'IMPORT'
 
         return self.storage.find_zone_imports(context, criterion, marker,
-            limit, sort_key, sort_dir)
+                                              limit, sort_key, sort_dir)
 
     @rpc.expected_exceptions()
     def get_zone_import(self, context, zone_import_id):
@@ -2912,7 +2915,7 @@ class Service(service.RPCService):
 
     @rpc.expected_exceptions()
     def find_zone_exports(self, context, criterion=None, marker=None,
-                    limit=None, sort_key=None, sort_dir=None):
+                          limit=None, sort_key=None, sort_dir=None):
 
         if policy.enforce_new_defaults():
             target = {constants.RBAC_PROJECT_ID: context.project_id}
@@ -2928,7 +2931,7 @@ class Service(service.RPCService):
         criterion['task_type'] = 'EXPORT'
 
         return self.storage.find_zone_exports(context, criterion, marker,
-            limit, sort_key, sort_dir)
+                                              limit, sort_key, sort_dir)
 
     @rpc.expected_exceptions()
     def get_zone_export(self, context, zone_export_id):
@@ -53,7 +53,7 @@ def methods_of(obj):
 
 def get_available_commands():
     em = ExtensionManager('designate.manage')
-    return dict([(e.name, e.plugin) for e in em.extensions])
+    return {e.name: e.plugin for e in em.extensions}
 
 
 def add_command_parsers(subparsers):
@@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__)
 
 class ZoneLockLocal(threading.local):
     def __init__(self):
-        super(ZoneLockLocal, self).__init__()
+        super().__init__()
         self._held = set()
 
     def hold(self, name):
@@ -25,7 +25,7 @@ LOG = logging.getLogger(__name__)
 
 class NotificationThreadLocal(threading.local):
     def __init__(self):
-        super(NotificationThreadLocal, self).__init__()
+        super().__init__()
         self.stack = 0
         self.queue = collections.deque()
 
@@ -20,7 +20,7 @@ import designate.exceptions
 
 class ExceptionThreadLocal(threading.local):
     def __init__(self):
-        super(ExceptionThreadLocal, self).__init__()
+        super().__init__()
         self.depth = 0
 
     def reset_depth(self):
@@ -30,7 +30,7 @@ if profiler_opts:
     profiler_opts.set_defaults(CONF)
 
 
-class WsgiMiddleware(object):
+class WsgiMiddleware:
 
     def __init__(self, application, **kwargs):
         self.application = application
@@ -46,7 +46,7 @@ class DesignateContext(context.RequestContext):
                  edit_managed_records=False, hide_counts=False,
                  client_addr=None, user_auth_plugin=None,
                  hard_delete=False, delete_shares=False, **kwargs):
-        super(DesignateContext, self).__init__(**kwargs)
+        super().__init__(**kwargs)
 
         self.user_auth_plugin = user_auth_plugin
         self.service_catalog = service_catalog
@@ -68,7 +68,7 @@ class DesignateContext(context.RequestContext):
         return self.from_dict(d)
 
     def to_dict(self):
-        d = super(DesignateContext, self).to_dict()
+        d = super().to_dict()
 
         # Override the user_identity field to account for TSIG. When a TSIG key
         # is used as authentication e.g. via MiniDNS, it will act as a form
@@ -232,7 +232,7 @@ class _ContextAuthPlugin(plugin.BaseAuthPlugin):
     auth data.
     """
     def __init__(self, auth_token, sc):
-        super(_ContextAuthPlugin, self).__init__()
+        super().__init__()
 
         self.auth_token = auth_token
         self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc)
@@ -257,7 +257,7 @@ class _ContextAuthPlugin(plugin.BaseAuthPlugin):
             urlkw[k] = kwargs[k]
 
         endpoint = endpoint_override or self.service_catalog.url_for(**urlkw)
-        return super(_ContextAuthPlugin, self).get_endpoint_data(
+        return super().get_endpoint_data(
             session, endpoint_override=endpoint,
            discover_versions=discover_versions, **kwargs)
 
@@ -35,7 +35,7 @@ def _retry_if_tooz_error(exception):
     return isinstance(exception, tooz.coordination.ToozError)
 
 
-class Coordination(object):
+class Coordination:
     def __init__(self, name, tg, grouping_enabled=False):
         # NOTE(eandersson): Workaround until tooz handles the conversion.
         if not isinstance(name, bytes):
@@ -128,7 +128,7 @@ class Coordination(object):
     )
 
 
-class Partitioner(object):
+class Partitioner:
     def __init__(self, coordinator, group_id, my_id, partitions):
         self._coordinator = coordinator
         self._group_id = group_id
@@ -30,7 +30,7 @@ CONF = designate.conf.CONF
 LOG = logging.getLogger(__name__)
 
 
-class DNSMiddleware(object):
+class DNSMiddleware:
     """Base DNS Middleware class with some utility methods"""
     def __init__(self, application):
         self.application = application
@@ -69,7 +69,7 @@ class SerializationMiddleware(DNSMiddleware):
     """DNS Middleware to serialize/deserialize DNS Packets"""
 
     def __init__(self, application, tsig_keyring=None):
-        super(SerializationMiddleware, self).__init__(application)
+        super().__init__(application)
         self.tsig_keyring = tsig_keyring
 
     def __call__(self, request):
@@ -148,7 +148,7 @@ class TsigInfoMiddleware(DNSMiddleware):
     """Middleware which looks up the information available for a TsigKey"""
 
    def __init__(self, application, storage):
-        super(TsigInfoMiddleware, self).__init__(application)
+        super().__init__(application)
         self.storage = storage
 
     def process_request(self, request):
@@ -41,7 +41,7 @@ class TsigKeyring(dict):
     """Implements the DNSPython KeyRing API, backed by the Designate DB"""
 
     def __init__(self, storage):
-        super(TsigKeyring, self).__init__()
+        super().__init__()
         self.storage = storage
 
     def __getitem__(self, key):
@@ -63,7 +63,7 @@ class TsigKeyring(dict):
         return default
 
 
-class ZoneLock(object):
+class ZoneLock:
     """A Lock across all zones that enforces a rate limit on NOTIFYs"""
 
     def __init__(self, delay):
@@ -209,28 +209,28 @@ def do_axfr(zone_name, servers, source=None):
                 port=srv['port'], source=source
             )
             raw_zone = dns.zone.from_xfr(xfr, relativize=False)
-            LOG.debug("AXFR Successful for %s", raw_zone.origin.to_text())
+            LOG.debug('AXFR Successful for %s', raw_zone.origin.to_text())
             return raw_zone
         except eventlet.Timeout as t:
             if t == to:
-                LOG.error("AXFR timed out for %(name)s from %(host)s",
+                LOG.error('AXFR timed out for %(name)s from %(host)s',
                           log_info)
                 continue
         except dns.exception.FormError:
-            LOG.error("Zone %(name)s is not present on %(host)s."
-                      "Trying next server.", log_info)
-        except socket.error:
-            LOG.error("Connection error when doing AXFR for %(name)s from "
-                      "%(host)s", log_info)
+            LOG.error('Zone %(name)s is not present on %(host)s.'
+                      'Trying next server.', log_info)
+        except OSError:
+            LOG.error('Connection error when doing AXFR for %(name)s from '
+                      '%(host)s', log_info)
         except Exception:
-            LOG.exception("Problem doing AXFR %(name)s from %(host)s. "
-                          "Trying next server.", log_info)
+            LOG.exception('Problem doing AXFR %(name)s from %(host)s. '
+                          'Trying next server.', log_info)
         finally:
             to.cancel()
 
     raise exceptions.XFRFailure(
-        "XFR failed for %(name)s. No servers in %(servers)s was reached." %
-        {"name": zone_name, "servers": servers}
+        'XFR failed for %(name)s. No servers in %(servers)s was reached.' %
+        {'name': zone_name, 'servers': servers}
     )
 
@@ -26,7 +26,7 @@ class DesignateException(Exception):
         self.errors = kwargs.pop('errors', None)
         self.object = kwargs.pop('object', None)
 
-        super(DesignateException, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
 
         if args and isinstance(args[0], str):
             self.error_message = args[0]
@@ -41,13 +41,13 @@ class RelationNotLoaded(DesignateException):
     error_type = 'relation_not_loaded'
 
     def __init__(self, *args, **kwargs):
 
         self.relation = kwargs.pop('relation', None)
 
-        super(RelationNotLoaded, self).__init__(*args, **kwargs)
+        super().__init__(*args, **kwargs)
 
-        self.error_message = ("%(relation)s is not loaded on %(object)s" %
-            {"relation": self.relation, "object": self.object.obj_name()})
+        self.error_message = ('{relation} is not loaded on {object}'.format(
+            relation=self.relation, object=self.object.obj_name()
+        ))
 
     def __str__(self):
         return self.error_message
@@ -89,7 +89,7 @@ def check_explicit_underscore_import(logical_line, filename):
             custom_underscore_check.match(logical_line)):
         UNDERSCORE_IMPORT_FILES.append(filename)
     elif (translated_log.match(logical_line) or
-            string_translation.match(logical_line)):
+          string_translation.match(logical_line)):
         yield(0, "D703: Found use of _() without explicit import of _!")
 
 
@@ -40,7 +40,7 @@ class HeartbeatEmitter(plugin.DriverPlugin):
     __plugin_type__ = 'heartbeat_emitter'
 
     def __init__(self, service_name, **kwargs):
-        super(HeartbeatEmitter, self).__init__()
+        super().__init__()
 
         self._status = 'UP'
         self._stats = {}
@@ -100,7 +100,7 @@ class RpcEmitter(HeartbeatEmitter):
     __plugin_name__ = 'rpc'
 
     def __init__(self, service_name, rpc_api=None, **kwargs):
-        super(RpcEmitter, self).__init__(service_name, **kwargs)
+        super().__init__(service_name, **kwargs)
         self.rpc_api = rpc_api
 
     def transmit(self, status):
@@ -34,7 +34,7 @@ def name(name):
     return _decorator
 
 
-class Commands(object):
+class Commands:
     def __init__(self):
         self.context = context.DesignateContext.get_admin_context(
             request_id='designate-manage'
@@ -73,7 +73,7 @@ class DatabaseCommands(base.Commands):
                                       stringio_buffer=latest_buffer))
         latest = latest_buffer.getvalue().replace('\n', ' ')
         latest_buffer.close()
-        print("Current: %s Latest: %s" % (current, latest))
+        print(f'Current: {current} Latest: {latest}')
 
     def sync(self, db_url=None, stringio_buffer=sys.stdout):
         alembic_command.upgrade(
@@ -36,7 +36,7 @@ CONF = cfg.CONF
 
 class PoolCommands(base.Commands):
     def __init__(self):
-        super(PoolCommands, self).__init__()
+        super().__init__()
         self.central_api = None
         self.dry_run = False
         self.skip_verify_drivers = False
@@ -258,7 +258,7 @@ class PoolCommands(base.Commands):
 
     @staticmethod
     def _load_config(filename):
-        with open(filename, 'r') as stream:
+        with open(filename) as stream:
             return yaml.safe_load(stream)
 
     @staticmethod
@@ -56,7 +56,7 @@ class TLDCommands(base.Commands):
     """
 
    def __init__(self):
-        super(TLDCommands, self).__init__()
+        super().__init__()
 
    def _startup(self):
        rpc.init(cfg.CONF)
@@ -40,7 +40,7 @@ CONF.import_opt('default_pool_id', 'designate.central',
 TSIG_RRSIZE = 10 + 64 + 160 + 1
 
 
-class RequestHandler(object):
+class RequestHandler:
     def __init__(self, storage, tg):
         self._worker_api = None
 
@@ -72,18 +72,15 @@ class RequestHandler(object):
         # It is permissible for a server to send an AXFR response when
         # receiving an IXFR request.
         if q_rrset.rdtype in (dns.rdatatype.AXFR, dns.rdatatype.IXFR):
-            for response in self._handle_axfr(request):
-                yield response
+            yield from self._handle_axfr(request)
             return
 
         else:
             for response in self._handle_record_query(request):