Fix new pep8/pylint warnings

Fixed: Consider using {} instead of a call to 'dict' (use-dict-literal)
Fixed: Unnecessary parens after '=' keyword (superfluous-parens)
Fixed: Metaclass class method __new__ should have 'mcs' as first
       argument (bad-mcs-classmethod-argument)
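
A minimal before/after sketch of the three fixed checks (illustrative
snippets only, not code taken from this patch):

    # use-dict-literal: prefer a dict literal over a call to dict()
    stats = dict(network_tx=10, network_rx=20)    # flagged
    stats = {'network_tx': 10, 'network_rx': 20}  # fixed

    # superfluous-parens: no parentheses needed after '='
    msg = ('some error message')  # flagged
    msg = 'some error message'    # fixed

    # bad-mcs-classmethod-argument: metaclass __new__ takes 'mcs', not 'cls'
    class ExampleMeta(type):
        def __new__(mcs, name, bases, dct):
            return super().__new__(mcs, name, bases, dct)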

Added: "Raising too general exception: Exception" (broad-exception-raised)
to the ignore list.
This new warning should be addressed in a follow-up patch rather than in
the quick fix needed to unblock the CI.
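
For context, broad-exception-raised flags statements that raise the bare
Exception class. The follow-up would replace such raises with more specific
exception types, roughly along these lines (hypothetical sketch, not part of
this change):

    # Flagged by broad-exception-raised:
    raise Exception("configuration could not be applied")

    # A follow-up would instead define and raise a specific type:
    class ConfigurationError(Exception):
        pass

    raise ConfigurationError("configuration could not be applied")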

Change-Id: I1fdb804d7b561bb3a746d14a51b50edcd445dbe6
Gregory Thiemonge 2023-02-03 09:05:07 +01:00
parent b687bd0a4c
commit ff8c3aa7e2
21 changed files with 124 additions and 121 deletions


@@ -32,6 +32,8 @@ disable=
 unused-argument,
 unused-variable,
 useless-super-delegation,
+# TODO(gthiemonge) Re-enable this checker and fix too general exceptions
+broad-exception-raised,
 # "C" Coding convention violations
 invalid-name,
 line-too-long,


@@ -176,9 +176,9 @@ class AmphoraInfo(object):
                 if item[0] == consts.IFLA_IFNAME:
                     interface_name = item[1]
                 if item[0] == 'IFLA_STATS64':
-                    networks[interface_name] = dict(
-                        network_tx=item[1]['tx_bytes'],
-                        network_rx=item[1]['rx_bytes'])
+                    networks[interface_name] = {
+                        'network_tx': item[1]['tx_bytes'],
+                        'network_rx': item[1]['rx_bytes']}
         return networks

     def get_interface(self, ip_addr):
@@ -186,13 +186,13 @@ class AmphoraInfo(object):
             interface = network_utils.get_interface_name(
                 ip_addr, net_ns=consts.AMPHORA_NAMESPACE)
         except exceptions.InvalidIPAddress:
-            return webob.Response(json=dict(message="Invalid IP address"),
+            return webob.Response(json={'message': "Invalid IP address"},
                                   status=400)
         except exceptions.NotFound:
             return webob.Response(
-                json=dict(message="Error interface not found for IP address"),
+                json={'message': "Error interface not found for IP address"},
                 status=404)
-        return webob.Response(json=dict(message='OK', interface=interface),
+        return webob.Response(json={'message': 'OK', 'interface': interface},
                               status=200)

     def _get_active_tuned_profiles(self) -> str:


@@ -124,9 +124,9 @@ class Keepalived(object):
         except subprocess.CalledProcessError as e:
             LOG.debug('Failed to enable octavia-keepalived service: '
                       '%(err)s %(output)s', {'err': e, 'output': e.output})
-            return webob.Response(json=dict(
-                message="Error enabling octavia-keepalived service",
-                details=e.output), status=500)
+            return webob.Response(json={
+                'message': "Error enabling octavia-keepalived service",
+                'details': e.output}, status=500)

         res = webob.Response(json={'message': 'OK'}, status=200)
         res.headers['ETag'] = stream.get_md5()
@@ -138,9 +138,9 @@ class Keepalived(object):
         if action not in [consts.AMP_ACTION_START,
                           consts.AMP_ACTION_STOP,
                           consts.AMP_ACTION_RELOAD]:
-            return webob.Response(json=dict(
-                message='Invalid Request',
-                details="Unknown action: {0}".format(action)), status=400)
+            return webob.Response(json={
+                'message': 'Invalid Request',
+                'details': "Unknown action: {0}".format(action)}, status=400)

         if action == consts.AMP_ACTION_START:
             keepalived_pid_path = util.keepalived_pid_path()
@@ -165,11 +165,12 @@ class Keepalived(object):
         except subprocess.CalledProcessError as e:
             LOG.debug('Failed to %s octavia-keepalived service: %s %s',
                       action, e, e.output)
-            return webob.Response(json=dict(
-                message="Failed to {0} octavia-keepalived service".format(
-                    action), details=e.output), status=500)
+            return webob.Response(json={
+                'message': "Failed to {0} octavia-keepalived service".format(
+                    action),
+                'details': e.output}, status=500)

         return webob.Response(
-            json=dict(message='OK',
-                      details='keepalived {action}ed'.format(action=action)),
+            json={'message': 'OK',
+                  'details': 'keepalived {action}ed'.format(action=action)},
             status=202)


@@ -148,10 +148,10 @@ class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase):
                 LOG.debug('Failed to enable '
                           'octavia-keepalivedlvs service: '
                           '%(err)s', {'err': str(e)})
-                return webob.Response(json=dict(
-                    message="Error enabling "
-                            "octavia-keepalivedlvs service",
-                    details=e.output), status=500)
+                return webob.Response(json={
+                    'message': ("Error enabling "
+                                "octavia-keepalivedlvs service"),
+                    'details': e.output}, status=500)

         if NEED_CHECK:
             # inject the check script for keepalived process
@@ -178,10 +178,10 @@ class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase):
     def _check_lvs_listener_exists(self, listener_id):
         if not os.path.exists(util.keepalived_lvs_cfg_path(listener_id)):
             raise exceptions.HTTPException(
-                response=webob.Response(json=dict(
-                    message='UDP Listener Not Found',
-                    details="No UDP listener with UUID: {0}".format(
-                        listener_id)), status=404))
+                response=webob.Response(json={
+                    'message': 'UDP Listener Not Found',
+                    'details': "No UDP listener with UUID: {0}".format(
+                        listener_id)}, status=404))

     def get_lvs_listener_config(self, listener_id):
         """Gets the keepalivedlvs config
@@ -200,9 +200,9 @@ class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase):
         if action not in [consts.AMP_ACTION_START,
                           consts.AMP_ACTION_STOP,
                           consts.AMP_ACTION_RELOAD]:
-            return webob.Response(json=dict(
-                message='Invalid Request',
-                details="Unknown action: {0}".format(action)), status=400)
+            return webob.Response(json={
+                'message': 'Invalid Request',
+                'details': "Unknown action: {0}".format(action)}, status=400)

         # When octavia requests a reload of keepalived, force a restart since
         # a keepalived reload doesn't restore members in their initial state.
@@ -225,16 +225,16 @@ class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase):
         except subprocess.CalledProcessError as e:
             LOG.debug('Failed to %s keepalivedlvs listener %s',
                       listener_id + ' : ' + action, str(e))
-            return webob.Response(json=dict(
-                message=("Failed to {0} keepalivedlvs listener {1}"
-                         .format(action, listener_id)),
-                details=e.output), status=500)
+            return webob.Response(json={
+                'message': ("Failed to {0} keepalivedlvs listener {1}"
+                            .format(action, listener_id)),
+                'details': e.output}, status=500)

         return webob.Response(
-            json=dict(message='OK',
-                      details='keepalivedlvs listener {listener_id} '
-                              '{action}ed'.format(listener_id=listener_id,
-                                                  action=action)),
+            json={'message': 'OK',
+                  'details': 'keepalivedlvs listener {listener_id} '
+                             '{action}ed'.format(listener_id=listener_id,
+                                                 action=action)},
             status=202)

     def _check_lvs_listener_status(self, listener_id):
@@ -286,9 +286,9 @@ class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase):
             subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as e:
             LOG.error("Failed to stop keepalivedlvs service: %s", str(e))
-            return webob.Response(json=dict(
-                message="Error stopping keepalivedlvs",
-                details=e.output), status=500)
+            return webob.Response(json={
+                'message': "Error stopping keepalivedlvs",
+                'details': e.output}, status=500)

         # Since the lvs check script based on the keepalived pid file for
         # checking whether it is alived. So here, we had stop the keepalived
@@ -319,11 +319,11 @@ class KeepalivedLvs(lvs_listener_base.LvsListenerApiServerBase):
             LOG.error("Failed to disable "
                       "octavia-keepalivedlvs-%(list)s service: "
                       "%(err)s", {'list': listener_id, 'err': str(e)})
-            return webob.Response(json=dict(
-                message=(
-                    "Error disabling octavia-keepalivedlvs-"
-                    "{0} service".format(listener_id)),
-                details=e.output), status=500)
+            return webob.Response(json={
+                'message': (
+                    "Error disabling octavia-keepalivedlvs-"
+                    "{0} service".format(listener_id)),
+                'details': e.output}, status=500)

         # delete init script ,config file and log file for that listener
         if os.path.exists(init_path):


@@ -137,7 +137,7 @@ class Loadbalancer(object):
             # Save the last config that failed validation for debugging
             os.rename(name, ''.join([name, '-failed']))
             return webob.Response(
-                json=dict(message="Invalid request", details=e.output),
+                json={'message': "Invalid request", 'details': e.output},
                 status=400)

         # file ok - move it
@@ -167,15 +167,15 @@ class Loadbalancer(object):
         except util.UnknownInitError:
             LOG.error("Unknown init system found.")
-            return webob.Response(json=dict(
-                message="Unknown init system in amphora",
-                details="The amphora image is running an unknown init "
-                        "system. We can't create the init configuration "
-                        "file for the load balancing process."), status=500)
+            return webob.Response(json={
+                'message': "Unknown init system in amphora",
+                'details': "The amphora image is running an unknown init "
+                           "system. We can't create the init configuration "
+                           "file for the load balancing process."}, status=500)

         if init_system == consts.INIT_SYSTEMD:
             # mode 00644
-            mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+            mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
         else:
             # mode 00755
             mode = (stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
@@ -215,9 +215,9 @@ class Loadbalancer(object):
                 LOG.error("Failed to enable haproxy-%(lb_id)s service: "
                           "%(err)s %(out)s", {'lb_id': lb_id, 'err': e,
                                               'out': e.output})
-                return webob.Response(json=dict(
-                    message="Error enabling haproxy-{0} service".format(
-                        lb_id), details=e.output), status=500)
+                return webob.Response(json={
+                    'message': "Error enabling haproxy-{0} service".format(
+                        lb_id), 'details': e.output}, status=500)

         res = webob.Response(json={'message': 'OK'}, status=202)
         res.headers['ETag'] = stream.get_md5()
@@ -229,9 +229,9 @@ class Loadbalancer(object):
         if action not in [consts.AMP_ACTION_START,
                           consts.AMP_ACTION_STOP,
                           consts.AMP_ACTION_RELOAD]:
-            return webob.Response(json=dict(
-                message='Invalid Request',
-                details="Unknown action: {0}".format(action)), status=400)
+            return webob.Response(json={
+                'message': 'Invalid Request',
+                'details': "Unknown action: {0}".format(action)}, status=400)

         self._check_lb_exists(lb_id)
         is_vrrp = (CONF.controller_worker.loadbalancer_topology ==
@@ -268,9 +268,9 @@ class Loadbalancer(object):
                 "Failed to %(action)s haproxy-%(lb_id)s service: %(err)s "
                 "%(out)s", {'action': action, 'lb_id': lb_id,
                             'err': e, 'out': e.output})
-            return webob.Response(json=dict(
-                message="Error {0}ing haproxy".format(action),
-                details=e.output), status=500)
+            return webob.Response(json={
+                'message': "Error {0}ing haproxy".format(action),
+                'details': e.output}, status=500)

         # If we are not in active/standby we need to send an IP
         # advertisement (GARP or NA). Keepalived handles this for
@@ -281,17 +281,17 @@ class Loadbalancer(object):
         if action in [consts.AMP_ACTION_STOP,
                       consts.AMP_ACTION_RELOAD]:
-            return webob.Response(json=dict(
-                message='OK',
-                details='Listener {lb_id} {action}ed'.format(
-                    lb_id=lb_id, action=action)), status=202)
+            return webob.Response(json={
+                'message': 'OK',
+                'details': 'Listener {lb_id} {action}ed'.format(
+                    lb_id=lb_id, action=action)}, status=202)

         details = (
             'Configuration file is valid\n'
             'haproxy daemon for {0} started'.format(lb_id)
         )

-        return webob.Response(json=dict(message='OK', details=details),
+        return webob.Response(json={'message': 'OK', 'details': details},
                               status=202)

     def delete_lb(self, lb_id):
@@ -309,9 +309,9 @@ class Loadbalancer(object):
         except subprocess.CalledProcessError as e:
             LOG.error("Failed to stop haproxy-%s service: %s %s",
                       lb_id, e, e.output)
-            return webob.Response(json=dict(
-                message="Error stopping haproxy",
-                details=e.output), status=500)
+            return webob.Response(json={
+                'message': "Error stopping haproxy",
+                'details': e.output}, status=500)

         # parse config and delete stats socket
         try:
@@ -354,9 +354,9 @@ class Loadbalancer(object):
             LOG.error("Failed to disable haproxy-%(lb_id)s service: "
                       "%(err)s %(out)s", {'lb_id': lb_id, 'err': e,
                                           'out': e.output})
-            return webob.Response(json=dict(
-                message="Error disabling haproxy-{0} service".format(
-                    lb_id), details=e.output), status=500)
+            return webob.Response(json={
+                'message': "Error disabling haproxy-{0} service".format(
+                    lb_id), 'details': e.output}, status=500)

         # delete the directory + init script for that listener
         shutil.rmtree(util.haproxy_dir(lb_id))
@@ -408,7 +408,7 @@ class Loadbalancer(object):
                 crt_file.write(b)
                 b = stream.read(BUFFER)

-        resp = webob.Response(json=dict(message='OK'))
+        resp = webob.Response(json={'message': 'OK'})
         resp.headers['ETag'] = stream.get_md5()
         return resp
@@ -418,16 +418,16 @@ class Loadbalancer(object):
         cert_path = self._cert_file_path(lb_id, filename)
         path_exists = os.path.exists(cert_path)
         if not path_exists:
-            return webob.Response(json=dict(
-                message='Certificate Not Found',
-                details="No certificate with filename: {f}".format(
-                    f=filename)), status=404)
+            return webob.Response(json={
+                'message': 'Certificate Not Found',
+                'details': "No certificate with filename: {f}".format(
+                    f=filename)}, status=404)

         with open(cert_path, 'r', encoding='utf-8') as crt_file:
             cert = crt_file.read()
             md5sum = md5(octavia_utils.b(cert),
                          usedforsecurity=False).hexdigest()  # nosec
-            resp = webob.Response(json=dict(md5sum=md5sum))
+            resp = webob.Response(json={'md5sum': md5sum})
             resp.headers['ETag'] = md5sum
             return resp
@@ -435,7 +435,7 @@ class Loadbalancer(object):
         self._check_ssl_filename_format(filename)
         if os.path.exists(self._cert_file_path(lb_id, filename)):
             os.remove(self._cert_file_path(lb_id, filename))
-        return webob.Response(json=dict(message='OK'))
+        return webob.Response(json={'message': 'OK'})

     def _get_listeners_on_lb(self, lb_id):
         if os.path.exists(util.pid_path(lb_id)):
@@ -456,17 +456,17 @@ class Loadbalancer(object):
         # check if we know about that lb
         if lb_id not in util.get_loadbalancers():
             raise exceptions.HTTPException(
-                response=webob.Response(json=dict(
-                    message='Loadbalancer Not Found',
-                    details="No loadbalancer with UUID: {0}".format(
-                        lb_id)), status=404))
+                response=webob.Response(json={
+                    'message': 'Loadbalancer Not Found',
+                    'details': "No loadbalancer with UUID: {0}".format(
+                        lb_id)}, status=404))

     def _check_ssl_filename_format(self, filename):
         # check if the format is (xxx.)*xxx.pem
         if not re.search(r'(\w.)+pem', filename):
             raise exceptions.HTTPException(
-                response=webob.Response(json=dict(
-                    message='Filename has wrong format'), status=400))
+                response=webob.Response(json={
+                    'message': 'Filename has wrong format'}, status=400))

     def _cert_dir(self, lb_id):
         return os.path.join(util.CONF.haproxy_amphora.base_cert_dir, lb_id)


@@ -95,9 +95,9 @@ class BaseOS(object):
             LOG.error('Failed to set up %s due to error: %s %s', interface,
                       e, e.output)
             raise exceptions.HTTPException(
-                response=webob.Response(json=dict(
-                    message='Error plugging {0}'.format(name),
-                    details=e.output), status=500))
+                response=webob.Response(json={
+                    'message': 'Error plugging {0}'.format(name),
+                    'details': e.output}, status=500))


 class Ubuntu(BaseOS):


@@ -88,7 +88,7 @@ class Plug(object):
             rendered_vips = self.render_vips(vips)
         except ValueError as e:
             vip_error_message = "Invalid VIP: {}".format(e)
-            return webob.Response(json=dict(message=vip_error_message),
+            return webob.Response(json={'message': vip_error_message},
                                   status=400)

         try:
@@ -96,7 +96,7 @@ class Plug(object):
                 gateway, host_routes)
         except ValueError as e:
             return webob.Response(
-                json=dict(message="Invalid VRRP Address: {}".format(e)),
+                json={'message': "Invalid VRRP Address: {}".format(e)},
                 status=400)

         # Check if the interface is already in the network namespace
@@ -104,7 +104,7 @@ class Plug(object):
         # network namespace
         if self._netns_interface_exists(mac_address):
             return webob.Response(
-                json=dict(message="Interface already exists"), status=409)
+                json={'message': "Interface already exists"}, status=409)

         # Check that the interface has been fully plugged
         self._interface_by_mac(mac_address)
@@ -136,9 +136,9 @@ class Plug(object):
             vips=", ".join(v['ip_address'] for v in rendered_vips)
         )

-        return webob.Response(json=dict(
-            message="OK",
-            details=vip_message), status=202)
+        return webob.Response(json={
+            'message': "OK",
+            'details': vip_message}, status=202)

     def _check_ip_addresses(self, fixed_ips):
         if fixed_ips:
@@ -153,8 +153,8 @@ class Plug(object):
         try:
             self._check_ip_addresses(fixed_ips=fixed_ips)
         except socket.error:
-            return webob.Response(json=dict(
-                message="Invalid network port"), status=400)
+            return webob.Response(json={
+                'message': "Invalid network port"}, status=400)

         # Check if the interface is already in the network namespace
         # Do not attempt to re-plug the network if it is already in the
@@ -196,12 +196,12 @@ class Plug(object):
                 fixed_ips=fixed_ips,
                 mtu=mtu)
             self._osutils.bring_interface_up(existing_interface, 'network')
-            return webob.Response(json=dict(
-                message="OK",
-                details="Updated existing interface {interface}".format(
+            return webob.Response(json={
+                'message': "OK",
+                'details': "Updated existing interface {interface}".format(
                     # TODO(rm_work): Everything in this should probably use
                     # HTTP code 200, but continuing to use 202 for consistency.
-                    interface=existing_interface)), status=202)
+                    interface=existing_interface)}, status=202)

         # This is the interface as it was initially plugged into the
         # default network namespace, this will likely always be eth1
@@ -237,10 +237,10 @@ class Plug(object):
         self._osutils.bring_interface_up(netns_interface, 'network')

-        return webob.Response(json=dict(
-            message="OK",
-            details="Plugged on interface {interface}".format(
-                interface=netns_interface)), status=202)
+        return webob.Response(json={
+            'message': "OK",
+            'details': "Plugged on interface {interface}".format(
+                interface=netns_interface)}, status=202)

     def _interface_by_mac(self, mac):
         try:
@@ -263,8 +263,8 @@ class Plug(object):
             with os.fdopen(os.open(filename, flags), 'w') as rescan_file:
                 rescan_file.write('1')
         raise exceptions.HTTPException(
-            response=webob.Response(json=dict(
-                details="No suitable network interface found"), status=404))
+            response=webob.Response(json={
+                'details': "No suitable network interface found"}, status=404))

     def _update_plugged_interfaces_file(self, interface, mac_address):
         # write interfaces to plugged_interfaces file and prevent duplicates


@@ -248,9 +248,9 @@ class Server(object):
         except Exception as e:
             LOG.error("Unable to update amphora-agent configuration: %s",
                       str(e))
-            return webob.Response(json=dict(
-                message="Unable to update amphora-agent configuration.",
-                details=str(e)), status=500)
+            return webob.Response(json={
+                'message': "Unable to update amphora-agent configuration.",
+                'details': str(e)}, status=500)

         return webob.Response(json={'message': 'OK'}, status=202)


@@ -246,7 +246,7 @@ def get_os_init_system():
 def install_netns_systemd_service():
     flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
     # mode 00644
-    mode = (stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+    mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH

     # TODO(bcafarel): implement this for other init systems
     # netns handling depends on a separate unit file


@@ -129,7 +129,7 @@ class HAProxyQuery(object):
                 continue

             if line['pxname'] not in final_results:
-                final_results[line['pxname']] = dict(members={})
+                final_results[line['pxname']] = {'members': {}}

             if line['svname'] == 'BACKEND':
                 # BACKEND describes a pool of servers in HAProxy


@@ -413,7 +413,7 @@ class UpdateHealthDb:
             try:
                 self.amphora_health_repo.replace(
                     lock_session, health['id'],
-                    last_update=(datetime.datetime.utcnow()))
+                    last_update=datetime.datetime.utcnow())
                 lock_session.commit()
             except Exception:
                 with excutils.save_and_reraise_exception():


@@ -101,7 +101,7 @@ class URLPathType(wtypes.UserType):
 class BaseMeta(wtypes.BaseMeta):

-    def __new__(cls, name, bases, dct):
+    def __new__(mcs, name, bases, dct):
         def get_tenant_id(self):
             tenant_id = getattr(self, '_tenant_id', wtypes.Unset)
             # If tenant_id was explicitly set to Unset, return that
@@ -127,7 +127,7 @@ class BaseMeta(wtypes.BaseMeta):
                 get_tenant_id, set_tenant_id)
             # This will let us know if tenant_id was explicitly set to Unset
             dct['_unset_tenant'] = False
-        return super(BaseMeta, cls).__new__(cls, name, bases, dct)
+        return super(BaseMeta, mcs).__new__(mcs, name, bases, dct)


 class BaseType(wtypes.Base, metaclass=BaseMeta):


@@ -107,7 +107,7 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
     def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary,
                         additional_vip_dicts=None):
         if additional_vip_dicts:
-            msg = ('Amphora v1 driver does not support additional_vips.')
+            msg = 'Amphora v1 driver does not support additional_vips.'
             raise exceptions.UnsupportedOptionError(
                 user_fault_string=msg,
                 operator_fault_string=msg)


@@ -248,7 +248,7 @@ class HealthMonitorController(base.BaseController):
             db_hm = self._validate_create_hm(lock_session, hm_dict)

             # Prepare the data for the driver data model
-            provider_healthmon = (driver_utils.db_HM_to_provider_HM(db_hm))
+            provider_healthmon = driver_utils.db_HM_to_provider_HM(db_hm)

             # Dispatch to the driver
             LOG.info("Sending create Health Monitor %s to provider %s",


@@ -135,7 +135,7 @@ class ListenersController(base.BaseController):
                 constants.LISTENER_PROTOCOLS_SUPPORTING_HEADER_INSERTION):
             raise exceptions.InvalidOption(
                 value='insert-headers',
-                option=('a %s protocol listener.' % listener_protocol))
+                option='a %s protocol listener.' % listener_protocol)
         if list(set(insert_header_list) - (
                 set(constants.SUPPORTED_HTTP_HEADERS +
                     constants.SUPPORTED_SSL_HEADERS))):
@@ -154,7 +154,7 @@ class ListenersController(base.BaseController):
                     headers.append(header_name)
             raise exceptions.InvalidOption(
                 value=headers,
-                option=('%s protocol listener.' % listener_protocol))
+                option='%s protocol listener.' % listener_protocol)

     def _validate_cidr_compatible_with_vip(self, vip, allowed_cidrs):
         for cidr in allowed_cidrs:


@@ -229,7 +229,7 @@ def validate_l7rule_ssl_types(l7rule):
                    rule_type)
         # log or raise the key must be splited by '-'
         elif not dn_regex.match(req_key):
-            msg = ('Invalid L7rule distinguished name field.')
+            msg = 'Invalid L7rule distinguished name field.'

     if msg:
         raise exceptions.InvalidL7Rule(msg=msg)


@@ -86,7 +86,7 @@ class LoadBalancerFlows(object):
         post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
         lb_create_flow.add(
             self.get_post_lb_amp_association_flow(
-                post_amp_prefix, topology, mark_active=(not listeners)))
+                post_amp_prefix, topology, mark_active=not listeners))

         if listeners:
             lb_create_flow.add(*self._create_listeners_flow())


@@ -1355,7 +1355,7 @@ class UpdatePoolInDB(BaseDatabaseTask):
         try:
             self.repos.update_pool_and_sp(
                 db_apis.get_session(), pool.id,
-                dict(provisioning_status=constants.ERROR))
+                {'provisioning_status': constants.ERROR})
         except Exception as e:
             LOG.error("Failed to update pool %(pool)s provisioning_status to "
                       "ERROR due to: %(except)s", {'pool': pool.id,


@@ -92,7 +92,7 @@ class LoadBalancerFlows(object):
         post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW
         lb_create_flow.add(
             self.get_post_lb_amp_association_flow(
-                post_amp_prefix, topology, mark_active=(not listeners)))
+                post_amp_prefix, topology, mark_active=not listeners))

         if listeners:
             lb_create_flow.add(*self._create_listeners_flow())


@@ -1501,7 +1501,7 @@ class UpdatePoolInDB(BaseDatabaseTask):
         try:
             self.repos.update_pool_and_sp(
                 db_apis.get_session(), pool_id,
-                dict(provisioning_status=constants.ERROR))
+                {'provisioning_status': constants.ERROR})
         except Exception as e:
             LOG.error("Failed to update pool %(pool)s provisioning_status to "
                       "ERROR due to: %(except)s", {'pool': pool_id,


@@ -55,7 +55,7 @@ class NoopManager(object):
         LOG.debug('Distributor %s delete_distributor', self.__class__.__name__)
         delete_distributor_flow = linear_flow.Flow('delete-distributor')
         delete_distributor_flow.add(NoopProvidesRequiresTask(
-            'delete-distributor-task', requires=('distributor_id')))
+            'delete-distributor-task', requires='distributor_id'))
         return delete_distributor_flow

     def get_add_vip_subflow(self):