Merge "Add Python 3.7 support"

Zuul 2019-05-16 06:30:28 +00:00 committed by Gerrit Code Review
commit 09020b6bfc
35 changed files with 152 additions and 234 deletions

View File

@ -54,7 +54,8 @@ disable=
too-many-statements,
multiple-statements,
duplicate-except,
keyword-arg-before-vararg
keyword-arg-before-vararg,
useless-object-inheritance
[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
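The one functional change here is the new useless-object-inheritance entry. Newer pylint (the 2.x series, which only runs on Python 3) flags an explicit object base as redundant, but Octavia still supports Python 2.7, where dropping the base would silently create old-style classes, so the check is disabled rather than the classes changed. A minimal sketch of what the check complains about, with a hypothetical class name:

    class Widget(object):    # flagged as useless-object-inheritance by pylint 2.x
        pass

    class Gadget:            # clean under Python 3, but an old-style class on Python 2
        pass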

View File

@ -57,7 +57,6 @@ class UdpListenerApiServerBase(object):
fail to create.
"""
pass
@abc.abstractmethod
def get_udp_listener_config(self, listener_id):
@ -69,7 +68,6 @@ class UdpListenerApiServerBase(object):
:raises Exception: If the listener cannot be found.
"""
pass
@abc.abstractmethod
def manage_udp_listener(self, listener_id, action):
@ -82,7 +80,6 @@ class UdpListenerApiServerBase(object):
:raises Exception: If the listener cannot be found.
"""
pass
@abc.abstractmethod
def get_all_udp_listeners_status(self):
@ -96,7 +93,6 @@ class UdpListenerApiServerBase(object):
:raises Exception: If the directory holding the listener PID files does not exist
"""
pass
@abc.abstractmethod
def get_udp_listener_status(self, listener_id):
@ -108,7 +104,6 @@ class UdpListenerApiServerBase(object):
:raises Exception: If the listener cannot be found.
"""
pass
@abc.abstractmethod
def delete_udp_listener(self, listener_id):
@ -120,4 +115,3 @@ class UdpListenerApiServerBase(object):
:raises Exception: If the amphora's init system is unsupported.
"""
pass
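The deletions in this file (and in the other abstract base classes further down) drop a bare pass that followed a docstring. A docstring is already a complete method body, so the extra statement is what newer pylint reports as unnecessary-pass. A minimal sketch of the resulting style, using a made-up interface rather than the real one:

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class ExampleApiServerBase(object):

        @abc.abstractmethod
        def get_listener_status(self, listener_id):
            """Return the status of one listener.

            The docstring alone is a valid body, so no trailing pass is needed.
            """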

View File

@ -37,14 +37,13 @@ def init_path(listener_id, init_system):
if init_system == consts.INIT_SYSTEMD:
return os.path.join(consts.SYSTEMD_DIR,
'haproxy-{0}.service'.format(listener_id))
elif init_system == consts.INIT_UPSTART:
if init_system == consts.INIT_UPSTART:
return os.path.join(consts.UPSTART_DIR,
'haproxy-{0}.conf'.format(listener_id))
elif init_system == consts.INIT_SYSVINIT:
if init_system == consts.INIT_SYSVINIT:
return os.path.join(consts.SYSVINIT_DIR,
'haproxy-{0}'.format(listener_id))
else:
raise UnknownInitError()
raise UnknownInitError()
def keepalived_lvs_dir():
@ -56,16 +55,15 @@ def keepalived_lvs_init_path(init_system, listener_id):
return os.path.join(consts.SYSTEMD_DIR,
consts.KEEPALIVED_SYSTEMD_PREFIX %
str(listener_id))
elif init_system == consts.INIT_UPSTART:
if init_system == consts.INIT_UPSTART:
return os.path.join(consts.UPSTART_DIR,
consts.KEEPALIVED_UPSTART_PREFIX %
str(listener_id))
elif init_system == consts.INIT_SYSVINIT:
if init_system == consts.INIT_SYSVINIT:
return os.path.join(consts.SYSVINIT_DIR,
consts.KEEPALIVED_SYSVINIT_PREFIX %
str(listener_id))
else:
raise UnknownInitError()
raise UnknownInitError()
def keepalived_backend_check_script_dir():
@ -132,12 +130,11 @@ def keepalived_dir():
def keepalived_init_path(init_system):
if init_system == consts.INIT_SYSTEMD:
return os.path.join(consts.SYSTEMD_DIR, consts.KEEPALIVED_SYSTEMD)
elif init_system == consts.INIT_UPSTART:
if init_system == consts.INIT_UPSTART:
return os.path.join(consts.UPSTART_DIR, consts.KEEPALIVED_UPSTART)
elif init_system == consts.INIT_SYSVINIT:
if init_system == consts.INIT_SYSVINIT:
return os.path.join(consts.SYSVINIT_DIR, consts.KEEPALIVED_SYSVINIT)
else:
raise UnknownInitError()
raise UnknownInitError()
def keepalived_pid_path():
@ -266,6 +263,6 @@ def get_listener_protocol(listener_id):
"""
if os.path.exists(config_path(listener_id)):
return consts.PROTOCOL_TCP
elif os.path.exists(keepalived_lvs_cfg_path(listener_id)):
if os.path.exists(keepalived_lvs_cfg_path(listener_id)):
return consts.PROTOCOL_UDP
return None
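The rewrites in this module all follow pylint's no-else-return / no-else-raise advice: once a branch ends in return or raise, a following elif or else only adds nesting, so the chain flattens into plain if statements with the final raise at function level. A generic sketch of the transformation (hypothetical function, not from the repository):

    def lookup(kind):
        if kind == 'a':
            return 'alpha'
        if kind == 'b':          # no elif needed: the branch above already returned
            return 'beta'
        raise ValueError(kind)   # no else needed: only reached when nothing matched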

View File

@ -24,7 +24,7 @@ LOG = logging.getLogger(__name__)
def round_robin_addr(addrinfo_list):
if len(addrinfo_list) <= 0:
if not addrinfo_list:
return None
addrinfo = addrinfo_list.pop(0)
addrinfo_list.append(addrinfo)
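An explicit length comparison becomes a truthiness test; an empty list is falsy, so the two guards are equivalent, and the shorter form is what pylint's len-as-condition check points at. A two-line sketch:

    addrs = []
    assert (len(addrs) <= 0) == (not addrs)   # both reject an empty address list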

View File

@ -198,7 +198,7 @@ def get_udp_listener_pool_status(listener_id):
ns_name) = get_udp_listener_resource_ipports_nsname(listener_id)
if 'Pool' not in resource_ipport_mapping:
return {}
elif 'Members' not in resource_ipport_mapping:
if 'Members' not in resource_ipport_mapping:
return {'lvs': {
'uuid': resource_ipport_mapping['Pool']['id'],
'status': constants.DOWN,

View File

@ -38,7 +38,6 @@ class AmphoraLoadBalancerDriver(object):
Builds a new configuration, pushes it to the amphora, and reloads
the listener on one amphora.
"""
pass
@abc.abstractmethod
def update(self, listener, vip):
@ -54,7 +53,6 @@ class AmphoraLoadBalancerDriver(object):
At this moment, we just build the basic structure for testing, will
add more function along with the development.
"""
pass
@abc.abstractmethod
def stop(self, listener, vip):
@ -70,7 +68,6 @@ class AmphoraLoadBalancerDriver(object):
At this moment, we just build the basic structure for testing, will
add more function along with the development.
"""
pass
@abc.abstractmethod
def start(self, listener, vip, amphora):
@ -88,7 +85,6 @@ class AmphoraLoadBalancerDriver(object):
At this moment, we just build the basic structure for testing, will
add more function along with the development.
"""
pass
@abc.abstractmethod
def delete(self, listener, vip):
@ -104,7 +100,6 @@ class AmphoraLoadBalancerDriver(object):
At this moment, we just build the basic structure for testing, will
add more function along with the development.
"""
pass
@abc.abstractmethod
def get_info(self, amphora):
@ -121,7 +116,6 @@ class AmphoraLoadBalancerDriver(object):
"packages":{"ha proxy":"1.5"}}
some information might come from querying the amphora
"""
pass
@abc.abstractmethod
def get_diagnostics(self, amphora):
@ -138,7 +132,6 @@ class AmphoraLoadBalancerDriver(object):
are healthy. The idea is that those tests are triggered less frequently
than the health gathering.
"""
pass
@abc.abstractmethod
def finalize_amphora(self, amphora):
@ -154,7 +147,6 @@ class AmphoraLoadBalancerDriver(object):
to accept listeners. Please keep in mind that amphora might be kept in
an offline pool after this call.
"""
pass
def post_vip_plug(self, amphora, load_balancer, amphorae_network_config):
"""Called after network driver has allocated and plugged the VIP
@ -171,7 +163,6 @@ class AmphoraLoadBalancerDriver(object):
This is to do any additional work needed on the amphorae to plug
the vip, such as bring up interfaces.
"""
pass
def post_network_plug(self, amphora, port):
"""Called after amphora added to network
@ -186,7 +177,6 @@ class AmphoraLoadBalancerDriver(object):
access said network. Ex: creating an interface on an amphora for a
neutron network to utilize.
"""
pass
def start_health_check(self, health_mixin):
"""Start health checks.
@ -197,7 +187,6 @@ class AmphoraLoadBalancerDriver(object):
Starts listener process and calls HealthMixin to update
databases information.
"""
pass
def stop_health_check(self):
"""Stop health checks.
@ -205,7 +194,6 @@ class AmphoraLoadBalancerDriver(object):
Stops listener process and calls HealthMixin to update
databases information.
"""
pass
def upload_cert_amp(self, amphora, pem_file):
"""Upload cert info to the amphora.
@ -217,7 +205,6 @@ class AmphoraLoadBalancerDriver(object):
Upload cert file to amphora for Controller Communication.
"""
pass
def update_agent_config(self, amphora, agent_config):
"""Upload and update the amphora agent configuration.
@ -227,7 +214,6 @@ class AmphoraLoadBalancerDriver(object):
:param agent_config: The new amphora agent configuration file.
:type agent_config: string
"""
pass
@six.add_metaclass(abc.ABCMeta)
@ -250,7 +236,6 @@ class HealthMixin(object):
only items whose health has changed need to be submitted
awesome update code
"""
pass
@six.add_metaclass(abc.ABCMeta)
@ -272,7 +257,6 @@ class StatsMixin(object):
elements are named to keep it extensible for future versions
awesome update code and code to send to ceilometer
"""
pass
@six.add_metaclass(abc.ABCMeta)
@ -289,7 +273,6 @@ class VRRPDriverMixin(object):
:param loadbalancer: loadbalancer object
"""
pass
@abc.abstractmethod
def stop_vrrp_service(self, loadbalancer):
@ -297,7 +280,6 @@ class VRRPDriverMixin(object):
:param loadbalancer: loadbalancer object
"""
pass
@abc.abstractmethod
def start_vrrp_service(self, loadbalancer):
@ -305,7 +287,6 @@ class VRRPDriverMixin(object):
:param loadbalancer: loadbalancer object
"""
pass
@abc.abstractmethod
def reload_vrrp_service(self, loadbalancer):
@ -313,7 +294,6 @@ class VRRPDriverMixin(object):
:param loadbalancer: loadbalancer object
"""
pass
@abc.abstractmethod
def get_vrrp_interface(self, amphora):
@ -321,4 +301,3 @@ class VRRPDriverMixin(object):
:param amphora: amphora object
"""
pass

View File

@ -74,7 +74,7 @@ class KeepalivedJinjaTemplater(object):
vip = loadbalancer.vip.ip_address
vip_addr = ipaddress.ip_address(
vip if isinstance(vip, six.text_type) else six.u(vip))
vip_ipv6 = True if vip_addr.version == 6 else False
vip_ipv6 = vip_addr.version == 6
# Normalize and validate the VIP subnet CIDR
vip_network_cidr = None
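True if x else False is just x when x is already a comparison; pylint reports the longer form as simplifiable-if-expression, and the assignment keeps the same boolean value. A quick sketch:

    version = 6
    vip_ipv6 = version == 6                            # a comparison already yields a bool
    assert vip_ipv6 is (True if version == 6 else False)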

View File

@ -330,7 +330,7 @@ class PaginationHelper(object):
default = PaginationHelper._get_default_column_value(
model_attr.property.columns[0].type)
attr = sa_sql.expression.case(
[(model_attr != None, # noqa: E711
[(model_attr != None, # noqa: E711 # pylint: disable=singleton-comparison
model_attr), ], else_=default)
crit_attrs.append((attr == marker_values[j]))
@ -338,7 +338,7 @@ class PaginationHelper(object):
default = PaginationHelper._get_default_column_value(
model_attr.property.columns[0].type)
attr = sa_sql.expression.case(
[(model_attr != None, # noqa: E711
[(model_attr != None, # noqa: E711 # pylint: disable=singleton-comparison
model_attr), ], else_=default)
this_sort_dir = self.sort_keys[i][1]
if this_sort_dir == constants.DESC:
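The only change in these two hunks is an extra inline suppression. SQLAlchemy columns overload !=, so model_attr != None is how an IS NOT NULL clause is built, and is not None would merely compare the column object itself; flake8's E711 was already silenced, and the new comment silences pylint's singleton-comparison on the same line. A runnable sketch of the idiom, assuming SQLAlchemy is installed and using a hypothetical model:

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()


    class Item(Base):
        __tablename__ = 'item'
        id = Column(Integer, primary_key=True)
        name = Column(String(32))


    # Builds a SQL expression, not a Python boolean.
    expr = Item.name != None  # noqa: E711 # pylint: disable=singleton-comparison
    print(expr)               # item.name IS NOT NULL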

View File

@ -75,7 +75,7 @@ class URLPathType(wtypes.UserType):
class BaseMeta(wtypes.BaseMeta):
def __new__(mcs, name, bases, dct):
def __new__(cls, name, bases, dct):
def get_tenant_id(self):
tenant_id = getattr(self, '_tenant_id', wtypes.Unset)
# If tenant_id was explicitly set to Unset, return that
@ -101,7 +101,7 @@ class BaseMeta(wtypes.BaseMeta):
get_tenant_id, set_tenant_id)
# This will let us know if tenant_id was explicitly set to Unset
dct['_unset_tenant'] = False
return super(BaseMeta, mcs).__new__(mcs, name, bases, dct)
return super(BaseMeta, cls).__new__(cls, name, bases, dct)
@six.add_metaclass(BaseMeta)
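The metaclass change only renames the first argument of __new__ from mcs to cls, which appears to match the naming convention newer pylint expects for a metaclass __new__ (the valid-metaclass-classmethod-first-arg option); behaviour is identical. A minimal sketch with a hypothetical metaclass, shown with Python 3 class syntax (the repository itself uses six.add_metaclass):

    class Meta(type):
        def __new__(cls, name, bases, dct):            # first argument named cls, not mcs
            dct.setdefault('created_by_meta', True)
            return super(Meta, cls).__new__(cls, name, bases, dct)


    class Thing(metaclass=Meta):
        pass


    assert Thing.created_by_meta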

View File

@ -154,8 +154,8 @@ class FlavorProfileController(base.BaseController):
try:
flavorprofile_dict = flavorprofile.to_dict(render_unsets=False)
if flavorprofile_dict:
db_flavor_profile = self.repositories.flavor_profile.update(
lock_session, id, **flavorprofile_dict)
self.repositories.flavor_profile.update(lock_session, id,
**flavorprofile_dict)
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
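This hunk, and the matching one in the flavors controller below, stops binding the repository call's return value to a name that is never read; the call is kept for its side effect and the dead assignment, which pylint flags as unused-variable, goes away. A generic sketch with a hypothetical helper rather than the real repository API:

    def update_record(store, key, **fields):
        store[key] = fields
        return store[key]                    # callers are free to ignore the returned row


    registry = {}
    update_record(registry, 'flavor-1', enabled=True)   # no "db_flavor = ..." binding
    assert registry['flavor-1'] == {'enabled': True}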

View File

@ -111,8 +111,8 @@ class FlavorsController(base.BaseController):
try:
flavor_dict = flavor.to_dict(render_unsets=False)
if flavor_dict:
db_flavor = self.repositories.flavor.update(lock_session, id,
**flavor_dict)
self.repositories.flavor.update(lock_session, id,
**flavor_dict)
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():

View File

@ -184,7 +184,7 @@ class HealthMonitorController(base.BaseController):
udp_connect_min_interval_health_monitor)
if conf_set < 0:
return
elif request.delay < conf_set:
if request.delay < conf_set:
raise exceptions.ValidationException(detail=_(
"The request delay value %(delay)s should be larger than "
"%(conf_set)s for %(type)s health monitor type.") % {

View File

@ -246,8 +246,8 @@ class ListenersController(base.BaseController):
constraint_list = ['uq_listener_load_balancer_id_protocol_port']
if ['id'] == de.columns:
raise exceptions.IDAlreadyExists()
elif (set(column_list) == set(de.columns) or
set(constraint_list) == set(de.columns)):
if (set(column_list) == set(de.columns) or
set(constraint_list) == set(de.columns)):
raise exceptions.DuplicateListenerEntry(
port=listener_dict.get('protocol_port'))
except odb_exceptions.DBError:

View File

@ -644,11 +644,11 @@ class LoadBalancersController(base.BaseController):
if is_children:
controller = remainder[0]
remainder = remainder[1:]
if controller == 'status' or controller == 'statuses':
if controller in ('status', 'statuses'):
return StatusController(lb_id=id), remainder
elif controller == 'stats':
if controller == 'stats':
return StatisticsController(lb_id=id), remainder
elif controller == 'failover':
if controller == 'failover':
return FailoverController(lb_id=id), remainder
return None
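Two chained equality tests against the same variable collapse into a membership test on a tuple (pylint's consider-using-in), and the remaining elifs flatten because every branch returns. A one-assert sketch of the equivalence:

    controller = 'statuses'
    assert (controller in ('status', 'statuses')) == (
        controller == 'status' or controller == 'statuses')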

View File

@ -123,8 +123,8 @@ class MemberController(base.BaseController):
constraint_list = ['uq_member_pool_id_address_protocol_port']
if ['id'] == de.columns:
raise exceptions.IDAlreadyExists()
elif (set(column_list) == set(de.columns) or
set(constraint_list) == set(de.columns)):
if (set(column_list) == set(de.columns) or
set(constraint_list) == set(de.columns)):
raise exceptions.DuplicateMemberEntry(
ip_address=member_dict.get('ip_address'),
port=member_dict.get('protocol_port'))

View File

@ -163,11 +163,11 @@ class PoolsController(base.BaseController):
"only accepts: type, persistence_timeout, "
"persistence_granularity.") % (
constants.SESSION_PERSISTENCE_SOURCE_IP))
elif request.session_persistence.cookie_name:
if request.session_persistence.cookie_name:
raise exceptions.ValidationException(detail=_(
"Cookie names are not supported for %s pools.") %
constants.PROTOCOL_UDP)
elif request.session_persistence.type in [
if request.session_persistence.type in [
constants.SESSION_PERSISTENCE_HTTP_COOKIE,
constants.SESSION_PERSISTENCE_APP_COOKIE]:
raise exceptions.ValidationException(detail=_(

View File

@ -25,19 +25,15 @@ class Cert(object):
@abc.abstractmethod
def get_certificate(self):
"""Returns the certificate."""
pass
@abc.abstractmethod
def get_intermediates(self):
"""Returns the intermediate certificates as a list."""
pass
@abc.abstractmethod
def get_private_key(self):
"""Returns the private key for the certificate."""
pass
@abc.abstractmethod
def get_private_key_passphrase(self):
"""Returns the passphrase for the private key."""
pass

View File

@ -42,7 +42,6 @@ class CertGenerator(object):
:return: PEM Encoded Signed certificate
:raises Exception: If certificate signing fails
"""
pass
@abc.abstractmethod
def generate_cert_key_pair(self, cn, validity, bit_length, passphrase):
@ -57,4 +56,3 @@ class CertGenerator(object):
certificate data
:raises Exception: If generation fails
"""
pass

View File

@ -39,10 +39,8 @@ class AmphoraAgent(gunicorn.app.base.BaseApplication):
super(AmphoraAgent, self).__init__()
def load_config(self):
config = dict(
[(key, value) for key, value in self.options.items()
if key in self.cfg.settings and value is not None]
)
config = {key: value for key, value in self.options.items()
if key in self.cfg.settings and value is not None}
for key, value in config.items():
self.cfg.set(key.lower(), value)
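Building the gunicorn config mapping with a dict comprehension replaces dict() over a list comprehension: the result is identical, no intermediate list is built, and it satisfies newer pylint's consider-using-dict-comprehension check. A standalone sketch with hypothetical option names:

    options = {'bind': '127.0.0.1:9443', 'workers': None, 'unknown': 1}
    settings = {'bind', 'workers'}            # stand-in for self.cfg.settings

    old = dict([(k, v) for k, v in options.items()
                if k in settings and v is not None])
    new = {k: v for k, v in options.items()
           if k in settings and v is not None}
    assert old == new == {'bind': '127.0.0.1:9443'}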

View File

@ -96,19 +96,18 @@ class BaseDataModel(object):
'Listener', 'Amphora', 'L7Policy',
'L7Rule']:
return obj.__class__.__name__ + obj.id
elif obj.__class__.__name__ in ['SessionPersistence', 'HealthMonitor']:
if obj.__class__.__name__ in ['SessionPersistence', 'HealthMonitor']:
return obj.__class__.__name__ + obj.pool_id
elif obj.__class__.__name__ in ['ListenerStatistics']:
if obj.__class__.__name__ in ['ListenerStatistics']:
return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
elif obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
if obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
return obj.__class__.__name__ + obj.load_balancer_id
elif obj.__class__.__name__ in ['AmphoraHealth']:
if obj.__class__.__name__ in ['AmphoraHealth']:
return obj.__class__.__name__ + obj.amphora_id
elif obj.__class__.__name__ in ['SNI']:
if obj.__class__.__name__ in ['SNI']:
return (obj.__class__.__name__ +
obj.listener_id + obj.tls_container_id)
else:
raise NotImplementedError
raise NotImplementedError
def _find_in_graph(self, key, _visited_nodes=None):
"""Locates an object with the given unique key in the current
@ -120,26 +119,25 @@ class BaseDataModel(object):
if mykey in _visited_nodes:
# Seen this node already, don't traverse further
return None
elif mykey == key:
if mykey == key:
return self
else:
_visited_nodes.append(mykey)
attr_names = [attr_name for attr_name in dir(self)
if not attr_name.startswith('_')]
for attr_name in attr_names:
attr = getattr(self, attr_name)
if isinstance(attr, BaseDataModel):
result = attr._find_in_graph(
key, _visited_nodes=_visited_nodes)
if result is not None:
return result
elif isinstance(attr, (collections.InstrumentedList, list)):
for item in attr:
if isinstance(item, BaseDataModel):
result = item._find_in_graph(
key, _visited_nodes=_visited_nodes)
if result is not None:
return result
_visited_nodes.append(mykey)
attr_names = [attr_name for attr_name in dir(self)
if not attr_name.startswith('_')]
for attr_name in attr_names:
attr = getattr(self, attr_name)
if isinstance(attr, BaseDataModel):
result = attr._find_in_graph(
key, _visited_nodes=_visited_nodes)
if result is not None:
return result
elif isinstance(attr, (collections.InstrumentedList, list)):
for item in attr:
if isinstance(item, BaseDataModel):
result = item._find_in_graph(
key, _visited_nodes=_visited_nodes)
if result is not None:
return result
# If we are here we didn't find it.
return None

View File

@ -50,7 +50,6 @@ class ComputeBase(object):
:raises ComputeBuildException: if compute failed to build amphora
:returns: UUID of amphora
"""
pass
@abc.abstractmethod
def delete(self, compute_id):
@ -58,7 +57,6 @@ class ComputeBase(object):
:param compute_id: The id of the amphora to delete
"""
pass
@abc.abstractmethod
def status(self, compute_id):
@ -67,7 +65,6 @@ class ComputeBase(object):
:param compute_id: the ID of the desired amphora
:returns: The compute "status" response ("ONLINE" or "OFFLINE")
"""
pass
@abc.abstractmethod
def get_amphora(self, compute_id):
@ -77,7 +74,6 @@ class ComputeBase(object):
:returns: the amphora object
:returns: fault message or None
"""
pass
@abc.abstractmethod
def create_server_group(self, name, policy):
@ -87,7 +83,6 @@ class ComputeBase(object):
:param policy: the policy of the server group
:returns: the server group object
"""
pass
@abc.abstractmethod
def delete_server_group(self, server_group_id):
@ -95,7 +90,6 @@ class ComputeBase(object):
:param server_group_id: the uuid of a server group
"""
pass
@abc.abstractmethod
def attach_network_or_port(self, compute_id, network_id=None,
@ -109,7 +103,6 @@ class ComputeBase(object):
:return: nova interface
:raises: Exception
"""
pass
@abc.abstractmethod
def detach_port(self, compute_id, port_id):
@ -120,7 +113,6 @@ class ComputeBase(object):
:return: None
:raises: Exception
"""
pass
@abc.abstractmethod
def validate_flavor(self, flavor_id):
@ -131,4 +123,3 @@ class ComputeBase(object):
:raises: NotFound
:raises: NotImplementedError
"""
pass

View File

@ -315,71 +315,71 @@ class UpdateHealthDb(update_base.HealthUpdateBase):
# finish processing the status updates from all of the listeners.
potential_offline_pools[pool_id] = db_pool_dict['operating_status']
return lb_status
pool = pools[pool_id]
processed_pools.append(pool_id)
# UP = HAProxy backend has working or no servers
if pool.get('status') == constants.UP:
pool_status = constants.ONLINE
# DOWN = HAProxy backend has no working servers
elif pool.get('status') == constants.DOWN:
pool_status = constants.ERROR
lb_status = constants.ERROR
else:
pool = pools[pool_id]
LOG.warning(('Pool %(pool)s reported status of '
'%(status)s'),
{'pool': pool_id,
'status': pool.get('status')})
processed_pools.append(pool_id)
# Deal with the members that are reporting from
# the Amphora
members = pool['members']
for member_id in db_pool_dict.get('members', {}):
member_status = None
member_db_status = (
db_pool_dict['members'][member_id]['operating_status'])
# UP = HAProxy backend has working or no servers
if pool.get('status') == constants.UP:
pool_status = constants.ONLINE
# DOWN = HAProxy backend has no working servers
elif pool.get('status') == constants.DOWN:
pool_status = constants.ERROR
lb_status = constants.ERROR
if member_id not in members:
if member_db_status != constants.NO_MONITOR:
member_status = constants.OFFLINE
else:
LOG.warning(('Pool %(pool)s reported status of '
'%(status)s'),
{'pool': pool_id,
'status': pool.get('status')})
status = members[member_id]
# Deal with the members that are reporting from
# the Amphora
members = pool['members']
for member_id in db_pool_dict.get('members', {}):
member_status = None
member_db_status = (
db_pool_dict['members'][member_id]['operating_status'])
if member_id not in members:
if member_db_status != constants.NO_MONITOR:
member_status = constants.OFFLINE
# Member status can be "UP" or "UP #/#"
# (transitional)
if status.startswith(constants.UP):
member_status = constants.ONLINE
# Member status can be "DOWN" or "DOWN #/#"
# (transitional)
elif status.startswith(constants.DOWN):
member_status = constants.ERROR
if pool_status == constants.ONLINE:
pool_status = constants.DEGRADED
if lb_status == constants.ONLINE:
lb_status = constants.DEGRADED
elif status == constants.DRAIN:
member_status = constants.DRAINING
elif status == constants.MAINT:
member_status = constants.OFFLINE
elif status == constants.NO_CHECK:
member_status = constants.NO_MONITOR
else:
status = members[member_id]
LOG.warning('Member %(mem)s reported '
'status of %(status)s',
{'mem': member_id,
'status': status})
# Member status can be "UP" or "UP #/#"
# (transitional)
if status.startswith(constants.UP):
member_status = constants.ONLINE
# Member status can be "DOWN" or "DOWN #/#"
# (transitional)
elif status.startswith(constants.DOWN):
member_status = constants.ERROR
if pool_status == constants.ONLINE:
pool_status = constants.DEGRADED
if lb_status == constants.ONLINE:
lb_status = constants.DEGRADED
elif status == constants.DRAIN:
member_status = constants.DRAINING
elif status == constants.MAINT:
member_status = constants.OFFLINE
elif status == constants.NO_CHECK:
member_status = constants.NO_MONITOR
else:
LOG.warning('Member %(mem)s reported '
'status of %(status)s',
{'mem': member_id,
'status': status})
try:
if (member_status is not None and
member_status != member_db_status):
self._update_status(
session, self.member_repo, constants.MEMBER,
member_id, member_status, member_db_status)
except sqlalchemy.orm.exc.NoResultFound:
LOG.error("Member %s is not able to update "
"in DB", member_id)
try:
if (member_status is not None and
member_status != member_db_status):
self._update_status(
session, self.member_repo, constants.MEMBER,
member_id, member_status, member_db_status)
except sqlalchemy.orm.exc.NoResultFound:
LOG.error("Member %s is not able to update "
"in DB", member_id)
try:
if (pool_status is not None and
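This is the largest hunk, but with the diff markers stripped it is easier to read as a pure re-indentation: an early return lb_status appears to replace an else: wrapper, so every statement that lived inside the else dedents one level, and the listing shows each block twice (the removed, deeper-indented copy followed by the added, flatter one) while the status logic itself is unchanged. A compressed sketch of the shape of that change, with made-up names:

    def pool_operating_status(pools, pool_id, db_status):
        if pool_id not in pools:
            return db_status              # early return instead of wrapping the rest in else
        pool = pools[pool_id]             # everything below dedents out of the old else
        if pool.get('status') == 'UP':
            return 'ONLINE'
        if pool.get('status') == 'DOWN':
            return 'ERROR'
        return 'UNKNOWN'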

View File

@ -85,8 +85,6 @@ class ListenersUpdate(BaseAmphoraTask):
for listener in loadbalancer.listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)
return None
class ListenerStop(BaseAmphoraTask):
"""Task to stop the listener on the vip."""
@ -103,8 +101,6 @@ class ListenerStop(BaseAmphoraTask):
self.task_utils.mark_listener_prov_status_error(listener.id)
return None
class ListenerStart(BaseAmphoraTask):
"""Task to start the listener on the vip."""
@ -121,8 +117,6 @@ class ListenerStart(BaseAmphoraTask):
self.task_utils.mark_listener_prov_status_error(listener.id)
return None
class ListenersStart(BaseAmphoraTask):
"""Task to start all listeners on the vip."""
@ -140,8 +134,6 @@ class ListenersStart(BaseAmphoraTask):
for listener in listeners:
self.task_utils.mark_listener_prov_status_error(listener.id)
return None
class ListenerDelete(BaseAmphoraTask):
"""Task to delete the listener on the vip."""

View File

@ -202,7 +202,7 @@ class ComputeActiveWait(BaseComputeTask):
if CONF.haproxy_amphora.build_rate_limit != -1:
self.rate_limit.remove_from_build_req_queue(amphora_id)
return amp
elif amp.status == constants.ERROR:
if amp.status == constants.ERROR:
raise exceptions.ComputeBuildException(fault=fault)
time.sleep(CONF.controller_worker.amp_active_wait_sec)

View File

@ -463,7 +463,6 @@ class DeallocateVIP(BaseNetworkTask):
vip = loadbalancer.vip
vip.load_balancer = loadbalancer
self.network_driver.deallocate_vip(vip)
return
class UpdateVIP(BaseNetworkTask):
@ -612,7 +611,6 @@ class ApplyQos(BaseNetworkTask):
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id,
is_revert=True,
request_qos_id=request_qos_id)
return
class ApplyQosAmphora(BaseNetworkTask):
@ -627,13 +625,12 @@ class ApplyQosAmphora(BaseNetworkTask):
except Exception:
if not is_revert:
raise
else:
LOG.warning('Failed to undo qos policy %(qos_id)s '
'on vrrp port: %(port)s from '
'amphorae: %(amp)s',
{'qos_id': request_qos_id,
'port': amp_data.vrrp_port_id,
'amp': [amp.id for amp in amp_data]})
LOG.warning('Failed to undo qos policy %(qos_id)s '
'on vrrp port: %(port)s from '
'amphorae: %(amp)s',
{'qos_id': request_qos_id,
'port': amp_data.vrrp_port_id,
'amp': [amp.id for amp in amp_data]})
def execute(self, loadbalancer, amp_data=None, update_dict=None):
"""Apply qos policy on the vrrp ports which are related with vip."""

View File

@ -33,21 +33,20 @@ class OctaviaBase(models.ModelBase):
'Listener', 'Amphora', 'L7Policy',
'L7Rule', 'Flavor', 'FlavorProfile']:
return obj.__class__.__name__ + obj.id
elif obj.__class__.__name__ in ['SessionPersistence', 'HealthMonitor']:
if obj.__class__.__name__ in ['SessionPersistence', 'HealthMonitor']:
return obj.__class__.__name__ + obj.pool_id
elif obj.__class__.__name__ in ['ListenerStatistics']:
if obj.__class__.__name__ in ['ListenerStatistics']:
return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
elif obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
if obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
return obj.__class__.__name__ + obj.load_balancer_id
elif obj.__class__.__name__ in ['AmphoraHealth']:
if obj.__class__.__name__ in ['AmphoraHealth']:
return obj.__class__.__name__ + obj.amphora_id
elif obj.__class__.__name__ in ['SNI']:
if obj.__class__.__name__ in ['SNI']:
return (obj.__class__.__name__ +
obj.listener_id + obj.tls_container_id)
elif obj.__class__.__name__ in ['Quotas']:
if obj.__class__.__name__ in ['Quotas']:
return obj.__class__.__name__ + obj.project_id
else:
raise NotImplementedError
raise NotImplementedError
def to_data_model(self, _graph_nodes=None):
"""Converts to a data model graph.

View File

@ -411,7 +411,7 @@ class Repositories(object):
quotas.in_use_load_balancer = lb_count
return False
return True
elif _class == data_models.Listener:
if _class == data_models.Listener:
# Decide which quota to use
if quotas.listener is None:
listener_quota = CONF.quotas.default_listener_quota
@ -432,7 +432,7 @@ class Repositories(object):
quotas.in_use_listener = listener_count
return False
return True
elif _class == data_models.Pool:
if _class == data_models.Pool:
# Decide which quota to use
if quotas.pool is None:
pool_quota = CONF.quotas.default_pool_quota
@ -453,7 +453,7 @@ class Repositories(object):
quotas.in_use_pool = pool_count
return False
return True
elif _class == data_models.HealthMonitor:
if _class == data_models.HealthMonitor:
# Decide which quota to use
if quotas.health_monitor is None:
hm_quota = CONF.quotas.default_health_monitor_quota
@ -474,7 +474,7 @@ class Repositories(object):
quotas.in_use_health_monitor = hm_count
return False
return True
elif _class == data_models.Member:
if _class == data_models.Member:
# Decide which quota to use
if quotas.member is None:
member_quota = CONF.quotas.default_member_quota

View File

@ -38,7 +38,6 @@ class DistributorDriver(object):
instantiate one.
The flow must store the generated distributor ID in the flow.
"""
pass
@abc.abstractmethod
def get_delete_distributor_subflow(self):
@ -52,7 +51,6 @@ class DistributorDriver(object):
(if applicable for the driver) and cleans up any associated database
records.
"""
pass
@abc.abstractmethod
def get_add_vip_subflow(self):
@ -72,7 +70,6 @@ class DistributorDriver(object):
distributor by performing the necessary steps to plug the VIP and
configure the distributor to start receiving requests on this VIP.
"""
pass
@abc.abstractmethod
def get_remove_vip_subflow(self):
@ -88,7 +85,6 @@ class DistributorDriver(object):
distributor by reconfiguring the distributor and unplugging the
associated port.
"""
pass
@abc.abstractmethod
def get_register_amphorae_subflow(self):
@ -106,7 +102,6 @@ class DistributorDriver(object):
distributor. Amphora should be ready to receive requests prior to
this call being made.
"""
pass
@abc.abstractmethod
def get_drain_amphorae_subflow(self):
@ -124,7 +119,6 @@ class DistributorDriver(object):
list. Existing connections will continue to pass traffic to the
amphorae in this list.
"""
pass
@abc.abstractmethod
def get_unregister_amphorae_subflow(self):
@ -141,4 +135,3 @@ class DistributorDriver(object):
from the distributor. Amphorae in this list will immediately stop
receiving traffic.
"""
pass

View File

@ -98,7 +98,6 @@ class AbstractNetworkDriver(object):
:return: octavia.common.data_models.VIP
:raises: AllocateVIPException, PortNotFound, SubnetNotFound
"""
pass
@abc.abstractmethod
def deallocate_vip(self, vip):
@ -109,7 +108,6 @@ class AbstractNetworkDriver(object):
:raises: DeallocateVIPException, VIPInUseException,
VIPConfigurationNotFound
"""
pass
@abc.abstractmethod
def plug_vip(self, load_balancer, vip):
@ -125,7 +123,6 @@ class AbstractNetworkDriver(object):
receive traffic to load balance.
:raises: PlugVIPException, PortNotFound
"""
pass
@abc.abstractmethod
def unplug_vip(self, load_balancer, vip):
@ -139,7 +136,6 @@ class AbstractNetworkDriver(object):
:return: octavia.common.data_models.VIP instance
:raises: UnplugVIPException, PluggedVIPNotFound
"""
pass
@abc.abstractmethod
def plug_network(self, compute_id, network_id, ip_address=None):
@ -166,7 +162,6 @@ class AbstractNetworkDriver(object):
:raises: UnplugNetworkException, AmphoraNotFound, NetworkNotFound,
NetworkException
"""
pass
@abc.abstractmethod
def get_plugged_networks(self, compute_id):
@ -189,7 +184,6 @@ class AbstractNetworkDriver(object):
:raises: MissingVIPSecurityGroup
:return: None
"""
pass
@abc.abstractmethod
def get_network(self, network_id):
@ -199,7 +193,6 @@ class AbstractNetworkDriver(object):
:return: octavia.network.data_models.Network
:raises: NetworkException, NetworkNotFound
"""
pass
@abc.abstractmethod
def get_subnet(self, subnet_id):
@ -209,7 +202,6 @@ class AbstractNetworkDriver(object):
:return: octavia.network.data_models.Subnet
:raises: NetworkException, SubnetNotFound
"""
pass
@abc.abstractmethod
def get_port(self, port_id):
@ -219,7 +211,6 @@ class AbstractNetworkDriver(object):
:return: octavia.network.data_models.Port
:raises: NetworkException, PortNotFound
"""
pass
@abc.abstractmethod
def get_network_by_name(self, network_name):
@ -229,7 +220,6 @@ class AbstractNetworkDriver(object):
:return: octavia.network.data_models.Network
:raises: NetworkException, NetworkNotFound
"""
pass
@abc.abstractmethod
def get_subnet_by_name(self, subnet_name):
@ -239,7 +229,6 @@ class AbstractNetworkDriver(object):
:return: octavia.network.data_models.Subnet
:raises: NetworkException, SubnetNotFound
"""
pass
@abc.abstractmethod
def get_port_by_name(self, port_name):
@ -249,7 +238,6 @@ class AbstractNetworkDriver(object):
:return: octavia.network.data_models.Port
:raises: NetworkException, PortNotFound
"""
pass
@abc.abstractmethod
def get_port_by_net_id_device_id(self, network_id, device_id):
@ -260,7 +248,6 @@ class AbstractNetworkDriver(object):
:return: octavia.network.data_models.Port
:raises: NetworkException, PortNotFound
"""
pass
@abc.abstractmethod
def failover_preparation(self, amphora):
@ -270,7 +257,6 @@ class AbstractNetworkDriver(object):
:return: None
:raises: PortNotFound
"""
pass
@abc.abstractmethod
def plug_port(self, amphora, port):
@ -281,7 +267,6 @@ class AbstractNetworkDriver(object):
:return: None
:raises: PlugNetworkException, AmphoraNotFound, NetworkNotFound
"""
pass
@abc.abstractmethod
def get_network_configs(self, load_balancer, amphora=None):
@ -303,7 +288,6 @@ class AbstractNetworkDriver(object):
keyed off of the amphora id the config is associated with.
:raises: NotFound, NetworkNotFound, SubnetNotFound, PortNotFound
"""
pass
@abc.abstractmethod
def wait_for_port_detach(self, amphora):
@ -318,7 +302,6 @@ class AbstractNetworkDriver(object):
:raises TimeoutException: Port did not detach in interval.
:raises PortNotFound: Port was not found by neutron.
"""
pass
@abc.abstractmethod
def update_vip_sg(self, load_balancer, vip):
@ -327,7 +310,6 @@ class AbstractNetworkDriver(object):
:param load_balancer: Load Balancer to prepare the VIP for
:param vip: The VIP to plug
"""
pass
@abc.abstractmethod
def plug_aap_port(self, load_balancer, vip, amphora, subnet):
@ -338,7 +320,6 @@ class AbstractNetworkDriver(object):
:param amphora: The amphora to plug the VIP into
:param subnet: The subnet to plug the aap into
"""
pass
@abc.abstractmethod
def unplug_aap_port(self, vip, amphora, subnet):
@ -348,7 +329,6 @@ class AbstractNetworkDriver(object):
:param amphora: The amphora to plug the VIP into
:param subnet: The subnet to plug the aap into
"""
pass
@abc.abstractmethod
def qos_enabled(self):

View File

@ -492,10 +492,9 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
except nova_client_exceptions.NotFound as e:
if 'Instance' in str(e):
raise base.AmphoraNotFound(str(e))
elif 'Network' in str(e):
if 'Network' in str(e):
raise base.NetworkNotFound(str(e))
else:
raise base.PlugNetworkException(str(e))
raise base.PlugNetworkException(str(e))
except Exception:
message = _('Error plugging amphora (compute_id: {compute_id}) '
'into network {network_id}.').format(
@ -568,10 +567,9 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
except nova_client_exceptions.NotFound as e:
if 'Instance' in str(e):
raise base.AmphoraNotFound(str(e))
elif 'Network' in str(e):
if 'Network' in str(e):
raise base.NetworkNotFound(str(e))
else:
raise base.PlugNetworkException(str(e))
raise base.PlugNetworkException(str(e))
except nova_client_exceptions.Conflict:
LOG.info('Port %(portid)s is already plugged, '
'skipping', {'portid': port.id})

View File

@ -204,11 +204,11 @@ class BaseNeutronDriver(base.AbstractNetworkDriver):
if not resource['%ss' % resource_type]:
# no items found
raise neutron_client_exceptions.NotFound()
elif unique_item:
if unique_item:
return conversion_function(resource['%ss' % resource_type][0])
else:
return list(map(conversion_function,
resource['%ss' % resource_type]))
return list(map(conversion_function,
resource['%ss' % resource_type]))
except neutron_client_exceptions.NotFound:
message = _('{resource_type} not found '
'({resource_type} Filters: {filters}.').format(

View File

@ -18,6 +18,7 @@ classifier =
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
[files]
packages =

View File

@ -8,7 +8,8 @@ flake8-import-order==0.12 # LGPLv3
mock>=2.0.0 # BSD
python-subunit>=1.0.0 # Apache-2.0/BSD
oslotest>=3.2.0 # Apache-2.0
pylint==1.9.2 # GPLv2
pylint==1.9.2;python_version<'3.0' # GPLv2
pylint>=1.9.2;python_version>='3.0' # GPLv2
testrepository>=0.0.18 # Apache-2.0/BSD
testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
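The pylint requirement is split with PEP 508 environment markers so one file serves both interpreters: Python 2 environments stay pinned at 1.9.2 (the 1.9.x line is the last pylint that runs on 2.7), while Python 3 environments may resolve to pylint 2.x, which is the release series that introduces the checks addressed throughout this change. A sketch of how such markers evaluate, assuming the packaging library is available:

    from packaging.markers import Marker

    py2_marker = Marker("python_version < '3.0'")
    py3_marker = Marker("python_version >= '3.0'")
    # Exactly one of the two is true for any given interpreter.
    print(py2_marker.evaluate(), py3_marker.evaluate())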

View File

@ -1,6 +1,6 @@
[tox]
minversion = 2.5.0
envlist = docs,py36,py27,functional,pep8,specs
envlist = docs,py37,py36,py27,functional-37,functional-36,functional,pep8,specs
skipsdist = True
[testenv]
@ -53,6 +53,10 @@ setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional
basepython = python3.6
setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional
[testenv:functional-py37]
basepython = python3.7
setenv = OS_TEST_PATH={toxinidir}/octavia/tests/functional
[testenv:debug]
basepython = python3
commands = oslo_debug_helper {posargs}
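With the expanded envlist and the new [testenv:functional-py37] section, a host that has a python3.7 interpreter can run the new targets directly, for example tox -e py37 for the unit tests and tox -e functional-py37 for the functional suite, and a plain tox run now includes them by default.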

View File

@ -8,6 +8,7 @@
- openstack-lower-constraints-jobs
- openstack-python-jobs
- openstack-python36-jobs
- openstack-python37-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check: