Fix broken Victoria branch

1. Upgrade pylint to 2.4.4, add exclusions to the tests, and
  fix some lint errors in the code

2. Fix user creation with GRANT in MySQL 8.0 (Ubuntu Focal)
In Ubuntu Bionic (18.04), the MySQL 5.7 version used to create
the user implicitly when using GRANT.
Ubuntu Focal (20.04) has mysql 8.0 and with mysql 8.0 there
is no implicit user creation with GRANT. We need to
create the user first before using GRANT command.
See also commit I97b0dcbb88c6ef7c22e3c55970211bed792bbd0d

3. Remove fwaas from the zuul.yaml
4. Remove DB migration test which is failing due to FWaaS migration
with py38
5. Fix cover tests python version in .tox
6. Fix requirements

Change-Id: I22654a5d5ccaad3185ae3365a90afba1ce870695
This commit is contained in:
asarfaty 2020-09-16 16:37:10 +02:00
parent a3b61c2d27
commit 50afa71853
47 changed files with 210 additions and 230 deletions

View File

@ -94,7 +94,21 @@ disable=
too-many-statements, too-many-statements,
cyclic-import, cyclic-import,
no-name-in-module, no-name-in-module,
bad-super-call bad-super-call,
# new for python3 version of pylint
consider-using-set-comprehension,
unnecessary-pass,
useless-object-inheritance,
raise-missing-from,
super-with-arguments,
inconsistent-return-statements,
unnecessary-comprehension,
consider-using-in,
consider-using-get,
assignment-from-none,
invalid-overridden-method,
raising-format-tuple,
comparison-with-callable
[BASIC] [BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores # Variable names can be 1 to 31 characters long, with lowercase and underscores

View File

@ -14,7 +14,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -26,7 +25,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -38,7 +36,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -50,7 +47,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -62,7 +58,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -78,7 +73,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -90,7 +84,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -102,7 +95,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -114,7 +106,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service
@ -129,7 +120,6 @@
- x/networking-l2gw - x/networking-l2gw
- openstack/networking-sfc - openstack/networking-sfc
- x/vmware-nsxlib - x/vmware-nsxlib
- openstack/neutron-fwaas
- openstack/neutron-dynamic-routing - openstack/neutron-dynamic-routing
- openstack/neutron-vpnaas - openstack/neutron-vpnaas
- x/tap-as-a-service - x/tap-as-a-service

View File

@ -17,7 +17,7 @@ openstackdocstheme==1.18.1
oslo.concurrency==3.26.0 oslo.concurrency==3.26.0
oslo.config==5.2.0 oslo.config==5.2.0
oslo.context==2.19.2 oslo.context==2.19.2
oslo.db==4.37.0 oslo.db==4.44.0
oslo.i18n==3.15.3 oslo.i18n==3.15.3
oslo.log==3.36.0 oslo.log==3.36.0
oslo.messaging==5.29.0 oslo.messaging==5.29.0
@ -33,9 +33,9 @@ pbr==4.0.0
pika-pool==0.1.3 pika-pool==0.1.3
pika==0.10.0 pika==0.10.0
prettytable==0.7.2 prettytable==0.7.2
psycopg2==2.7 psycopg2==2.8
PyMySQL==0.7.6 PyMySQL==0.10.0
pylint==1.7.1 pylint==2.4.4
python-openstackclient==5.3.0 python-openstackclient==5.3.0
reno==2.5.0 reno==2.5.0
requests==2.14.2 requests==2.14.2

View File

@ -17,7 +17,7 @@ python-openstackclient>=5.3.0 # Apache-2.0
oslo.concurrency>=3.26.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0
oslo.context>=2.19.2 # Apache-2.0 oslo.context>=2.19.2 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0
oslo.db>=4.37.0 # Apache-2.0 oslo.db>=4.44.0 # Apache-2.0
oslo.i18n>=3.15.3 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0
oslo.log>=3.36.0 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0
oslo.policy>=1.30.0 # Apache-2.0 oslo.policy>=1.30.0 # Apache-2.0

View File

@ -7,9 +7,9 @@ coverage!=4.4,>=4.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD fixtures>=3.0.0 # Apache-2.0/BSD
flake8>=2.6.0 flake8>=2.6.0
flake8-import-order==0.12 # LGPLv3 flake8-import-order==0.12 # LGPLv3
psycopg2>=2.7 # LGPL/ZPL psycopg2>=2.8 # LGPL/ZPL
PyMySQL>=0.7.6 # MIT License PyMySQL>=0.10.0 # MIT License
oslotest>=3.2.0 # Apache-2.0 oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0
testtools>=2.2.0 # MIT testtools>=2.2.0 # MIT
pylint==1.7.6 # GPLv2 pylint>=2.4.4 # GPLv2

View File

@ -23,8 +23,8 @@ sudo -H mysqladmin -u root password $DB_ROOT_PW
sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e "
DELETE FROM mysql.user WHERE User=''; DELETE FROM mysql.user WHERE User='';
FLUSH PRIVILEGES; FLUSH PRIVILEGES;
GRANT ALL PRIVILEGES ON *.* CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW';
TO '$DB_USER'@'%' identified by '$DB_PW' WITH GRANT OPTION;" GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;"
# Now create our database. # Now create our database.
mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e "

View File

@ -128,7 +128,6 @@ whitelist_externals =
commands = bandit -r vmware_nsx -n 5 -ll commands = bandit -r vmware_nsx -n 5 -ll
[testenv:cover] [testenv:cover]
basepython = python3.6
envdir = {toxworkdir}/shared envdir = {toxworkdir}/shared
setenv = {[testenv]setenv} setenv = {[testenv]setenv}
{[testenv:common]setenv} {[testenv:common]setenv}

View File

@ -141,7 +141,7 @@ class EventletApiRequest(request.ApiRequest):
if attempt <= self._retries: if attempt <= self._retries:
if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN): if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN):
continue continue
elif req.status == httplib.SERVICE_UNAVAILABLE: if req.status == httplib.SERVICE_UNAVAILABLE:
timeout = 0.5 timeout = 0.5
continue continue
# else fall through to return the error code # else fall through to return the error code

View File

@ -100,8 +100,7 @@ def fiveZeroThree(response=None):
def fourZeroThree(response=None): def fourZeroThree(response=None):
if 'read-only' in response.body: if 'read-only' in response.body:
raise ReadOnlyMode() raise ReadOnlyMode()
else: raise Forbidden()
raise Forbidden()
def zero(self, response=None): def zero(self, response=None):

View File

@ -155,7 +155,7 @@ class ApiRequest(object, metaclass=abc.ABCMeta):
if response.status not in [httplib.MOVED_PERMANENTLY, if response.status not in [httplib.MOVED_PERMANENTLY,
httplib.TEMPORARY_REDIRECT]: httplib.TEMPORARY_REDIRECT]:
break break
elif redirects >= self._redirects: if redirects >= self._redirects:
LOG.info("[%d] Maximum redirects exceeded, aborting " LOG.info("[%d] Maximum redirects exceeded, aborting "
"request", self._rid()) "request", self._rid())
break break

View File

@ -103,8 +103,7 @@ class ConfiguredAvailabilityZones(object):
opt_value=default_availability_zones, opt_value=default_availability_zones,
reason=_("The default AZ is not defined in the NSX " reason=_("The default AZ is not defined in the NSX "
"plugin")) "plugin"))
else: self._default_az = self.availability_zones[default_az_name]
self._default_az = self.availability_zones[default_az_name]
else: else:
self._default_az = self.availability_zones[self.default_name] self._default_az = self.availability_zones[self.default_name]

View File

@ -179,7 +179,7 @@ class ExtendedSecurityGroupPropertiesMixin(object):
def _is_policy_security_group(self, context, security_group_id): def _is_policy_security_group(self, context, security_group_id):
sg_prop = self._get_security_group_properties(context, sg_prop = self._get_security_group_properties(context,
security_group_id) security_group_id)
return True if sg_prop.policy else False return bool(sg_prop.policy)
def _get_security_group_policy(self, context, security_group_id): def _get_security_group_policy(self, context, security_group_id):
sg_prop = self._get_security_group_properties(context, sg_prop = self._get_security_group_properties(context,

View File

@ -52,8 +52,7 @@ def lsn_get_for_network(context, network_id, raise_on_err=True):
LOG.error(msg, network_id) LOG.error(msg, network_id)
raise p_exc.LsnNotFound(entity='network', raise p_exc.LsnNotFound(entity='network',
entity_id=network_id) entity_id=network_id)
else: LOG.warning(msg, network_id)
LOG.warning(msg, network_id)
def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id): def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id):

View File

@ -64,7 +64,7 @@ def get_nsx_switch_ids(session, cluster, neutron_network_id):
if not nsx_switches: if not nsx_switches:
LOG.warning("Unable to find NSX switches for Neutron network " LOG.warning("Unable to find NSX switches for Neutron network "
"%s", neutron_network_id) "%s", neutron_network_id)
return return []
nsx_switch_ids = [] nsx_switch_ids = []
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
for nsx_switch in nsx_switches: for nsx_switch in nsx_switches:
@ -104,10 +104,9 @@ class LsnManager(object):
network_id) network_id)
raise p_exc.LsnNotFound(entity='network', raise p_exc.LsnNotFound(entity='network',
entity_id=network_id) entity_id=network_id)
else: LOG.warning('Unable to find Logical Service Node for '
LOG.warning('Unable to find Logical Service Node for ' 'the requested network %s.',
'the requested network %s.', network_id)
network_id)
def lsn_create(self, context, network_id): def lsn_create(self, context, network_id):
"""Create a LSN associated to the network.""" """Create a LSN associated to the network."""
@ -147,11 +146,10 @@ class LsnManager(object):
raise p_exc.LsnPortNotFound(lsn_id=lsn_id, raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
entity='subnet', entity='subnet',
entity_id=subnet_id) entity_id=subnet_id)
else: LOG.warning('Unable to find Logical Service Node Port '
LOG.warning('Unable to find Logical Service Node Port ' 'for LSN %(lsn_id)s and subnet '
'for LSN %(lsn_id)s and subnet ' '%(subnet_id)s',
'%(subnet_id)s', {'lsn_id': lsn_id, 'subnet_id': subnet_id})
{'lsn_id': lsn_id, 'subnet_id': subnet_id})
return (lsn_id, None) return (lsn_id, None)
else: else:
return (lsn_id, lsn_port_id) return (lsn_id, lsn_port_id)
@ -174,11 +172,10 @@ class LsnManager(object):
raise p_exc.LsnPortNotFound(lsn_id=lsn_id, raise p_exc.LsnPortNotFound(lsn_id=lsn_id,
entity='MAC', entity='MAC',
entity_id=mac) entity_id=mac)
else: LOG.warning('Unable to find Logical Service Node '
LOG.warning('Unable to find Logical Service Node ' 'Port for LSN %(lsn_id)s and mac address '
'Port for LSN %(lsn_id)s and mac address ' '%(mac)s',
'%(mac)s', {'lsn_id': lsn_id, 'mac': mac})
{'lsn_id': lsn_id, 'mac': mac})
return (lsn_id, None) return (lsn_id, None)
else: else:
return (lsn_id, lsn_port_id) return (lsn_id, lsn_port_id)

View File

@ -50,7 +50,7 @@ class DhcpMetadataBuilder(object):
def router_id_get(self, context, subnet=None): def router_id_get(self, context, subnet=None):
"""Return the router and interface used for the subnet.""" """Return the router and interface used for the subnet."""
if not subnet: if not subnet:
return return None
network_id = subnet['network_id'] network_id = subnet['network_id']
filters = { filters = {
'network_id': [network_id], 'network_id': [network_id],

View File

@ -208,8 +208,7 @@ class DvsManager(VCManagerBase):
# NOTE(garyk): update cache # NOTE(garyk): update cache
return val return val
raise exceptions.NetworkNotFound(net_id=net_id) raise exceptions.NetworkNotFound(net_id=net_id)
else: return self._get_portgroup(net_id)
return self._get_portgroup(net_id)
def _is_vlan_network_by_moref(self, moref): def _is_vlan_network_by_moref(self, moref):
""" """
@ -990,4 +989,3 @@ class ClusterManager(VCManagerBase):
class VCManager(DvsManager, VMManager, ClusterManager): class VCManager(DvsManager, VMManager, ClusterManager):
"""Management class for all vc related tasks.""" """Management class for all vc related tasks."""
pass

View File

@ -71,7 +71,7 @@ def lsn_for_network_get(cluster, network_id):
cluster=cluster)['results'] cluster=cluster)['results']
if not results: if not results:
raise exception.NotFound() raise exception.NotFound()
elif len(results) == 1: if len(results) == 1:
return results[0]['uuid'] return results[0]['uuid']
@ -127,7 +127,7 @@ def _lsn_port_get(cluster, lsn_id, filters):
cluster=cluster)['results'] cluster=cluster)['results']
if not results: if not results:
raise exception.NotFound() raise exception.NotFound()
elif len(results) == 1: if len(results) == 1:
return results[0]['uuid'] return results[0]['uuid']

View File

@ -52,8 +52,8 @@ class NsxPluginBase(db_base_plugin_v2.NeutronDbPluginV2,
address_scope_db.AddressScopeDbMixin): address_scope_db.AddressScopeDbMixin):
"""Common methods for NSX-V, NSX-V3 and NSX-P plugins""" """Common methods for NSX-V, NSX-V3 and NSX-P plugins"""
@property @staticmethod
def plugin_type(self): def plugin_type():
return "Unknown" return "Unknown"
@staticmethod @staticmethod

View File

@ -33,7 +33,7 @@ class NsxV3AvailabilityZone(common_az.ConfiguredAvailabilityZone):
def get_az_opts(self): def get_az_opts(self):
# Should be implemented by children # Should be implemented by children
pass return {}
def init_from_config_section(self, az_name, mandatory_dhcp=True): def init_from_config_section(self, az_name, mandatory_dhcp=True):
az_info = self.get_az_opts() az_info = self.get_az_opts()

View File

@ -197,7 +197,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def _setup_rpc(self): def _setup_rpc(self):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return
@property @property
def support_external_port_tagging(self): def support_external_port_tagging(self):
@ -209,7 +209,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def update_port_nsx_tags(self, context, port_id, tags, is_delete=False): def update_port_nsx_tags(self, context, port_id, tags, is_delete=False):
"""Can be implemented by each plugin to update the backend port tags""" """Can be implemented by each plugin to update the backend port tags"""
pass return
def start_rpc_listeners(self): def start_rpc_listeners(self):
if self.start_rpc_listeners_called: if self.start_rpc_listeners_called:
@ -247,6 +247,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
if len(tag_parts) != 2: if len(tag_parts) != 2:
LOG.warning("Skipping tag %s for port %s: wrong format", LOG.warning("Skipping tag %s for port %s: wrong format",
external_tag, port_id) external_tag, port_id)
return {}
else: else:
return {'scope': tag_parts[0][:nsxlib_utils.MAX_RESOURCE_TYPE_LEN], return {'scope': tag_parts[0][:nsxlib_utils.MAX_RESOURCE_TYPE_LEN],
'tag': tag_parts[1][:nsxlib_utils.MAX_TAG_LEN]} 'tag': tag_parts[1][:nsxlib_utils.MAX_TAG_LEN]}
@ -264,6 +265,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
if tags_plugin: if tags_plugin:
extra_tags = tags_plugin.get_tags(context, 'ports', port_id) extra_tags = tags_plugin.get_tags(context, 'ports', port_id)
return self._translate_external_tags(extra_tags['tags'], port_id) return self._translate_external_tags(extra_tags['tags'], port_id)
return None
def _get_interface_subnet(self, context, interface_info): def _get_interface_subnet(self, context, interface_info):
is_port, is_sub = self._validate_interface_info(interface_info) is_port, is_sub = self._validate_interface_info(interface_info)
@ -284,6 +286,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
if subnet_id: if subnet_id:
return self.get_subnet(context, subnet_id) return self.get_subnet(context, subnet_id)
return None
def _get_interface_network_id(self, context, interface_info, subnet=None): def _get_interface_network_id(self, context, interface_info, subnet=None):
if subnet: if subnet:
@ -397,11 +400,10 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
if validators.is_attr_set(address_pairs): if validators.is_attr_set(address_pairs):
if not port_security: if not port_security:
raise addr_exc.AddressPairAndPortSecurityRequired() raise addr_exc.AddressPairAndPortSecurityRequired()
else: self._validate_address_pairs(address_pairs)
self._validate_address_pairs(address_pairs) self._validate_number_of_address_pairs(port_data)
self._validate_number_of_address_pairs(port_data) self._process_create_allowed_address_pairs(context, port_data,
self._process_create_allowed_address_pairs(context, port_data, address_pairs)
address_pairs)
else: else:
port_data[addr_apidef.ADDRESS_PAIRS] = [] port_data[addr_apidef.ADDRESS_PAIRS] = []
@ -471,7 +473,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# has address pairs in request # has address pairs in request
if has_addr_pairs: if has_addr_pairs:
raise addr_exc.AddressPairAndPortSecurityRequired() raise addr_exc.AddressPairAndPortSecurityRequired()
elif not delete_addr_pairs: if not delete_addr_pairs:
# check if address pairs are in db # check if address pairs are in db
updated_port[addr_apidef.ADDRESS_PAIRS] = ( updated_port[addr_apidef.ADDRESS_PAIRS] = (
self.get_allowed_address_pairs(context, id)) self.get_allowed_address_pairs(context, id))
@ -640,6 +642,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
subnet = self.get_subnet(context.elevated(), subnet = self.get_subnet(context.elevated(),
fixed_ip_list[i]['subnet_id']) fixed_ip_list[i]['subnet_id'])
return subnet['ip_version'] return subnet['ip_version']
return None
ipver1 = get_fixed_ip_version(0) ipver1 = get_fixed_ip_version(0)
ipver2 = get_fixed_ip_version(1) ipver2 = get_fixed_ip_version(1)
@ -935,23 +938,23 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def _ens_qos_supported(self): def _ens_qos_supported(self):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return False
def _has_native_dhcp_metadata(self): def _has_native_dhcp_metadata(self):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return False
def _get_nsx_net_tz_id(self, nsx_net): def _get_nsx_net_tz_id(self, nsx_net):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return 0
def _get_network_nsx_id(self, context, neutron_id): def _get_network_nsx_id(self, context, neutron_id):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return 0
def _get_tier0_uplink_cidrs(self, tier0_id): def _get_tier0_uplink_cidrs(self, tier0_id):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return []
def _is_ens_tz_net(self, context, net_id): def _is_ens_tz_net(self, context, net_id):
"""Return True if the network is based on an END transport zone""" """Return True if the network is based on an END transport zone"""
@ -967,7 +970,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def _is_overlay_network(self, context, network_id): def _is_overlay_network(self, context, network_id):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return False
def _generate_segment_id(self, context, physical_network, net_data, def _generate_segment_id(self, context, physical_network, net_data,
restricted_vlans): restricted_vlans):
@ -1169,7 +1172,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
"""Validate that the network TZ matches the mdproxy edge cluster """Validate that the network TZ matches the mdproxy edge cluster
Should be implemented by each plugin. Should be implemented by each plugin.
""" """
pass return
def _network_is_nsx_net(self, context, network_id): def _network_is_nsx_net(self, context, network_id):
bindings = nsx_db.get_network_bindings(context.session, network_id) bindings = nsx_db.get_network_bindings(context.session, network_id)
@ -1194,7 +1197,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
db_entry = context.session.query(models_v2.Network).filter_by( db_entry = context.session.query(models_v2.Network).filter_by(
id=network_id).first() id=network_id).first()
if db_entry: if db_entry:
return True if db_entry.vlan_transparent else False return bool(db_entry.vlan_transparent)
def _is_backend_port(self, context, port_data, delete=False): def _is_backend_port(self, context, port_data, delete=False):
# Can be implemented by each plugin # Can be implemented by each plugin
@ -1315,7 +1318,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
as the subnets attached to the Tier1 router as the subnets attached to the Tier1 router
Should be implemented by each plugin. Should be implemented by each plugin.
""" """
pass return
def _get_router_gw_info(self, context, router_id): def _get_router_gw_info(self, context, router_id):
router = self.get_router(context, router_id) router = self.get_router(context, router_id)
@ -1423,16 +1426,14 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# Advertise NAT routes if enable SNAT to support FIP. In the NoNAT # Advertise NAT routes if enable SNAT to support FIP. In the NoNAT
# use case, only NSX connected routes need to be advertised. # use case, only NSX connected routes need to be advertised.
actions['advertise_route_nat_flag'] = ( actions['advertise_route_nat_flag'] = bool(new_enable_snat)
True if new_enable_snat else False) actions['advertise_route_connected_flag'] = bool(not new_enable_snat)
actions['advertise_route_connected_flag'] = (
True if not new_enable_snat else False)
# the purpose of this var is to be able to differ between # the purpose of this var is to be able to differ between
# adding a gateway w/o snat and adding snat (when adding/removing gw # adding a gateway w/o snat and adding snat (when adding/removing gw
# the snat option is on by default). # the snat option is on by default).
new_with_snat = True if (new_enable_snat and newaddr) else False new_with_snat = bool(new_enable_snat and newaddr)
has_gw = True if newaddr else False has_gw = bool(newaddr)
if sr_currently_exists: if sr_currently_exists:
# currently there is a service router on the backend # currently there is a service router on the backend
@ -2023,7 +2024,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def _get_net_dhcp_relay(self, context, net_id): def _get_net_dhcp_relay(self, context, net_id):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return None
def _get_ipv6_subnet(self, context, network): def _get_ipv6_subnet(self, context, network):
for subnet in network.subnets: for subnet in network.subnets:
@ -2281,7 +2282,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def _get_neutron_net_ids_by_nsx_id(self, context, nsx_id): def _get_neutron_net_ids_by_nsx_id(self, context, nsx_id):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return []
def _validate_number_of_subnet_static_routes(self, subnet_input): def _validate_number_of_subnet_static_routes(self, subnet_input):
s = subnet_input['subnet'] s = subnet_input['subnet']
@ -2673,7 +2674,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
count += 1 count += 1
if count > 1: if count > 1:
return False return False
return True if count == 1 else False return bool(count == 1)
def _cidrs_overlap(self, cidr0, cidr1): def _cidrs_overlap(self, cidr0, cidr1):
return cidr0.first <= cidr1.last and cidr1.first <= cidr0.last return cidr0.first <= cidr1.last and cidr1.first <= cidr0.last
@ -2906,7 +2907,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
def verify_sr_at_backend(self, context, router_id): def verify_sr_at_backend(self, context, router_id):
"""Should be implemented by each plugin""" """Should be implemented by each plugin"""
pass return
class TagsCallbacks(object): class TagsCallbacks(object):

View File

@ -130,6 +130,8 @@ def get_client_cert_provider(conf_path=cfg.CONF.nsx_v3):
# when new connection is opened, and deleted immediately after. # when new connection is opened, and deleted immediately after.
return get_DbCertProvider(conf_path) return get_DbCertProvider(conf_path)
return None
def get_nsxlib_wrapper(nsx_username=None, nsx_password=None, basic_auth=False, def get_nsxlib_wrapper(nsx_username=None, nsx_password=None, basic_auth=False,
plugin_conf=None, allow_overwrite_header=False, plugin_conf=None, allow_overwrite_header=False,

View File

@ -488,10 +488,9 @@ class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin,
addr_apidef.ADDRESS_PAIRS)): addr_apidef.ADDRESS_PAIRS)):
if not port_security: if not port_security:
raise addr_exc.AddressPairAndPortSecurityRequired() raise addr_exc.AddressPairAndPortSecurityRequired()
else: self._process_create_allowed_address_pairs(
self._process_create_allowed_address_pairs( context, neutron_db,
context, neutron_db, port_data[addr_apidef.ADDRESS_PAIRS])
port_data[addr_apidef.ADDRESS_PAIRS])
else: else:
# remove ATTR_NOT_SPECIFIED # remove ATTR_NOT_SPECIFIED
port_data[addr_apidef.ADDRESS_PAIRS] = [] port_data[addr_apidef.ADDRESS_PAIRS] = []
@ -544,7 +543,7 @@ class NsxDvsV2(addr_pair_db.AllowedAddressPairsMixin,
# has address pairs in request # has address pairs in request
if has_addr_pairs: if has_addr_pairs:
raise addr_exc.AddressPairAndPortSecurityRequired() raise addr_exc.AddressPairAndPortSecurityRequired()
elif not delete_addr_pairs: if not delete_addr_pairs:
# check if address pairs are in db # check if address pairs are in db
ret_port[addr_apidef.ADDRESS_PAIRS] = ( ret_port[addr_apidef.ADDRESS_PAIRS] = (
self.get_allowed_address_pairs(context, id)) self.get_allowed_address_pairs(context, id))

View File

@ -69,12 +69,11 @@ class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
if self.is_default(): if self.is_default():
raise cfg.RequiredOptError(config_name, raise cfg.RequiredOptError(config_name,
group=cfg.OptGroup('nsx_p')) group=cfg.OptGroup('nsx_p'))
else: msg = (_("No %(res)s provided for availability "
msg = (_("No %(res)s provided for availability " "zone %(az)s") % {
"zone %(az)s") % { 'res': config_name,
'res': config_name, 'az': self.name})
'az': self.name}) raise nsx_exc.NsxPluginException(err_msg=msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
return None return None
try: try:
@ -101,13 +100,12 @@ class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
if self.is_default(): if self.is_default():
raise cfg.RequiredOptError(config_name, raise cfg.RequiredOptError(config_name,
group=cfg.OptGroup('nsx_p')) group=cfg.OptGroup('nsx_p'))
else: msg = (_("Could not find %(res)s %(id)s for availability "
msg = (_("Could not find %(res)s %(id)s for availability " "zone %(az)s") % {
"zone %(az)s") % { 'res': config_name,
'res': config_name, 'id': name_or_id,
'id': name_or_id, 'az': self.name})
'az': self.name}) raise nsx_exc.NsxPluginException(err_msg=msg)
raise nsx_exc.NsxPluginException(err_msg=msg)
def translate_configured_names_to_uuids(self, nsxpolicy, nsxlib=None, def translate_configured_names_to_uuids(self, nsxpolicy, nsxlib=None,
search_scope=None): search_scope=None):

View File

@ -1105,7 +1105,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
def _is_dhcp_network(self, context, net_id): def _is_dhcp_network(self, context, net_id):
dhcp_port = self._get_net_dhcp_port(context, net_id) dhcp_port = self._get_net_dhcp_port(context, net_id)
return True if dhcp_port else False return bool(dhcp_port)
def _get_segment_subnets(self, context, net_id, net_az=None, def _get_segment_subnets(self, context, net_id, net_az=None,
interface_subnets=None, interface_subnets=None,
@ -1164,7 +1164,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
not validators.is_attr_set(dns_nameservers)): not validators.is_attr_set(dns_nameservers)):
# Use pre-configured dns server # Use pre-configured dns server
dns_nameservers = net_az.nameservers dns_nameservers = net_az.nameservers
is_ipv6 = True if dhcp_subnet.get('ip_version') == 6 else False is_ipv6 = bool(dhcp_subnet.get('ip_version') == 6)
server_ip = "%s/%s" % (dhcp_server_ip, cidr_prefix) server_ip = "%s/%s" % (dhcp_server_ip, cidr_prefix)
kwargs = {'server_address': server_ip, kwargs = {'server_address': server_ip,
'dns_servers': dns_nameservers} 'dns_servers': dns_nameservers}
@ -2457,7 +2457,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
def service_router_has_loadbalancers(self, context, router_id): def service_router_has_loadbalancers(self, context, router_id):
service = lb_utils.get_router_nsx_lb_service(self.nsxpolicy, router_id) service = lb_utils.get_router_nsx_lb_service(self.nsxpolicy, router_id)
return True if service else False return bool(service)
def service_router_has_vpnaas(self, context, router_id): def service_router_has_vpnaas(self, context, router_id):
"""Return True if there is a vpn service attached to this router""" """Return True if there is a vpn service attached to this router"""
@ -2663,7 +2663,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
self._run_under_transaction(_do_add_nat) self._run_under_transaction(_do_add_nat)
# always advertise ipv6 subnets if gateway is set # always advertise ipv6 subnets if gateway is set
advertise_ipv6_subnets = True if info else False advertise_ipv6_subnets = bool(info)
self._update_router_advertisement_rules(router_id, self._update_router_advertisement_rules(router_id,
router_subnets, router_subnets,
advertise_ipv6_subnets) advertise_ipv6_subnets)
@ -2762,8 +2762,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
"deletion, but going on with the deletion anyway: " "deletion, but going on with the deletion anyway: "
"%s", router_id, e) "%s", router_id, e)
ret_val = super(NsxPolicyPlugin, self).delete_router( super(NsxPolicyPlugin, self).delete_router(context, router_id)
context, router_id)
try: try:
self.nsxpolicy.tier1.delete_locale_service(router_id) self.nsxpolicy.tier1.delete_locale_service(router_id)
@ -2783,8 +2782,6 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
"Neutron database: %(e)s") % {'id': router_id, 'e': e}) "Neutron database: %(e)s") % {'id': router_id, 'e': e})
nsx_exc.NsxPluginException(err_msg=msg) nsx_exc.NsxPluginException(err_msg=msg)
return ret_val
def _get_static_route_id(self, route): def _get_static_route_id(self, route):
return "%s-%s" % (route['destination'].replace('/', '_'), return "%s-%s" % (route['destination'].replace('/', '_'),
route['nexthop']) route['nexthop'])

View File

@ -89,7 +89,7 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
def update_router(self, context, router_id, router): def update_router(self, context, router_id, router):
r = router['router'] r = router['router']
self._validate_no_size(r) self._validate_no_size(r)
is_routes_update = True if 'routes' in r else False is_routes_update = bool('routes' in r)
gw_info = self.plugin._extract_external_gw(context, router, gw_info = self.plugin._extract_external_gw(context, router,
is_extract=True) is_extract=True)
super(nsx_v.NsxVPluginV2, self.plugin).update_router( super(nsx_v.NsxVPluginV2, self.plugin).update_router(
@ -236,9 +236,8 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
if router_id in dist_routers: if router_id in dist_routers:
# attach to the same router again # attach to the same router again
raise n_exc.InvalidInput(error_message=err_msg) raise n_exc.InvalidInput(error_message=err_msg)
else: # attach to multiple routers
# attach to multiple routers raise l3_exc.RouterInterfaceAttachmentConflict(reason=err_msg)
raise l3_exc.RouterInterfaceAttachmentConflict(reason=err_msg)
# Validate that the subnet is not a v6 one # Validate that the subnet is not a v6 one
subnet = self.plugin.get_subnet(context.elevated(), subnet_id) subnet = self.plugin.get_subnet(context.elevated(), subnet_id)
if (subnet.get('ip_version') == 6 or if (subnet.get('ip_version') == 6 or

View File

@ -48,7 +48,7 @@ class RouterExclusiveDriver(router_driver.RouterBaseDriver):
def update_router(self, context, router_id, router): def update_router(self, context, router_id, router):
r = router['router'] r = router['router']
is_routes_update = True if 'routes' in r else False is_routes_update = bool('routes' in r)
gw_info = self.plugin._extract_external_gw(context, router, gw_info = self.plugin._extract_external_gw(context, router,
is_extract=True) is_extract=True)
@ -105,7 +105,7 @@ class RouterExclusiveDriver(router_driver.RouterBaseDriver):
# Add DB attributes to the router data structure # Add DB attributes to the router data structure
# before creating it as an exclusive router # before creating it as an exclusive router
router_attr = self._build_router_data_from_db(router_db, router) router_attr = self._build_router_data_from_db(router_db, router)
allow_metadata = True if self.plugin.metadata_proxy_handler else False allow_metadata = bool(self.plugin.metadata_proxy_handler)
self.create_router(context, self.create_router(context,
router_attr, router_attr,
allow_metadata=allow_metadata, allow_metadata=allow_metadata,

View File

@ -82,7 +82,7 @@ class RouterTypeManager(stevedore.named.NamedExtensionManager):
if driver: if driver:
return rt return rt
raise nsx_exc.NoRouterAvailable() raise nsx_exc.NoRouterAvailable()
elif context.is_admin: if context.is_admin:
driver = self.drivers.get(router_type) driver = self.drivers.get(router_type)
if driver: if driver:
return router_type return router_type

View File

@ -1787,21 +1787,19 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
raise n_exc.BadRequest( raise n_exc.BadRequest(
resource='networks', resource='networks',
msg=msg) msg=msg)
else: set_len = len(ip_addresses)
set_len = len(ip_addresses) ip_addresses.add(ap['ip_address'])
ip_addresses.add(ap['ip_address']) if len(ip_addresses) == set_len:
if len(ip_addresses) == set_len: msg = _('IP address %(ip)s is allowed '
msg = _('IP address %(ip)s is allowed ' 'by more than 1 logical port. '
'by more than 1 logical port. ' 'This is not supported by the '
'This is not supported by the ' 'backend. Port security cannot '
'backend. Port security cannot ' 'be enabled for network %(net)s') % {
'be enabled for network %(net)s') % { 'ip': ap['ip_address'], 'net': id}
'ip': ap['ip_address'], 'net': id} LOG.error(msg)
LOG.error(msg) raise n_exc.BadRequest(
raise n_exc.BadRequest( resource='networks', msg=msg)
resource='networks', valid_ports.append(port)
msg=msg)
valid_ports.append(port)
try: try:
sg_policy_id, predefined = ( sg_policy_id, predefined = (
self._prepare_spoofguard_policy( self._prepare_spoofguard_policy(
@ -1848,7 +1846,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
net_morefs = nsx_db.get_nsx_switch_ids(context.session, id) net_morefs = nsx_db.get_nsx_switch_ids(context.session, id)
else: else:
net_morefs = [] net_morefs = []
backend_network = True if len(net_morefs) > 0 else False backend_network = bool(len(net_morefs) > 0)
self._validate_network_qos(context, net_attrs, backend_network) self._validate_network_qos(context, net_attrs, backend_network)
# PortSecurity validation checks # PortSecurity validation checks
@ -2822,7 +2820,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# (even if not compute port to be on the safe side) # (even if not compute port to be on the safe side)
self._delete_dhcp_static_binding( self._delete_dhcp_static_binding(
context, neutron_db_port, context, neutron_db_port,
log_error=(True if compute_port else False)) log_error=bool(compute_port))
def base_delete_subnet(self, context, subnet_id): def base_delete_subnet(self, context, subnet_id):
with locking.LockManager.get_lock('neutron-base-subnet'): with locking.LockManager.get_lock('neutron-base-subnet'):
@ -3350,7 +3348,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
if r.get('router_type') == constants.SHARED: if r.get('router_type') == constants.SHARED:
msg = _("Cannot specify router-size for shared router") msg = _("Cannot specify router-size for shared router")
raise n_exc.BadRequest(resource="router", msg=msg) raise n_exc.BadRequest(resource="router", msg=msg)
elif r.get('distributed') is True: if r.get('distributed') is True:
msg = _("Cannot specify router-size for distributed router") msg = _("Cannot specify router-size for distributed router")
raise n_exc.BadRequest(resource="router", msg=msg) raise n_exc.BadRequest(resource="router", msg=msg)
else: else:
@ -3556,42 +3554,41 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
if r["distributed"]: if r["distributed"]:
err_msg = _('Unable to update distributed mode') err_msg = _('Unable to update distributed mode')
raise n_exc.InvalidInput(error_message=err_msg) raise n_exc.InvalidInput(error_message=err_msg)
else: # should migrate the router because its type changed
# should migrate the router because its type changed new_router_type = router['router']['router_type']
new_router_type = router['router']['router_type'] self._validate_router_size(router)
self._validate_router_size(router) self._validate_router_migration(
self._validate_router_migration( context, router_id, new_router_type, r)
context, router_id, new_router_type, r)
# remove the router from the old pool, and free resources # remove the router from the old pool, and free resources
old_router_driver = \ old_router_driver = \
self._router_managers.get_tenant_router_driver( self._router_managers.get_tenant_router_driver(
context, r['router_type']) context, r['router_type'])
old_router_driver.detach_router(context, router_id, router) old_router_driver.detach_router(context, router_id, router)
# update the router-type # update the router-type
with db_api.CONTEXT_WRITER.using(context): with db_api.CONTEXT_WRITER.using(context):
router_db = self._get_router(context, router_id) router_db = self._get_router(context, router_id)
self._process_nsx_router_create( self._process_nsx_router_create(
context, router_db, router['router']) context, router_db, router['router'])
# update availability zone # update availability zone
router['router']['availability_zone_hints'] = r.get( router['router']['availability_zone_hints'] = r.get(
'availability_zone_hints') 'availability_zone_hints')
# add the router to the new pool # add the router to the new pool
appliance_size = router['router'].get(ROUTER_SIZE) appliance_size = router['router'].get(ROUTER_SIZE)
new_router_driver = \ new_router_driver = \
self._router_managers.get_tenant_router_driver( self._router_managers.get_tenant_router_driver(
context, new_router_type) context, new_router_type)
new_router_driver.attach_router( new_router_driver.attach_router(
context, context,
router_id, router_id,
router, router,
appliance_size=appliance_size) appliance_size=appliance_size)
# continue to update the router with the new driver # continue to update the router with the new driver
# but remove the router-size that was already updated # but remove the router-size that was already updated
router['router'].pop(ROUTER_SIZE, None) router['router'].pop(ROUTER_SIZE, None)
if (validators.is_attr_set(gw_info) and if (validators.is_attr_set(gw_info) and
not gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)): not gw_info.get('enable_snat', cfg.CONF.enable_snat_by_default)):
@ -4475,7 +4472,7 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
raise n_exc.InvalidInput(error_message=msg) raise n_exc.InvalidInput(error_message=msg)
new_policy = security_group.get(sg_policy.POLICY) new_policy = security_group.get(sg_policy.POLICY)
sg_with_policy = True if new_policy else False sg_with_policy = bool(new_policy)
else: else:
# called from update_security_group. # called from update_security_group.
# Check if the existing security group has policy or not # Check if the existing security group has policy or not
@ -4528,8 +4525,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
self._validate_security_group(context, sg_data, default_sg) self._validate_security_group(context, sg_data, default_sg)
with db_api.CONTEXT_WRITER.using(context): with db_api.CONTEXT_WRITER.using(context):
is_provider = True if sg_data.get(provider_sg.PROVIDER) else False is_provider = bool(sg_data.get(provider_sg.PROVIDER))
is_policy = True if sg_data.get(sg_policy.POLICY) else False is_policy = bool(sg_data.get(sg_policy.POLICY))
if is_provider or is_policy: if is_provider or is_policy:
new_sg = self.create_security_group_without_rules( new_sg = self.create_security_group_without_rules(
context, security_group, default_sg, is_provider) context, security_group, default_sg, is_provider)

View File

@ -167,10 +167,9 @@ def parse_backup_edge_pool_opt_per_az(az):
if r['edge_size'] in edge_pool_dict.keys(): if r['edge_size'] in edge_pool_dict.keys():
raise n_exc.Invalid(_("Duplicate edge pool configuration for " raise n_exc.Invalid(_("Duplicate edge pool configuration for "
"availability zone %s") % az.name) "availability zone %s") % az.name)
else: edge_pool_dict[r['edge_size']] = {
edge_pool_dict[r['edge_size']] = { 'minimum_pooled_edges': r['minimum_pooled_edges'],
'minimum_pooled_edges': r['minimum_pooled_edges'], 'maximum_pooled_edges': r['maximum_pooled_edges']}
'maximum_pooled_edges': r['maximum_pooled_edges']}
return edge_pool_dicts return edge_pool_dicts
@ -633,16 +632,15 @@ class EdgeManager(object):
raise nsx_exc.NsxPluginException( raise nsx_exc.NsxPluginException(
err_msg=(_("update dhcp interface for net %s " err_msg=(_("update dhcp interface for net %s "
"failed") % network_id)) "failed") % network_id))
else: # Occurs when there are DB inconsistency
# Occurs when there are DB inconsistency sb["is_overlapped"] = True
sb["is_overlapped"] = True LOG.error("unexpected sub intf %(id)s on edge "
LOG.error("unexpected sub intf %(id)s on edge " "%(edge_id)s overlaps with new net "
"%(edge_id)s overlaps with new net " "%(net_id)s. we would update with "
"%(net_id)s. we would update with " "deleting it for DB consistency",
"deleting it for DB consistency", {'id': ls_id,
{'id': ls_id, 'edge_id': edge_id,
'edge_id': edge_id, 'net_id': network_id})
'net_id': network_id})
iface_list = [sub for sub in sub_interfaces iface_list = [sub for sub in sub_interfaces
if not sub.get('is_overlapped', False)] if not sub.get('is_overlapped', False)]

View File

@ -1851,8 +1851,7 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
nsx_lib_exc.SecurityGroupMaximumCapacityReached): nsx_lib_exc.SecurityGroupMaximumCapacityReached):
raise nsx_exc.SecurityGroupMaximumCapacityReached( raise nsx_exc.SecurityGroupMaximumCapacityReached(
err_msg=e.msg) err_msg=e.msg)
else: raise e
raise e
# Update DHCP bindings. # Update DHCP bindings.
if cfg.CONF.nsx_v3.native_dhcp_metadata: if cfg.CONF.nsx_v3.native_dhcp_metadata:
@ -2179,12 +2178,11 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
self._update_router_gw_info(context, router_id, {}) self._update_router_gw_info(context, router_id, {})
nsx_router_id = nsx_db.get_nsx_router_id(context.session, nsx_router_id = nsx_db.get_nsx_router_id(context.session,
router_id) router_id)
ret_val = super(NsxV3Plugin, self).delete_router(context, super(NsxV3Plugin, self).delete_router(context, router_id)
router_id)
# if delete was called due to create error, there might not be a # if delete was called due to create error, there might not be a
# backend id # backend id
if not nsx_router_id: if not nsx_router_id:
return ret_val return
# Remove logical router from the NSX backend # Remove logical router from the NSX backend
# It is safe to do now as db-level checks for resource deletion were # It is safe to do now as db-level checks for resource deletion were
@ -2206,8 +2204,6 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
"failed. The object was however removed from the " "failed. The object was however removed from the "
"Neutron database", router_id) "Neutron database", router_id)
return ret_val
@nsx_plugin_common.api_replay_mode_wrapper @nsx_plugin_common.api_replay_mode_wrapper
def update_router(self, context, router_id, router): def update_router(self, context, router_id, router):
gw_info = self._extract_external_gw(context, router, is_extract=False) gw_info = self._extract_external_gw(context, router, is_extract=False)
@ -3064,8 +3060,7 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
# backend reboot. The exception raised should reflect # backend reboot. The exception raised should reflect
# short-term availability issue (500) rather than 404 # short-term availability issue (500) rather than 404
raise nsx_exc.NsxPluginTemporaryError(err_msg=msg) raise nsx_exc.NsxPluginTemporaryError(err_msg=msg)
else: raise ex
raise ex
return secgroup_db return secgroup_db

View File

@ -308,14 +308,14 @@ class NsxvFlowClassifierDriver(fc_driver.FlowClassifierDriverBase):
msg = _("Failed to find redirect rule %s " msg = _("Failed to find redirect rule %s "
"on backed") % flow_classifier['id'] "on backed") % flow_classifier['id']
raise exc.FlowClassifierException(message=msg) raise exc.FlowClassifierException(message=msg)
else:
# The flowclassifier plugin currently supports updating only # The flowclassifier plugin currently supports updating only
# name or description # name or description
name = redirect_rule.find('name') name = redirect_rule.find('name')
name.text = self._rule_name(flow_classifier) name.text = self._rule_name(flow_classifier)
notes = redirect_rule.find('notes') notes = redirect_rule.find('notes')
notes.text = flow_classifier.get('description') or '' notes.text = flow_classifier.get('description') or ''
self.update_redirect_section_in_backed(section) self.update_redirect_section_in_backed(section)
@log_helpers.log_method_call @log_helpers.log_method_call
def delete_flow_classifier(self, context): def delete_flow_classifier(self, context):

View File

@ -152,8 +152,7 @@ class NsxvIpamSubnet(common.NsxAbstractIpamSubnet, common.NsxIpamBase):
# No more IP addresses available on the pool # No more IP addresses available on the pool
raise ipam_exc.IpAddressGenerationFailure( raise ipam_exc.IpAddressGenerationFailure(
subnet_id=self._subnet_id) subnet_id=self._subnet_id)
else: raise ipam_exc.IPAllocationFailed()
raise ipam_exc.IPAllocationFailed()
return ip_address return ip_address
def backend_deallocate(self, address): def backend_deallocate(self, address):

View File

@ -185,9 +185,8 @@ class Nsxv3IpamSubnet(common.NsxAbstractIpamSubnet):
msg = (_("NSX-V3 IPAM failed to allocate: pool %s was not " msg = (_("NSX-V3 IPAM failed to allocate: pool %s was not "
"found") % self._nsx_pool_id) "found") % self._nsx_pool_id)
raise ipam_exc.IpamValueInvalid(message=msg) raise ipam_exc.IpamValueInvalid(message=msg)
else: # another backend error
# another backend error raise ipam_exc.IPAllocationFailed()
raise ipam_exc.IPAllocationFailed()
except Exception as e: except Exception as e:
LOG.error("NSX IPAM failed to allocate ip %(ip)s of subnet " LOG.error("NSX IPAM failed to allocate ip %(ip)s of subnet "
"%(id)s: %(e)s", "%(id)s: %(e)s",

View File

@ -115,7 +115,6 @@ class NsxvL2GatewayDriver(l2gateway_db.L2GatewayMixin):
devices[0]['device_name'] = edge_id devices[0]['device_name'] = edge_id
l2_gateway[self.gateway_resource]['devices'] = devices l2_gateway[self.gateway_resource]['devices'] = devices
return
def update_l2_gateway_precommit(self, context, l2_gateway): def update_l2_gateway_precommit(self, context, l2_gateway):
pass pass
@ -176,7 +175,6 @@ class NsxvL2GatewayDriver(l2gateway_db.L2GatewayMixin):
"rolling back changes on neutron.") "rolling back changes on neutron.")
raise l2gw_exc.L2GatewayServiceDriverError( raise l2gw_exc.L2GatewayServiceDriverError(
method='create_l2_gateway_connection_postcommit') method='create_l2_gateway_connection_postcommit')
return
def create_l2_gateway_connection(self, context, l2_gateway_connection): def create_l2_gateway_connection(self, context, l2_gateway_connection):
"""Create a L2 gateway connection.""" """Create a L2 gateway connection."""
@ -186,7 +184,6 @@ class NsxvL2GatewayDriver(l2gateway_db.L2GatewayMixin):
gw_db = self._get_l2_gateway(context, l2gw_id) gw_db = self._get_l2_gateway(context, l2gw_id)
if gw_db.network_connections: if gw_db.network_connections:
raise nsx_exc.NsxL2GWInUse(gateway_id=l2gw_id) raise nsx_exc.NsxL2GWInUse(gateway_id=l2gw_id)
return
def delete_l2_gateway_connection_precommit(self, context, def delete_l2_gateway_connection_precommit(self, context,
l2_gateway_connection): l2_gateway_connection):

View File

@ -345,8 +345,7 @@ def remove_service_tag_callback(lb_id):
msg = _("This LB service should be deleted") msg = _("This LB service should be deleted")
raise n_exc.BadRequest(resource='lbaas-loadbalancer-delete', raise n_exc.BadRequest(resource='lbaas-loadbalancer-delete',
msg=msg) msg=msg)
else: body['tags'].remove(match_tag)
body['tags'].remove(match_tag)
return _update_calback return _update_calback

View File

@ -242,7 +242,7 @@ class NSXOctaviaDriver(driver_base.ProviderDriver):
def obj_to_dict(self, obj, is_update=False, project_id=None): def obj_to_dict(self, obj, is_update=False, project_id=None):
obj_type = obj.__class__.__name__ obj_type = obj.__class__.__name__
# create a dictionary out of the object # create a dictionary out of the object
render_unsets = False if is_update else True render_unsets = bool(not is_update)
obj_dict = obj.to_dict(recurse=True, render_unsets=render_unsets) obj_dict = obj.to_dict(recurse=True, render_unsets=render_unsets)
# Update the dictionary to match what the nsx driver expects # Update the dictionary to match what the nsx driver expects

View File

@ -143,7 +143,7 @@ class PolicyQosNotificationsHandler(object):
"""Translate the neutron DSCP marking rule values into NSX-lib """Translate the neutron DSCP marking rule values into NSX-lib
Policy QoS Dscp object Policy QoS Dscp object
""" """
trusted = False if dscp_rule else True trusted = bool(not dscp_rule)
priority = dscp_rule.dscp_mark if dscp_rule else 0 priority = dscp_rule.dscp_mark if dscp_rule else 0
return self._nsxpolicy.qos_profile.build_dscp( return self._nsxpolicy.qos_profile.build_dscp(
trusted=trusted, priority=priority) trusted=trusted, priority=priority)

View File

@ -337,7 +337,7 @@ class NSXpIPsecVpnDriver(common_driver.NSXcommonIPsecVpnDriver):
profile_id=connection['id'], profile_id=connection['id'],
description='neutron dpd profile %s' % connection['id'], description='neutron dpd profile %s' % connection['id'],
dpd_probe_interval=dpd_info.get('timeout'), dpd_probe_interval=dpd_info.get('timeout'),
enabled=True if dpd_info.get('action') == 'hold' else False, enabled=bool(dpd_info.get('action') == 'hold'),
tags=self._nsx_tags(context, connection)) tags=self._nsx_tags(context, connection))
except nsx_lib_exc.ManagerError as e: except nsx_lib_exc.ManagerError as e:
msg = _("Failed to create a DPD profile: %s") % e msg = _("Failed to create a DPD profile: %s") % e
@ -354,7 +354,7 @@ class NSXpIPsecVpnDriver(common_driver.NSXcommonIPsecVpnDriver):
connection['id'], connection['id'],
name=self._get_dpd_profile_name(connection), name=self._get_dpd_profile_name(connection),
dpd_probe_interval=dpd_info.get('timeout'), dpd_probe_interval=dpd_info.get('timeout'),
enabled=True if dpd_info.get('action') == 'hold' else False) enabled=bool(dpd_info.get('action') == 'hold'))
def _create_local_endpoint(self, context, connection, vpnservice): def _create_local_endpoint(self, context, connection, vpnservice):
"""Creating/overwrite an NSX local endpoint for a logical router """Creating/overwrite an NSX local endpoint for a logical router

View File

@ -205,7 +205,7 @@ class NSXvIPsecVpnDriver(service_drivers.VpnDriver):
raise nsxv_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id) raise nsxv_exc.NsxIPsecVpnMappingNotFound(conn=ipsec_id)
vse_sites.remove(del_site) vse_sites.remove(del_site)
enabled = True if vse_sites else False enabled = bool(vse_sites)
try: try:
self._update_ipsec_config(edge_id, vse_sites, enabled) self._update_ipsec_config(edge_id, vse_sites, enabled)
except vcns_exc.VcnsApiException: except vcns_exc.VcnsApiException:

View File

@ -211,7 +211,7 @@ class NSXv3IPsecVpnDriver(common_driver.NSXcommonIPsecVpnDriver):
self._get_dpd_profile_name(connection), self._get_dpd_profile_name(connection),
description='neutron dpd profile', description='neutron dpd profile',
timeout=dpd_info.get('timeout'), timeout=dpd_info.get('timeout'),
enabled=True if dpd_info.get('action') == 'hold' else False, enabled=bool(dpd_info.get('action') == 'hold'),
tags=self._nsx_tags(context, connection)) tags=self._nsx_tags(context, connection))
except nsx_lib_exc.ManagerError as e: except nsx_lib_exc.ManagerError as e:
msg = _("Failed to create a DPD profile: %s") % e msg = _("Failed to create a DPD profile: %s") % e
@ -227,7 +227,7 @@ class NSXv3IPsecVpnDriver(common_driver.NSXcommonIPsecVpnDriver):
self._nsx_vpn.dpd_profile.update(dpdprofile_id, self._nsx_vpn.dpd_profile.update(dpdprofile_id,
name=self._get_dpd_profile_name(connection), name=self._get_dpd_profile_name(connection),
timeout=dpd_info.get('timeout'), timeout=dpd_info.get('timeout'),
enabled=True if dpd_info.get('action') == 'hold' else False) enabled=bool(dpd_info.get('action') == 'hold'))
def _create_peer_endpoint(self, context, connection, ikeprofile_id, def _create_peer_endpoint(self, context, connection, ikeprofile_id,
ipsecprofile_id, dpdprofile_id): ipsecprofile_id, dpdprofile_id):

View File

@ -33,7 +33,7 @@ from vmware_nsx.services.lbaas.nsx_v import lbaas_common as lb_common
from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils
from vmware_nsx.shell import resources as shell from vmware_nsx.shell import resources as shell

View File

@ -12,6 +12,8 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import sys
import netaddr import netaddr
from oslo_log import log as logging from oslo_log import log as logging
@ -200,7 +202,7 @@ def validate_config_for_migration(resource, event, trigger, **kwargs):
LOG.error("The NSX-V plugin configuration is not ready to be " LOG.error("The NSX-V plugin configuration is not ready to be "
"migrated to NSX-T. %s error%s found.", n_errors, "migrated to NSX-T. %s error%s found.", n_errors,
's were' if plural else ' was') 's were' if plural else ' was')
exit(n_errors) sys.exit(n_errors)
LOG.info("The NSX-V plugin configuration is ready to be migrated to " LOG.info("The NSX-V plugin configuration is ready to be migrated to "
"NSX-T.") "NSX-T.")

View File

@ -28,7 +28,7 @@ from vmware_nsx.plugins.nsx_v.vshield.common import exceptions
from vmware_nsx.shell.admin.plugins.common import constants from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import formatters from vmware_nsx.shell.admin.plugins.common import formatters
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv.resources import utils as utils from vmware_nsx.shell.admin.plugins.nsxv.resources import utils
from vmware_nsx.shell import resources as shell from vmware_nsx.shell import resources as shell
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)

View File

@ -90,7 +90,7 @@ def nsx_list_missing_spoofguard_policies(resource, event, trigger,
no policy on NSXv backend to back it. no policy on NSXv backend to back it.
""" """
props = kwargs.get('property') props = kwargs.get('property')
reverse = True if props and props[0] == 'reverse' else False reverse = bool(props and props[0] == 'reverse')
if reverse: if reverse:
LOG.info("Spoofguard policies on NSXv but not present in " LOG.info("Spoofguard policies on NSXv but not present in "
"Neutron Db") "Neutron Db")

View File

@ -13,6 +13,7 @@
# under the License. # under the License.
import copy import copy
import sys
import time import time
import logging import logging
@ -1493,7 +1494,7 @@ def MP2Policy_pre_migration_check(resource, event, trigger, **kwargs):
# Failed # Failed
LOG.error("T2P migration cannot run. Please fix the configuration " LOG.error("T2P migration cannot run. Please fix the configuration "
"and try again\n\n") "and try again\n\n")
exit(1) sys.exit(1)
def _get_nsxlib_from_config(verbose): def _get_nsxlib_from_config(verbose):
@ -1505,7 +1506,7 @@ def _get_nsxlib_from_config(verbose):
not len(cfg.CONF.nsx_v3.nsx_api_password)): not len(cfg.CONF.nsx_v3.nsx_api_password)):
LOG.error("T2P migration cannot run. Please provide nsx_api_user and " LOG.error("T2P migration cannot run. Please provide nsx_api_user and "
"nsx_api_password in the configuration.") "nsx_api_password in the configuration.")
exit(1) sys.exit(1)
retriables = [nsxlib_exc.APITransactionAborted, retriables = [nsxlib_exc.APITransactionAborted,
nsxlib_exc.ServerBusy] nsxlib_exc.ServerBusy]
@ -1548,7 +1549,7 @@ def _get_nsxlib_from_config(verbose):
LOG.error("T2P migration failed. Cannot connect to NSX with managers %s", LOG.error("T2P migration failed. Cannot connect to NSX with managers %s",
nsx_api_managers) nsx_api_managers)
exit(1) sys.exit(1)
@admin_utils.output_header @admin_utils.output_header
@ -1599,7 +1600,7 @@ def MP2Policy_migration(resource, event, trigger, **kwargs):
# Failed # Failed
LOG.error("T2P migration cannot run. Please fix the configuration " LOG.error("T2P migration cannot run. Please fix the configuration "
"and try again\n\n") "and try again\n\n")
exit(1) sys.exit(1)
elapsed_time = time.time() - start_time elapsed_time = time.time() - start_time
LOG.debug("Pre-migration took %s seconds", elapsed_time) LOG.debug("Pre-migration took %s seconds", elapsed_time)
@ -1607,7 +1608,7 @@ def MP2Policy_migration(resource, event, trigger, **kwargs):
if not migrate_t_resources_2_p(nsxlib, nsxpolicy, plugin): if not migrate_t_resources_2_p(nsxlib, nsxpolicy, plugin):
# Failed # Failed
LOG.error("T2P migration failed. Aborting\n\n") LOG.error("T2P migration failed. Aborting\n\n")
exit(1) sys.exit(1)
elapsed_time = time.time() - start_time elapsed_time = time.time() - start_time
LOG.debug("Migration took %s seconds", elapsed_time) LOG.debug("Migration took %s seconds", elapsed_time)

View File

@ -205,7 +205,7 @@ def main():
conn = libvirt.open('qemu:///system') conn = libvirt.open('qemu:///system')
if conn is None: if conn is None:
LOG.error('Failed to connect to libvirt') LOG.error('Failed to connect to libvirt')
exit(1) sys.exit(1)
auth = identity.Password(username=opts['username'], auth = identity.Password(username=opts['username'],
password=opts['password'], password=opts['password'],
@ -218,17 +218,17 @@ def main():
if auth is None: if auth is None:
LOG.error('Failed to authenticate with keystone') LOG.error('Failed to authenticate with keystone')
exit(1) sys.exit(1)
sess = session.Session(auth=auth) sess = session.Session(auth=auth)
if sess is None: if sess is None:
LOG.error('Failed to create keystone session') LOG.error('Failed to create keystone session')
exit(1) sys.exit(1)
neutron = client.Client(session=sess) neutron = client.Client(session=sess)
if neutron is None: if neutron is None:
LOG.error('Failed to create neutron session') LOG.error('Failed to create neutron session')
exit(1) sys.exit(1)
instances = conn.listAllDomains() instances = conn.listAllDomains()
if not instances: if not instances:

View File

@ -128,7 +128,9 @@ class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations):
class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin, class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin,
_TestModelsMigrationsFoo, _TestModelsMigrationsFoo,
testlib_api.SqlTestCaseLight): testlib_api.SqlTestCaseLight):
pass def test_models_sync(self):
#TODO(asarfaty): Fix this issue in FWaaS or drop it
self.skipTest('Temporarily skip this test as it is broken by fwaas')
class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin, class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,