Fix pep8 new warnings

A recent change in the pep8/pycodestyle checks introduced new warnings as part of
the pep8 target that cause the pep8 job to fail now.

This patch fixes code that raised warnings W503, E731, E266 and E402.

Change-Id: I57c035440bd847193ea5f8a8078016fb9baa3c31
This commit is contained in:
Adit Sarfaty 2018-04-11 09:00:58 +03:00
parent f836b5fa90
commit 9743a4d0e4
64 changed files with 252 additions and 251 deletions

View File

@ -116,8 +116,8 @@ class NsxApiClient(eventlet_client.EventletApiClient):
exception.ERROR_MAPPINGS[status](response)
# Continue processing for non-error condition.
if (status != httplib.OK and status != httplib.CREATED
and status != httplib.NO_CONTENT):
if (status != httplib.OK and status != httplib.CREATED and
status != httplib.NO_CONTENT):
LOG.error("%(method)s to %(url)s, unexpected response code: "
"%(status)d (content = '%(body)s')",
{'method': method, 'url': url,

View File

@ -17,13 +17,14 @@
import time
import eventlet
eventlet.monkey_patch()
from oslo_log import log as logging
from vmware_nsx.api_client import base
from vmware_nsx.api_client import eventlet_request
import eventlet
eventlet.monkey_patch()
LOG = logging.getLogger(__name__)

View File

@ -377,8 +377,8 @@ class NsxSynchronizer(object):
['LogicalRouterStatus']
['fabric_status'])
status = (lr_status and
constants.NET_STATUS_ACTIVE
or constants.NET_STATUS_DOWN)
constants.NET_STATUS_ACTIVE or
constants.NET_STATUS_DOWN)
# Update db object
if status == neutron_router_data['status']:
# do nothing
@ -471,8 +471,8 @@ class NsxSynchronizer(object):
['LogicalPortStatus']
['fabric_status_up'])
status = (lp_status and
constants.PORT_STATUS_ACTIVE
or constants.PORT_STATUS_DOWN)
constants.PORT_STATUS_ACTIVE or
constants.PORT_STATUS_DOWN)
# Update db object
if status == neutron_port_data['status']:

View File

@ -145,12 +145,12 @@ class ExtendedSecurityGroupPropertiesMixin(object):
def _process_security_group_properties_update(self, context,
sg_res, sg_req):
if ((sg_logging.LOGGING in sg_req
and (sg_req[sg_logging.LOGGING] !=
sg_res.get(sg_logging.LOGGING, False))) or
(sg_policy.POLICY in sg_req
and (sg_req[sg_policy.POLICY] !=
sg_res.get(sg_policy.POLICY)))):
if ((sg_logging.LOGGING in sg_req and
(sg_req[sg_logging.LOGGING] !=
sg_res.get(sg_logging.LOGGING, False))) or
(sg_policy.POLICY in sg_req and
(sg_req[sg_policy.POLICY] !=
sg_res.get(sg_policy.POLICY)))):
prop = self._get_security_group_properties(context, sg_res['id'])
with db_api.context_manager.writer.using(context):
prop.update({

View File

@ -21,13 +21,13 @@ Create Date: 2015-08-24 18:19:09.397813
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '28430956782d'
down_revision = '53a3254aa95e'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(

View File

@ -20,14 +20,14 @@ Create Date: 2016-02-09 13:57:01.590154
"""
# revision identifiers, used by Alembic.
revision = '20483029f1ff'
down_revision = '69fb78b33d41'
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '20483029f1ff'
down_revision = '69fb78b33d41'
old_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
name='tz_network_bindings_binding_type')
new_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',

View File

@ -20,13 +20,12 @@ Create Date: 2015-11-24 13:44:08.664653
"""
# revision identifiers, used by Alembic.
revision = '2af850eb3970'
down_revision = '312211a5725f'
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2af850eb3970'
down_revision = '312211a5725f'
tz_binding_type_enum = sa.Enum('flat', 'vlan', 'portgroup',
name='nsxv_tz_network_bindings_binding_type')

View File

@ -21,11 +21,12 @@ Create Date: 2015-09-09 02:02:59.990122
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '312211a5725f'
down_revision = '279b70ac3ae8'
from alembic import op
import sqlalchemy as sa
def upgrade():

View File

@ -20,13 +20,13 @@ Create Date: 2016-01-27 07:28:35.369938
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '69fb78b33d41'
down_revision = '2af850eb3970'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(

View File

@ -20,12 +20,12 @@ Create Date: 2016-03-24 07:11:30.300482
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '081af0e396d7'
down_revision = '5ed1ffbc0d2a'
from alembic import op
def upgrade():
op.rename_table('nsxv_extended_security_group_rule_properties',

View File

@ -21,14 +21,14 @@ Create Date: 2016-03-24 06:06:06.680092
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5ed1ffbc0d2a'
down_revision = '3c88bdea3054'
depends_on = ('3e4dccfe6fb4',)
from alembic import op
import sqlalchemy as sa
def upgrade():
secgroup_prop_table = sa.Table(

View File

@ -20,13 +20,13 @@ Create Date: 2016-07-21 05:03:35.369938
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dbe29d208ac6'
down_revision = '081af0e396d7'
from alembic import op
import sqlalchemy as sa
def upgrade():
# Add a new column and make the previous column nullable,

View File

@ -20,13 +20,13 @@ Create Date: 2016-07-17 11:30:31.263918
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1b4eaffe4f31'
down_revision = '633514d94b93'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('nsx_extended_security_group_properties',

View File

@ -21,13 +21,13 @@ Create Date: 2016-03-15 06:06:06.680092
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2c87aedb206f'
down_revision = '4c45bcadccf9'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('nsxv_security_group_section_mappings',

View File

@ -20,13 +20,13 @@ Create Date: 2016-03-20 07:28:35.369938
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3e4dccfe6fb4'
down_revision = '2c87aedb206f'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(

View File

@ -20,13 +20,13 @@ Create Date: 2016-06-27 23:58:22.003350
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5e564e781d77'
down_revision = 'c644ec62c585'
from alembic import op
import sqlalchemy as sa
tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
'vxlan',

View File

@ -20,12 +20,12 @@ Create Date: 2016-05-09 14:11:31.940021
"""
revision = '633514d94b93'
down_revision = '86a55205337c'
from alembic import op
import sqlalchemy as sa
revision = '633514d94b93'
down_revision = '86a55205337c'
def upgrade():
op.create_table(

View File

@ -20,12 +20,12 @@ Create Date: 2016-09-01 10:17:16.770021
"""
revision = '6e6da8296c0e'
down_revision = '1b4eaffe4f31'
from alembic import op
import sqlalchemy as sa
revision = '6e6da8296c0e'
down_revision = '1b4eaffe4f31'
def upgrade():
op.create_table(

View File

@ -21,18 +21,17 @@ Create Date: 2016-04-21 10:45:32.278433
"""
from alembic import op
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '7e46906f8997'
down_revision = 'aede17d51d0f'
from alembic import op
from neutron.db import migration
def upgrade():
if (migration.schema_has_table('lbaas_loadbalancers')
and migration.schema_has_table('nsxv_lbaas_loadbalancer_bindings')):
if (migration.schema_has_table('lbaas_loadbalancers') and
migration.schema_has_table('nsxv_lbaas_loadbalancer_bindings')):
op.execute('delete from nsxv_lbaas_loadbalancer_bindings '
'where loadbalancer_id not in '
@ -42,8 +41,8 @@ def upgrade():
'lbaas_loadbalancers', ['loadbalancer_id'], ['id'],
ondelete='CASCADE')
if (migration.schema_has_table('lbaas_listeners')
and migration.schema_has_table('nsxv_lbaas_listener_bindings')):
if (migration.schema_has_table('lbaas_listeners') and
migration.schema_has_table('nsxv_lbaas_listener_bindings')):
op.execute('delete from nsxv_lbaas_listener_bindings '
'where listener_id not in '
@ -52,8 +51,8 @@ def upgrade():
'fk_lbaas_listeners_id', 'nsxv_lbaas_listener_bindings',
'lbaas_listeners', ['listener_id'], ['id'], ondelete='CASCADE')
if (migration.schema_has_table('lbaas_pools')
and migration.schema_has_table('nsxv_lbaas_pool_bindings')):
if (migration.schema_has_table('lbaas_pools') and
migration.schema_has_table('nsxv_lbaas_pool_bindings')):
op.execute('delete from nsxv_lbaas_pool_bindings '
'where pool_id not in (select id from lbaas_pools)')
@ -61,8 +60,8 @@ def upgrade():
'fk_lbaas_pools_id', 'nsxv_lbaas_pool_bindings',
'lbaas_pools', ['pool_id'], ['id'], ondelete='CASCADE')
if (migration.schema_has_table('lbaas_healthmonitors')
and migration.schema_has_table('nsxv_lbaas_monitor_bindings')):
if (migration.schema_has_table('lbaas_healthmonitors') and
migration.schema_has_table('nsxv_lbaas_monitor_bindings')):
op.execute('delete from nsxv_lbaas_monitor_bindings '
'where hm_id not in (select id from lbaas_healthmonitors)')

View File

@ -20,15 +20,15 @@ Revises: 7e46906f8997
Create Date: 2016-07-12 09:18:44.450116
"""
# revision identifiers, used by Alembic.
revision = '86a55205337c'
down_revision = '7e46906f8997'
from alembic import op
import sqlalchemy as sa
from vmware_nsx.common import config # noqa
# revision identifiers, used by Alembic.
revision = '86a55205337c'
down_revision = '7e46906f8997'
def upgrade():
op.alter_column('nsxv_router_bindings', 'resource_pool',

View File

@ -20,13 +20,13 @@ Create Date: 2016-02-23 18:22:01.998540
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '967462f585e1'
down_revision = '3e4dccfe6fb4'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('neutron_nsx_network_mappings',

View File

@ -21,13 +21,13 @@ Create Date: 2016-04-21 10:45:32.278433
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'aede17d51d0f'
down_revision = '5e564e781d77'
from alembic import op
import sqlalchemy as sa
tables = [
'nsxv_router_bindings',
'nsxv_edge_vnic_bindings',

View File

@ -19,13 +19,13 @@ Revises: 967462f585e1
Create Date: 2016-03-17 06:12:09.450116
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b7f41687cbad'
down_revision = '967462f585e1'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(

View File

@ -19,16 +19,16 @@ Revises: b7f41687cbad
Create Date: 2016-05-15 06:12:09.450116
"""
# revision identifiers, used by Alembic.
revision = 'c288bb6a7252'
down_revision = 'b7f41687cbad'
from alembic import op
from oslo_config import cfg
import sqlalchemy as sa
from vmware_nsx.common import config # noqa
# revision identifiers, used by Alembic.
revision = 'c288bb6a7252'
down_revision = 'b7f41687cbad'
def upgrade():
op.add_column('nsxv_router_bindings',

View File

@ -20,15 +20,14 @@ Create Date: 2016-04-29 23:19:39.523196
"""
# revision identifiers, used by Alembic.
revision = 'c644ec62c585'
down_revision = 'c288bb6a7252'
from alembic import op
import sqlalchemy as sa
from vmware_nsxlib.v3 import nsx_constants
# revision identifiers, used by Alembic.
revision = 'c644ec62c585'
down_revision = 'c288bb6a7252'
nsx_service_type_enum = sa.Enum(
nsx_constants.SERVICE_DHCP,

View File

@ -20,16 +20,15 @@ Create Date: 2017-02-05 14:34:21.163418
"""
# revision identifiers, used by Alembic.
revision = '14a89ddf96e2'
down_revision = '5c8f451290b7'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.engine import reflection
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '14a89ddf96e2'
down_revision = '5c8f451290b7'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.OCATA]

View File

@ -20,13 +20,13 @@ Create Date: 2016-12-25 11:08:30.300482
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '5c8f451290b7'
down_revision = 'd49ac91b560e'
depends_on = ('6e6da8296c0e',)
from alembic import op
def upgrade():
op.rename_table('nsxv_subnet_ipam',

View File

@ -20,14 +20,14 @@ Create Date: 2017-01-04 10:10:59.990122
"""
# revision identifiers, used by Alembic.
revision = '01a33f93f5fd'
down_revision = 'dd9fe5a3a526'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '01a33f93f5fd'
down_revision = 'dd9fe5a3a526'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.OCATA]

View File

@ -20,13 +20,13 @@ Create Date: 2017-01-06 12:30:01.070022
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'dd9fe5a3a526'
down_revision = 'e816d4fe9d4f'
from alembic import op
import sqlalchemy as sa
def upgrade():

View File

@ -20,13 +20,13 @@ Create Date: 2016-10-06 11:30:31.263918
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e816d4fe9d4f'
down_revision = '7b5ec3caa9a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('nsx_extended_security_group_properties',

View File

@ -19,15 +19,14 @@ Revises: 8c0a81a07691
Create Date: 2017-03-15 11:47:09.450116
"""
# revision identifiers, used by Alembic.
revision = '84ceffa27115'
down_revision = '8c0a81a07691'
from alembic import op
from sqlalchemy.engine import reflection
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '84ceffa27115'
down_revision = '8c0a81a07691'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.PIKE]

View File

@ -20,13 +20,13 @@ Create Date: 2017-02-15 15:25:21.163418
"""
from alembic import op
from sqlalchemy.engine import reflection
# revision identifiers, used by Alembic.
revision = '8c0a81a07691'
down_revision = '14a89ddf96e2'
from alembic import op
from sqlalchemy.engine import reflection
def upgrade():
table_name = 'nsx_subnet_ipam'

View File

@ -20,10 +20,11 @@ Create Date: 2017-02-22 10:10:59.990122
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '53eb497903a4'
down_revision = '8699700cd95c'
from alembic import op
def upgrade():

View File

@ -20,14 +20,15 @@ Create Date: 2017-02-22 10:10:59.990122
"""
# revision identifiers, used by Alembic.
revision = '7c4704ad37df'
down_revision = 'e4c503f4133f'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '7c4704ad37df'
down_revision = 'e4c503f4133f'
def upgrade():
# On a previous upgrade this table was created conditionally.

View File

@ -20,13 +20,13 @@ Create Date: 2017-02-16 03:13:39.775670
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8699700cd95c'
down_revision = '7c4704ad37df'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(

View File

@ -20,13 +20,13 @@ Create Date: 2017-02-20 00:05:30.894680
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e4c503f4133f'
down_revision = '01a33f93f5fd'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(

View File

@ -20,15 +20,15 @@ Create Date: 2017-10-26 08:32:40.846088
"""
# revision identifiers, used by Alembic.
revision = '717f7f63a219'
down_revision = 'a1be06050b41'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '717f7f63a219'
down_revision = 'a1be06050b41'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.QUEENS]

View File

@ -19,16 +19,15 @@ Revises: 84ceffa27115
Create Date: 2017-09-04 23:58:22.003350
"""
# revision identifiers, used by Alembic.
revision = 'a1be06050b41'
down_revision = '84ceffa27115'
depends_on = ('aede17d51d0f')
from alembic import op
import sqlalchemy as sa
from neutron.db import migration as neutron_op
# revision identifiers, used by Alembic.
revision = 'a1be06050b41'
down_revision = '84ceffa27115'
depends_on = ('aede17d51d0f')
all_tz_binding_type_enum = sa.Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
'vxlan', 'geneve', 'portgroup', 'nsx-net',

View File

@ -20,13 +20,13 @@ Create Date: 2017-11-26 12:27:40.846088
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0dbeda408e41'
down_revision = '9799427fc0e1'
from alembic import op
import sqlalchemy as sa
def upgrade():

View File

@ -20,15 +20,15 @@ Create Date: 2017-06-12 16:59:48.021909
"""
# revision identifiers, used by Alembic.
revision = '9799427fc0e1'
down_revision = 'ea7a72ab9643'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
# revision identifiers, used by Alembic.
revision = '9799427fc0e1'
down_revision = 'ea7a72ab9643'
plugin_type_enum = sa.Enum('dvs', 'nsx-v', 'nsx-t',
name='nsx_plugin_type')
# milestone identifier, used by neutron-db-manage

View File

@ -88,8 +88,8 @@ class NsxPortBindingMixin(pbin_db.PortBindingMixin):
org_vnic_type = nsxv_db.get_nsxv_ext_attr_port_vnic_type(
context.session, port_id)
vnic_type = port.get(pbin.VNIC_TYPE, org_vnic_type)
cap_port_filter = (port.get(pbin.VNIC_TYPE, org_vnic_type)
== pbin.VNIC_NORMAL)
cap_port_filter = (port.get(pbin.VNIC_TYPE, org_vnic_type) ==
pbin.VNIC_NORMAL)
vif_details = {pbin.CAP_PORT_FILTER: cap_port_filter}
network = self.get_network(context, port_res['network_id'])
if network.get(pnet.NETWORK_TYPE) == c_utils.NsxVNetworkTypes.FLAT:

View File

@ -340,13 +340,13 @@ class DNSExtensionDriverNSXv3(DNSExtensionDriver):
# try to get the dns-domain from the specific availability zone
# of this network
az = self._get_network_az(network_id, context)
if (az.dns_domain
and _dotted_domain(az.dns_domain) !=
_dotted_domain(DNS_DOMAIN_DEFAULT)):
if (az.dns_domain and
_dotted_domain(az.dns_domain) !=
_dotted_domain(DNS_DOMAIN_DEFAULT)):
dns_domain = az.dns_domain
elif (cfg.CONF.nsx_v3.dns_domain
and (_dotted_domain(cfg.CONF.nsx_v3.dns_domain) !=
_dotted_domain(DNS_DOMAIN_DEFAULT))):
elif (cfg.CONF.nsx_v3.dns_domain and
(_dotted_domain(cfg.CONF.nsx_v3.dns_domain) !=
_dotted_domain(DNS_DOMAIN_DEFAULT))):
dns_domain = cfg.CONF.nsx_v3.dns_domain
elif cfg.CONF.dns_domain:
dns_domain = cfg.CONF.dns_domain

View File

@ -38,8 +38,8 @@ def add_nsx_extensions_to_parser(parser, client_manager, for_create=True):
action='store_true',
help=_("Disable logging (default)")
)
if ('provider-security-group' in utils.get_extensions(client_manager)
and for_create):
if ('provider-security-group' in utils.get_extensions(client_manager) and
for_create):
# provider
parser.add_argument(
'--provider',

View File

@ -453,8 +453,8 @@ class NsxTVDPlugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# Check if we need to invoke metadata search. Here we are unable to
# filter according to projects as this is from the nova api service
# so we invoke on all plugins that support this extension
if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields)
or (filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))):
if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields) or
(filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))):
for plugin in self.as_providers.values():
subnets = plugin.get_subnets(context, filters=filters,
fields=fields, sorts=sorts,

View File

@ -1404,8 +1404,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
lrouter = routerlib.create_lrouter(
self.cluster, router['id'],
tenant_id, router['name'], nexthop,
distributed=(validators.is_attr_set(distributed)
and distributed))
distributed=(validators.is_attr_set(distributed) and
distributed))
except nsx_exc.InvalidVersion:
msg = _("Cannot create a distributed router with the NSX "
"platform currently in execution. Please, try "
@ -1763,8 +1763,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
port = self._get_port(context, port_id)
if port.get('fixed_ips'):
subnet_id = port['fixed_ips'][0]['subnet_id']
if not (port['device_owner'] in constants.ROUTER_INTERFACE_OWNERS
and port['device_id'] == router_id):
if not (port['device_owner'] in
constants.ROUTER_INTERFACE_OWNERS and
port['device_id'] == router_id):
raise l3_exc.RouterInterfaceNotFound(
router_id=router_id, port_id=port_id)
elif 'subnet_id' in interface_info:

View File

@ -240,8 +240,8 @@ class RouterDistributedDriver(router_driver.RouterBaseDriver):
# Validate that the subnet is not a v6 one
subnet = self.plugin.get_subnet(context.elevated(), subnet_id)
if (subnet.get('ip_version') == 6 or
(subnet['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None)
and netaddr.IPNetwork(subnet['cidr']).version == 6)):
(subnet['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None) and
netaddr.IPNetwork(subnet['cidr']).version == 6)):
err_msg = _("No support for IPv6 interfaces")
raise n_exc.InvalidInput(error_message=err_msg)

View File

@ -174,8 +174,8 @@ class RouterExclusiveDriver(router_driver.RouterBaseDriver):
edge_id = self._get_router_edge_id(context, router_id)
with locking.LockManager.get_lock(edge_id):
if ((new_ext_net_id != org_ext_net_id or force_update)
and orgnexthop):
if ((new_ext_net_id != org_ext_net_id or force_update) and
orgnexthop):
# network changed, so need to remove default gateway before
# vnic can be configured
LOG.debug("Delete default gateway %s", orgnexthop)

View File

@ -584,8 +584,8 @@ class RouterSharedDriver(router_driver.RouterBaseDriver):
for router in routers:
router_res = {}
self.plugin._extend_nsx_router_dict(router_res, router)
if (router['id'] not in conflict_router_ids
and router_res.get('router_type') == 'shared'):
if (router['id'] not in conflict_router_ids and
router_res.get('router_type') == 'shared'):
optional_router_ids.append(router['id'])
return optional_router_ids, conflict_router_ids

View File

@ -164,15 +164,15 @@ class ErrorDhcpEdgeJob(base_job.BaseJob):
# appliance are registered in nsxv_edge_vnic_bindings
for vnic in backend_vnics:
if_changed[vnic['index']] = False
if (vnic['isConnected'] and vnic['type'] == 'trunk'
and vnic['subInterfaces']):
if (vnic['isConnected'] and vnic['type'] == 'trunk' and
vnic['subInterfaces']):
for sub_if in vnic['subInterfaces']['subInterfaces']:
# Subinterface name field contains the net id
vnic_bind = vnic_dict.get(sub_if['logicalSwitchName'])
if (vnic_bind
and vnic_bind['vnic_index'] == vnic['index']
and vnic_bind['tunnel_index'] == sub_if['tunnelId']):
if (vnic_bind and
vnic_bind['vnic_index'] == vnic['index'] and
vnic_bind['tunnel_index'] == sub_if['tunnelId']):
pass
else:
LOG.warning('Housekeeping: subinterface %s for vnic '

View File

@ -351,8 +351,8 @@ class NsxVMetadataProxyHandler(object):
).get('addressGroups', {}
)[0].get('primaryAddress')
cur_pgroup = if_data['portgroupId']
if (if_data and cur_pgroup != self.az.mgt_net_moid
or cur_ip != rtr_ext_ip):
if (if_data and cur_pgroup != self.az.mgt_net_moid or
cur_ip != rtr_ext_ip):
if cfg.CONF.nsxv.metadata_initializer:
self.nsxv_plugin.nsx_v.update_interface(
rtr_id,
@ -383,8 +383,8 @@ class NsxVMetadataProxyHandler(object):
m_ips = md_members.keys()
m_to_convert = (list(set(m_ips) -
set(cfg.CONF.nsxv.nova_metadata_ips)))
m_ip_to_set = (list(set(cfg.CONF.nsxv.nova_metadata_ips)
- set(m_ips)))
m_ip_to_set = (list(set(cfg.CONF.nsxv.nova_metadata_ips) -
set(m_ips)))
if m_to_convert or m_ip_to_set:
update_md_proxy = True
for m_ip in m_to_convert:

View File

@ -342,10 +342,10 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Should be called only once per worker
return
has_metadata_cfg = (
cfg.CONF.nsxv.nova_metadata_ips
and cfg.CONF.nsxv.mgt_net_moid
and cfg.CONF.nsxv.mgt_net_proxy_ips
and cfg.CONF.nsxv.mgt_net_proxy_netmask)
cfg.CONF.nsxv.nova_metadata_ips and
cfg.CONF.nsxv.mgt_net_moid and
cfg.CONF.nsxv.mgt_net_proxy_ips and
cfg.CONF.nsxv.mgt_net_proxy_netmask)
if has_metadata_cfg:
# Init md_proxy handler per availability zone
self.metadata_proxy_handler = {}
@ -394,9 +394,10 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
ports = self.get_ports(context, filters=filters)
for port in ports:
# Only add compute ports with device-id, vnic & port security
if (validators.is_attr_set(port.get(ext_vnic_idx.VNIC_INDEX))
and validators.is_attr_set(port.get('device_id'))
and port[psec.PORTSECURITY]):
if (validators.is_attr_set(
port.get(ext_vnic_idx.VNIC_INDEX)) and
validators.is_attr_set(port.get('device_id')) and
port[psec.PORTSECURITY]):
try:
vnic_idx = port[ext_vnic_idx.VNIC_INDEX]
device_id = port['device_id']
@ -829,8 +830,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
return subnets
new_subnets = []
if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields)
or (filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))):
if ((fields and as_providers.ADV_SERVICE_PROVIDERS in fields) or
(filters and filters.get(as_providers.ADV_SERVICE_PROVIDERS))):
# This ugly mess should reduce DB calls with network_id field
# as filter - as network_id is not indexed
@ -1574,12 +1575,12 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
vlan type networks.
"""
if (original_network.get(pnet.NETWORK_TYPE) ==
c_utils.NsxVNetworkTypes.VLAN
and validators.is_attr_set(
attrs.get(pnet.PHYSICAL_NETWORK))
and not validators.is_attr_set(
attrs.get(pnet.NETWORK_TYPE))
and not validators.is_attr_set(
c_utils.NsxVNetworkTypes.VLAN and
validators.is_attr_set(
attrs.get(pnet.PHYSICAL_NETWORK)) and
not validators.is_attr_set(
attrs.get(pnet.NETWORK_TYPE)) and
not validators.is_attr_set(
attrs.get(pnet.SEGMENTATION_ID))):
return
providernet._raise_if_updates_provider_attributes(attrs)
@ -1888,8 +1889,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
port_data[psec.PORTSECURITY] = port_security
provider_sg_specified = (validators.is_attr_set(
port_data.get(provider_sg.PROVIDER_SECURITYGROUPS))
and port_data[provider_sg.PROVIDER_SECURITYGROUPS] != [])
port_data.get(provider_sg.PROVIDER_SECURITYGROUPS)) and
port_data[provider_sg.PROVIDER_SECURITYGROUPS] != [])
has_security_groups = (
self._check_update_has_security_groups(port))
@ -2001,8 +2002,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
ports = [port for port in device_ports
if port['device_owner'].startswith('compute')]
return len([p for p in ports
if validators.is_attr_set(p.get(ext_vnic_idx.VNIC_INDEX))
and not p[psec.PORTSECURITY]])
if validators.is_attr_set(p.get(ext_vnic_idx.VNIC_INDEX)) and
not p[psec.PORTSECURITY]])
def _add_vm_to_exclude_list(self, context, device_id, port_id):
if (self._vcm and
@ -2238,8 +2239,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
LOG.error("Port has conflicting port security status and "
"security groups")
raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
if ((not delete_security_groups
and original_port[ext_sg.SECURITYGROUPS]) or
if ((not delete_security_groups and
original_port[ext_sg.SECURITYGROUPS]) or
(not delete_provider_sg and
original_port[provider_sg.PROVIDER_SECURITYGROUPS])):
LOG.error("Port has conflicting port security status and "
@ -2403,8 +2404,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
"function properly ",
{'id': id,
'device_id': original_port['device_id']})
if (delete_security_groups
or has_security_groups or pvd_sg_changed):
if (delete_security_groups or has_security_groups or
pvd_sg_changed):
# Update security-groups,
# calculate differences and update vnic membership
# accordingly.
@ -2698,8 +2699,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
subnet['subnet']['network_id'])
data = subnet['subnet']
if (data.get('ip_version') == 6 or
(data['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None)
and netaddr.IPNetwork(data['cidr']).version == 6)):
(data['cidr'] not in (constants.ATTR_NOT_SPECIFIED, None) and
netaddr.IPNetwork(data['cidr']).version == 6)):
err_msg = _("No support for DHCP for IPv6")
raise n_exc.InvalidInput(error_message=err_msg)
if self._is_overlapping_reserved_subnets(subnet):
@ -2927,8 +2928,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
for net_id in all_networks:
p_net = nsxv_db.get_network_bindings(context.session,
net_id['id'])
if (p_net and binding_type == p_net[0]['binding_type']
and binding_type == c_utils.NsxVNetworkTypes.FLAT):
if (p_net and binding_type == p_net[0]['binding_type'] and
binding_type == c_utils.NsxVNetworkTypes.FLAT):
conflicting_networks.append(net_id['id'])
elif (p_net and phy_uuid != p_net[0]['phy_uuid']):
conflicting_networks.append(net_id['id'])
@ -4555,8 +4556,9 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
def _is_compute_port(self, port):
try:
if (port['device_id'] and uuidutils.is_uuid_like(port['device_id'])
and port['device_owner'].startswith('compute:')):
if (port['device_id'] and
uuidutils.is_uuid_like(port['device_id']) and
port['device_owner'].startswith('compute:')):
return True
except (KeyError, AttributeError):
pass
@ -4646,8 +4648,8 @@ class NsxVPluginV2(addr_pair_db.AllowedAddressPairsMixin,
res_name='vdn_scope_id', res_id=vdns)
# Validate the global & per-AZ mgt_net_moid
if (cfg.CONF.nsxv.mgt_net_moid
and not self.nsx_v.vcns.validate_network(
if (cfg.CONF.nsxv.mgt_net_moid and
not self.nsx_v.vcns.validate_network(
cfg.CONF.nsxv.mgt_net_moid)):
raise nsx_exc.NsxResourceNotFound(
res_name='mgt_net_moid',

View File

@ -117,8 +117,8 @@ def is_overlapping_reserved_subnets(cidr, reserved_subnets):
# translate the reserved subnet to a range object
reserved_range = netaddr.IPNetwork(reserved_subnet)
# check if new subnet overlaps this reserved subnet
if (range.first <= reserved_range.last
and reserved_range.first <= range.last):
if (range.first <= reserved_range.last and
reserved_range.first <= range.last):
return True
return False
@ -474,8 +474,8 @@ class EdgeManager(object):
if bindings:
binding = bindings[0]
network_type = binding['binding_type']
if (network_type == c_utils.NsxVNetworkTypes.VLAN
and binding['phy_uuid'] != ''):
if (network_type == c_utils.NsxVNetworkTypes.VLAN and
binding['phy_uuid'] != ''):
if ',' not in binding['phy_uuid']:
phys_net = binding['phy_uuid']
# Return user input physical network value for all network types
@ -484,9 +484,9 @@ class EdgeManager(object):
# We also validate that this binding starts with 'dvs'. If a admin
# creates a provider portgroup then we need to use the default
# configured DVS.
elif (not network_type == c_utils.NsxVNetworkTypes.VXLAN
and binding['phy_uuid'] != ''
and binding['phy_uuid'].startswith('dvs')):
elif (not network_type == c_utils.NsxVNetworkTypes.VXLAN and
binding['phy_uuid'] != '' and
binding['phy_uuid'].startswith('dvs')):
phys_net = binding['phy_uuid']
return phys_net, network_type
@ -1764,8 +1764,8 @@ class EdgeManager(object):
# one vnic is used to provide external access.
net_number = (
vcns_const.MAX_VNIC_NUM - len(edge_vnic_bindings) - 1)
if (net_number > max_net_number
and net_number >= network_number):
if (net_number > max_net_number and
net_number >= network_number):
net_ids = [vnic_binding.network_id
for vnic_binding in edge_vnic_bindings]
if not (set(conflict_network_ids) & set(net_ids)):
@ -2071,8 +2071,8 @@ def _retrieve_nsx_switch_id(context, network_id, az_name):
if bindings:
binding = bindings[0]
network_type = binding['binding_type']
if (network_type == c_utils.NsxVNetworkTypes.VLAN
and binding['phy_uuid'] != ''):
if (network_type == c_utils.NsxVNetworkTypes.VLAN and
binding['phy_uuid'] != ''):
if ',' not in binding['phy_uuid']:
dvs_id = binding['phy_uuid']
else:

View File

@ -156,8 +156,8 @@ class NsxvLoadbalancer(nsxv_edge_cfg_obj.NsxvEdgeCfgObj):
# Find application profile objects, attach to virtual server
for app_prof in edge_lb['applicationProfile']:
if (virt_srvr['applicationProfileId']
== app_prof['applicationProfileId']):
if (virt_srvr['applicationProfileId'] ==
app_prof['applicationProfileId']):
a_p = NsxvLBAppProfile(
app_prof['name'],
app_prof['serverSslEnabled'],

View File

@ -634,8 +634,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
return self._get_mac_learning_profile()
def _get_mac_learning_profile(self):
if (hasattr(self, '_mac_learning_profile')
and self._mac_learning_profile):
if (hasattr(self, '_mac_learning_profile') and
self._mac_learning_profile):
return self._mac_learning_profile
profile = self.nsxlib.switching_profile.find_by_display_name(
NSX_V3_MAC_LEARNING_PROFILE_NAME)
@ -4236,8 +4236,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
subnet_id = port['fixed_ips'][0]['subnet_id']
self._confirm_router_interface_not_in_use(
context, router_id, subnet_id)
if not (port['device_owner'] in const.ROUTER_INTERFACE_OWNERS
and port['device_id'] == router_id):
if not (port['device_owner'] in const.ROUTER_INTERFACE_OWNERS and
port['device_id'] == router_id):
raise l3_exc.RouterInterfaceNotFound(
router_id=router_id, port_id=port_id)
elif 'subnet_id' in interface_info:
@ -4662,8 +4662,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# skip if there are no rules in group. i.e provider case
if sg_rules:
# translate and creates firewall rules.
logging = (cfg.CONF.nsx_v3.log_security_groups_allowed_traffic
or secgroup.get(sg_logging.LOGGING, False))
logging = (
cfg.CONF.nsx_v3.log_security_groups_allowed_traffic or
secgroup.get(sg_logging.LOGGING, False))
action = (nsxlib_consts.FW_ACTION_DROP
if secgroup.get(provider_sg.PROVIDER)
else nsxlib_consts.FW_ACTION_ALLOW)
@ -4759,8 +4760,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
sg_id = rules_db[0]['security_group_id']
nsgroup_id, section_id = nsx_db.get_sg_mappings(context.session,
sg_id)
logging_enabled = (cfg.CONF.nsx_v3.log_security_groups_allowed_traffic
or self._is_security_group_logged(context, sg_id))
logging_enabled = (
cfg.CONF.nsx_v3.log_security_groups_allowed_traffic or
self._is_security_group_logged(context, sg_id))
try:
rules = self._create_firewall_rules(
context, section_id, nsgroup_id,

View File

@ -232,8 +232,7 @@ class NSXvIPsecVpnDriver(service_drivers.VpnDriver):
if ipsecvpn_configs['enabled']:
vse_sites = ipsecvpn_configs['sites'].get('sites')
for s in vse_sites:
if ((s['peerSubnets'].get('subnets') == site['peer_cidrs'])
and
if ((s['peerSubnets'].get('subnets') == site['peer_cidrs']) and
(s['localSubnets'].get('subnets')[0] == local_cidr)):
old_site = s
break

View File

@ -104,8 +104,8 @@ def _nsx_delete_backup_edge(edge_id, all_backup_edges):
# edge_result[1] is response body
edge = edge_result[1]
backup_edges = [e['id'] for e in all_backup_edges]
if (not edge['name'].startswith('backup-')
or edge['id'] not in backup_edges):
if (not edge['name'].startswith('backup-') or
edge['id'] not in backup_edges):
LOG.error(
'Edge: %s is not a backup edge; aborting delete',
edge_id)
@ -206,8 +206,8 @@ def nsx_list_name_mismatches(resource, event, trigger, **kwargs):
edgeapi.context.session, edge['id'])
if (rtr_binding and
edge['name'].startswith('backup-')
and rtr_binding['router_id'] != edge['name']):
edge['name'].startswith('backup-') and
rtr_binding['router_id'] != edge['name']):
plugin_nsx_mismatch.append(
{'edge_id': edge['id'],
'edge_name': edge['name'],
@ -279,8 +279,8 @@ def nsx_fix_name_mismatch(resource, event, trigger, **kwargs):
if nsx_attr and nsx_attr['router_type'] == 'shared':
edge['name'] = ('shared-' + _uuid())[
:vcns_const.EDGE_NAME_LEN]
elif (nsx_attr
and nsx_attr['router_type'] == 'exclusive'):
elif (nsx_attr and
nsx_attr['router_type'] == 'exclusive'):
rtr_db = (edgeapi.context.session.query(
l3_db.Router).filter_by(
id=rtr_binding['router_id']).first())

View File

@ -104,10 +104,10 @@ def nsx_redo_metadata_cfg_for_az(az, edgeapi):
filters={'edge_type': [nsxv_constants.SERVICE_EDGE],
'availability_zones': az.name})
edge_ids = list(set([binding['edge_id'] for binding in router_bindings
if (binding['router_id'] not in set(md_rtr_ids)
and not binding['router_id'].startswith(
vcns_constants.BACKUP_ROUTER_PREFIX)
and not binding['router_id'].startswith(
if (binding['router_id'] not in set(md_rtr_ids) and
not binding['router_id'].startswith(
vcns_constants.BACKUP_ROUTER_PREFIX) and
not binding['router_id'].startswith(
vcns_constants.PLR_EDGE_PREFIX))]))
for edge_id in edge_ids:
@ -143,10 +143,10 @@ def update_shared_secret(resource, event, trigger, **kwargs):
edgeapi.context.session,
filters={'edge_type': [nsxv_constants.SERVICE_EDGE]})
edge_ids = list(set([binding['edge_id'] for binding in router_bindings
if (binding['router_id'] not in set(md_rtr_ids)
and not binding['router_id'].startswith(
vcns_constants.BACKUP_ROUTER_PREFIX)
and not binding['router_id'].startswith(
if (binding['router_id'] not in set(md_rtr_ids) and
not binding['router_id'].startswith(
vcns_constants.BACKUP_ROUTER_PREFIX) and
not binding['router_id'].startswith(
vcns_constants.PLR_EDGE_PREFIX))]))
for edge_id in edge_ids:

View File

@ -166,8 +166,8 @@ def list_orphaned_networks(resource, event, trigger, **kwargs):
backend_name = net['name']
# Decide if this is a neutron network by its name (which should always
# contain the net-id), and type
if (backend_name.startswith('edge-') or len(backend_name) < 36
or net['type'] == 'Network'):
if (backend_name.startswith('edge-') or len(backend_name) < 36 or
net['type'] == 'Network'):
# This is not a neutron network
continue
# get the list of neutron networks with this moref

View File

@ -140,8 +140,8 @@ def is_valid_os_data(libvirt_conn, os_type, os_arch, os_machine):
caps_xml = libvirt_conn.getCapabilities()
caps_root = et.fromstring(caps_xml)
for guest_tag in caps_root.findall('guest'):
if (xmltag_text_get(guest_tag, 'os_type') == os_type
and xmltag_attr_get(guest_tag, 'arch', 'name') == os_arch):
if (xmltag_text_get(guest_tag, 'os_type') == os_type and
xmltag_attr_get(guest_tag, 'arch', 'name') == os_arch):
for machine_tag in guest_tag.find('arch').findall('machine'):
if machine_tag.text == os_machine:
return True

View File

@ -405,8 +405,8 @@ class FakeClient(object):
# verify that the switch exist
if parent_uuid and parent_uuid not in self._fake_lswitch_dict:
raise Exception(_("lswitch:%s not found") % parent_uuid)
if (not parent_uuid
or res_dict[res_uuid].get('ls_uuid') == parent_uuid):
if (not parent_uuid or
res_dict[res_uuid].get('ls_uuid') == parent_uuid):
return True
return False
@ -449,7 +449,7 @@ class FakeClient(object):
self.LROUTER_LPORT_STATUS):
parent_func = _lrouter_match
else:
parent_func = lambda x: True
parent_func = (lambda x: True)
items = [_build_item(res_dict[res_uuid])
for res_uuid in res_dict

View File

@ -2248,8 +2248,7 @@ class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxVPluginV2TestCase):
data['router']['name'] = name
if admin_state_up:
data['router']['admin_state_up'] = admin_state_up
for arg in (('admin_state_up', 'tenant_id')
+ (arg_list or ())):
for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
# Arg must be present and not empty
if kwargs.get(arg):
data['router'][arg] = kwargs[arg]

View File

@ -1046,8 +1046,8 @@ class FakeVcns(object):
def get_section_id(self, section_name):
for k, v in self._sections.items():
if (k not in ('section_ids', 'rule_ids', 'names')
and v['name'] == section_name):
if (k not in ('section_ids', 'rule_ids', 'names') and
v['name'] == section_name):
return k
def update_section_by_id(self, id, type, request):

View File

@ -585,8 +585,8 @@ class EdgeManagerTestCase(EdgeUtilsTestCaseMixin):
router_bindings = [
binding
for binding in nsxv_db.get_nsxv_router_bindings(self.ctx.session)
if binding['edge_id'] is None
and binding['status'] == constants.PENDING_CREATE]
if binding['edge_id'] is None and
binding['status'] == constants.PENDING_CREATE]
binding_ids = [bind.router_id for bind in router_bindings]
self.assertEqual(2, len(router_bindings))

View File

@ -446,8 +446,8 @@ class VcnsDriverTestCase(base.BaseTestCase):
natcfg = self.vcns_driver.get_nat_config(self.edge_id)
rules = natcfg['rules']['natRulesDtos']
self.assertEqual(2 * len(indices) * len(dnats)
+ len(indices) * len(snats), len(rules))
self.assertEqual(2 * len(indices) * len(dnats) +
len(indices) * len(snats), len(rules))
sorted_rules = sorted(rules, key=lambda k: k['vnic'])
for i in range(0, len(sorted_rules), 7):