Fix pep8 E128 warnings in non-test code

Reduces the E128 warning count by about 260, leaving roughly 900;
no way we're getting rid of all of them at once (or ever).
Files under neutron/tests still have a ton of E128 warnings.

Change-Id: I9137150ccf129bf443e33428267cd4bc9c323b54
Co-Authored-By: Akihiro Motoki <amotoki@gmail.com>
Author:    Brian Haley  2019-02-21 16:20:45 -05:00
Committer: Brian Haley
Commit:    eaf990b2bc (parent: 773489af62)
104 changed files with 422 additions and 354 deletions
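
For readers unfamiliar with the check: E128 is pycodestyle's "continuation
line under-indented for visual indent". Once a call is wrapped after an
opening bracket with arguments left on the first line, every continuation
line must line up under the first argument; E128 fires when it is indented
less than that. The hunks below fix it either by re-aligning the
continuation or, more often, by breaking immediately after the opening
bracket and switching to a hanging indent. A minimal sketch with made-up
names (not code from this commit):

    def some_function(a, b):
        return a + b

    # E128: the continuation hangs from the open bracket but is
    # under-indented relative to the first argument.
    result = some_function(1,
        2)

    # Fix A: align the continuation under the first argument.
    result = some_function(1,
                           2)

    # Fix B (the style used in most hunks below): break right after the
    # opening bracket and use a four-space hanging indent.
    result = some_function(
        1, 2)

Assuming pycodestyle is installed, a command along the lines of
"pycodestyle --select=E128 neutron/" reports just this check.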


@@ -32,8 +32,8 @@ class L2AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
     """
 
     def __init__(self, conf):
-        super(L2AgentExtensionsManager, self).__init__(conf,
-                L2_AGENT_EXT_MANAGER_NAMESPACE)
+        super(L2AgentExtensionsManager,
+              self).__init__(conf, L2_AGENT_EXT_MANAGER_NAMESPACE)
 
     def handle_port(self, context, data):
         """Notify all agent extensions to handle port."""


@@ -321,8 +321,7 @@ class DvrLocalRouter(dvr_router_base.DvrRouterBase):
     def _delete_gateway_device_if_exists(self, ns_ip_device, gw_ip_addr,
                                          snat_idx):
         try:
-            ns_ip_device.route.delete_gateway(gw_ip_addr,
-                table=snat_idx)
+            ns_ip_device.route.delete_gateway(gw_ip_addr, table=snat_idx)
         except exceptions.DeviceNotFoundError:
             pass


@@ -32,8 +32,8 @@ class L3AgentExtensionsManager(agent_ext_manager.AgentExtensionsManager):
     """Manage l3 agent extensions."""
 
     def __init__(self, conf):
-        super(L3AgentExtensionsManager, self).__init__(conf,
-                L3_AGENT_EXT_MANAGER_NAMESPACE)
+        super(L3AgentExtensionsManager,
+              self).__init__(conf, L3_AGENT_EXT_MANAGER_NAMESPACE)
 
     def add_router(self, context, data):
         """Notify all agent extensions to add router."""


@@ -587,7 +587,8 @@ class RouterInfo(object):
                 interface_name = self.get_internal_device_name(p['id'])
                 self.agent.pd.enable_subnet(self.router_id, subnet['id'],
                                             subnet['cidr'],
-                                            interface_name, p['mac_address'])
+                                            interface_name,
+                                            p['mac_address'])
                 if (subnet['cidr'] !=
                         lib_constants.PROVISIONAL_IPV6_PD_PREFIX):
                     self.pd_subnets[subnet['id']] = subnet['cidr']


@@ -185,8 +185,8 @@ class IpConntrackManager(object):
                 r'.* -j CT --zone (?P<zone>\d+).*', rule)
             if match:
                 # strip off any prefix that the interface is using
-                short_port_id = (match.group('dev')
-                    [n_const.LINUX_DEV_PREFIX_LEN:])
+                short_port_id = (
+                    match.group('dev')[n_const.LINUX_DEV_PREFIX_LEN:])
                 self._device_zone_map[short_port_id] = int(match.group('zone'))
         LOG.debug("Populated conntrack zone map: %s", self._device_zone_map)


@@ -72,7 +72,8 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
     CONNTRACK_ZONE_PER_PORT = False
 
     def __init__(self, namespace=None):
-        self.iptables = iptables_manager.IptablesManager(state_less=True,
+        self.iptables = iptables_manager.IptablesManager(
+            state_less=True,
             use_ipv6=ipv6_utils.is_enabled_and_bind_by_default(),
             namespace=namespace)
         # TODO(majopela, shihanzhang): refactor out ipset to a separate
@@ -722,8 +723,9 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
         if (is_port and rule_protocol in constants.IPTABLES_PROTOCOL_MAP):
             # iptables adds '-m protocol' when the port number is specified
-            iptables_rule += ['-m',
-                constants.IPTABLES_PROTOCOL_MAP[rule_protocol]]
+            iptables_rule += [
+                '-m', constants.IPTABLES_PROTOCOL_MAP[rule_protocol]
+            ]
         return iptables_rule
 
     def _port_arg(self, direction, protocol, port_range_min, port_range_max):


@@ -326,7 +326,8 @@ class ConjIPFlowManager(object):
             # no address overlaps.
             addr_to_conj = self._build_addr_conj_id_map(
                 ethertype, sg_conj_id_map)
-            self._update_flows_for_vlan_subr(direction, ethertype, vlan_tag,
+            self._update_flows_for_vlan_subr(
+                direction, ethertype, vlan_tag,
                 self.flow_state[vlan_tag][(direction, ethertype)],
                 addr_to_conj)
             self.flow_state[vlan_tag][(direction, ethertype)] = addr_to_conj


@@ -102,16 +102,24 @@ class DaemonMonitor(object):
             if not v6_subnets:
                 continue
             ra_modes = {subnet['ipv6_ra_mode'] for subnet in v6_subnets}
-            auto_config_prefixes = [subnet['cidr'] for subnet in v6_subnets if
-                subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC or
-                subnet['ipv6_ra_mode'] == constants.DHCPV6_STATELESS]
-            stateful_config_prefixes = [subnet['cidr'] for subnet in v6_subnets
-                if subnet['ipv6_ra_mode'] == constants.DHCPV6_STATEFUL]
+            auto_config_prefixes = [
+                subnet['cidr'] for subnet in v6_subnets
+                if (subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC or
+                    subnet['ipv6_ra_mode'] == constants.DHCPV6_STATELESS)
+            ]
+            stateful_config_prefixes = [
+                subnet['cidr'] for subnet in v6_subnets
+                if subnet['ipv6_ra_mode'] == constants.DHCPV6_STATEFUL
+            ]
             interface_name = self._dev_name_helper(p['id'])
-            slaac_subnets = [subnet for subnet in v6_subnets if
-                subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC]
-            dns_servers = list(iter_chain(*[subnet['dns_nameservers'] for
-                subnet in slaac_subnets if subnet.get('dns_nameservers')]))
+            slaac_subnets = [
+                subnet for subnet in v6_subnets
+                if subnet['ipv6_ra_mode'] == constants.IPV6_SLAAC
+            ]
+            dns_servers = list(iter_chain(*[
+                subnet['dns_nameservers'] for subnet in slaac_subnets
+                if subnet.get('dns_nameservers')
+            ]))
             network_mtu = p.get('mtu', 0)
 
             buf.write('%s' % CONFIG_TEMPLATE.render(


@@ -162,8 +162,10 @@ class MetadataProxyHandler(object):
         ports = self._get_ports(remote_address, network_id, router_id)
         LOG.debug("Gotten ports for remote_address %(remote_address)s, "
                   "network_id %(network_id)s, router_id %(router_id)s are: "
-                  "%(ports)s", {"remote_address": remote_address,
-                                "network_id": network_id, "router_id": router_id,
+                  "%(ports)s",
+                  {"remote_address": remote_address,
+                   "network_id": network_id,
+                   "router_id": router_id,
                    "ports": ports})
 
         if len(ports) == 1:


@@ -102,8 +102,7 @@ class DVRServerRpcCallback(object):
         host = kwargs.get('host')
         subnet = kwargs.get('subnet')
         LOG.debug("DVR Agent requests list of VM ports on host %s", host)
-        return self.plugin.get_ports_on_host_by_subnet(context,
-            host, subnet)
+        return self.plugin.get_ports_on_host_by_subnet(context, host, subnet)
 
     def get_subnet_for_dvr(self, context, **kwargs):
         fixed_ips = kwargs.get('fixed_ips')


@@ -230,11 +230,10 @@ class L3RpcCallback(object):
             # of hosts on which DVR router interfaces are spawned). Such
             # bindings are created/updated here by invoking
             # update_distributed_port_binding
-            self.plugin.update_distributed_port_binding(context, port['id'],
-                {'port':
-                 {portbindings.HOST_ID: host,
-                  'device_id': router_id}
-                 })
+            self.plugin.update_distributed_port_binding(
+                context, port['id'],
+                {'port': {portbindings.HOST_ID: host,
+                          'device_id': router_id}})
 
     def get_external_network_id(self, context, **kwargs):
         """Get one external network id for l3 agent.
@@ -305,7 +304,8 @@ class L3RpcCallback(object):
                 admin_ctx, network_id, host)
         self._ensure_host_set_on_port(admin_ctx, host, agent_port)
         LOG.debug('Agent Gateway port returned : %(agent_port)s with '
-                  'host %(host)s', {'agent_port': agent_port,
-                                    'host': host})
+                  'host %(host)s',
+                  {'agent_port': agent_port,
+                   'host': host})
         return agent_port


@@ -94,7 +94,8 @@ class ResourcesPullRpcApi(object):
     def pull(self, context, resource_type, resource_id):
         resource_type_cls = _resource_to_class(resource_type)
         cctxt = self.client.prepare()
-        primitive = cctxt.call(context, 'pull',
+        primitive = cctxt.call(
+            context, 'pull',
             resource_type=resource_type,
             version=resource_type_cls.VERSION, resource_id=resource_id)
 
@@ -107,7 +108,8 @@ class ResourcesPullRpcApi(object):
     def bulk_pull(self, context, resource_type, filter_kwargs=None):
         resource_type_cls = _resource_to_class(resource_type)
         cctxt = self.client.prepare()
-        primitives = cctxt.call(context, 'bulk_pull',
+        primitives = cctxt.call(
+            context, 'bulk_pull',
             resource_type=resource_type,
             version=resource_type_cls.VERSION, filter_kwargs=filter_kwargs)
         return [resource_type_cls.clean_obj_from_primitive(primitive)


@@ -327,8 +327,8 @@ class Controller(object):
                 fields_to_strip += self._exclude_attributes_by_policy(
                     request.context, obj_list[0])
             collection = {self._collection:
-                          [self._filter_attributes(obj,
-                                                   fields_to_strip=fields_to_strip)
+                          [self._filter_attributes(
+                              obj, fields_to_strip=fields_to_strip)
                            for obj in obj_list]}
         pagination_links = pagination_helper.get_links(obj_list)
         if pagination_links:


@@ -289,7 +289,8 @@ class KeepalivedIPv6Test(object):
         self.config_path = tempfile.mkdtemp()
 
         # Instantiate keepalived manager with the IPv6 configuration.
-        self.manager = keepalived.KeepalivedManager('router1', self.config,
+        self.manager = keepalived.KeepalivedManager(
+            'router1', self.config,
             namespace=self.nsname, process_monitor=self.pm,
             conf_path=self.config_path)
         self.manager.spawn()


@@ -35,7 +35,8 @@ class CoreChecks(base.BaseChecks):
                 "defined in config"))
         else:
             return upgradecheck.Result(
-                upgradecheck.Code.WARNING, _("The default number of workers "
-                "has changed. Please see release notes for the new values, "
-                "but it is strongly encouraged for deployers to manually set "
-                "the values for api_workers and rpc_workers."))
+                upgradecheck.Code.WARNING,
+                _("The default number of workers "
+                  "has changed. Please see release notes for the new values, "
+                  "but it is strongly encouraged for deployers to manually "
+                  "set the values for api_workers and rpc_workers."))


@@ -39,12 +39,14 @@ designate_opts = [
                    'context')),
     cfg.BoolOpt('allow_reverse_dns_lookup', default=True,
                 help=_('Allow the creation of PTR records')),
-    cfg.IntOpt('ipv4_ptr_zone_prefix_size', default=24,
+    cfg.IntOpt(
+        'ipv4_ptr_zone_prefix_size', default=24,
         help=_('Number of bits in an ipv4 PTR zone that will be considered '
                'network prefix. It has to align to byte boundary. Minimum '
               'value is 8. Maximum value is 24. As a consequence, range '
               'of values is 8, 16 and 24')),
-    cfg.IntOpt('ipv6_ptr_zone_prefix_size', default=120,
+    cfg.IntOpt(
+        'ipv6_ptr_zone_prefix_size', default=120,
         help=_('Number of bits in an ipv6 PTR zone that will be considered '
                'network prefix. It has to align to nyble boundary. Minimum '
               'value is 4. Maximum value is 124. As a consequence, range '
@@ -59,6 +61,6 @@ designate_opts = [
 def register_designate_opts(CONF=cfg.CONF):
     CONF.register_opts(designate_opts, 'designate')
     loading.register_auth_conf_options(CONF, 'designate')
-    loading.register_session_conf_options(conf=CONF,
-                                          group='designate',
+    loading.register_session_conf_options(
+        conf=CONF, group='designate',
         deprecated_opts={'cafile': [cfg.DeprecatedOpt('ca_cert')]})


@@ -21,7 +21,8 @@ class DataPlaneStatusMixin(object):
     """Mixin class to add data plane status to a port"""
 
     def _process_create_port_data_plane_status(self, context, data, res):
-        obj = dps_obj.PortDataPlaneStatus(context, port_id=res['id'],
+        obj = dps_obj.PortDataPlaneStatus(
+            context, port_id=res['id'],
             data_plane_status=data[dps_lib.DATA_PLANE_STATUS])
         obj.create()
         res[dps_lib.DATA_PLANE_STATUS] = data[dps_lib.DATA_PLANE_STATUS]


@@ -711,8 +711,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
             return
         external_gateway_info['external_fixed_ips'].append(
             {'subnet_id': subnet['id']})
-        info = {'router': {'external_gateway_info':
-                           external_gateway_info}}
+        info = {'router': {'external_gateway_info': external_gateway_info}}
         l3plugin.update_router(context, router_id, info)
 
     @db_api.retry_if_session_inactive()
@@ -724,8 +723,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
         # If this subnet supports auto-addressing, then update any
         # internal ports on the network with addresses for this subnet.
         if ipv6_utils.is_auto_address_subnet(result):
-            updated_ports = self.ipam.add_auto_addrs_on_network_ports(context,
-                result, ipam_subnet)
+            updated_ports = self.ipam.add_auto_addrs_on_network_ports(
+                context, result, ipam_subnet)
             for port_id in updated_ports:
                 port_info = {'port': {'id': port_id}}
                 try:
@@ -1379,12 +1378,12 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
                 self._enforce_device_owner_not_router_intf_or_device_id(
                     context, pdata.get('device_owner'),
                     pdata.get('device_id'), pdata.get('tenant_id'))
-            bulk_port_data.append(dict(project_id=pdata.get('project_id'),
+            bulk_port_data.append(dict(
+                project_id=pdata.get('project_id'),
                 name=pdata.get('name'),
                 network_id=pdata.get('network_id'),
                 admin_state_up=pdata.get('admin_state_up'),
-                status=pdata.get('status',
-                                 constants.PORT_STATUS_ACTIVE),
+                status=pdata.get('status', constants.PORT_STATUS_ACTIVE),
                 mac_address=pdata.get('mac_address'),
                 device_id=pdata.get('device_id'),
                 device_owner=pdata.get('device_owner'),


@@ -92,7 +92,8 @@ class DNSDbMixin(object):
                 context, floatingip_data, req_data))
         dns_actions_data = None
         if current_dns_name and current_dns_domain:
-            fip_obj.FloatingIPDNS(context,
+            fip_obj.FloatingIPDNS(
+                context,
                 floatingip_id=floatingip_data['id'],
                 dns_name=req_data[dns_apidef.DNSNAME],
                 dns_domain=req_data[dns_apidef.DNSDOMAIN],
@@ -149,7 +150,8 @@ class DNSDbMixin(object):
             else:
                 return
         if current_dns_name and current_dns_domain:
-            fip_obj.FloatingIPDNS(context,
+            fip_obj.FloatingIPDNS(
+                context,
                 floatingip_id=floatingip_data['id'],
                 dns_name='',
                 dns_domain='',


@@ -222,8 +222,8 @@ class FlavorsDbMixin(common_db_mixin.CommonDbMixin):
                              marker=None, page_reverse=False):
         """From flavor, choose service profile and find provider for driver."""
-        objs = obj_flavor.FlavorServiceProfileBinding.get_objects(context,
-            flavor_id=flavor_id)
+        objs = obj_flavor.FlavorServiceProfileBinding.get_objects(
+            context, flavor_id=flavor_id)
         if not objs:
             raise flav_exc.FlavorServiceProfileBindingNotFound(
                 sp_id='', fl_id=flavor_id)


@@ -125,7 +125,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
             if _combine(route) == route_str:
                 route.delete()
         for route_str in new_route_set - old_route_set:
-            route = subnet_obj.Route(context,
+            route = subnet_obj.Route(
+                context,
                 destination=common_utils.AuthenticIPNetwork(
                     route_str.partition("_")[0]),
                 nexthop=netaddr.IPAddress(route_str.partition("_")[2]),


@@ -437,8 +437,8 @@ class IpamPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
             port_copy = copy.deepcopy(original)
             port_copy.update(new_port)
             port_copy['fixed_ips'] = auto_assign_subnets
-            self.allocate_ips_for_port_and_store(context,
-                {'port': port_copy}, port_copy['id'])
+            self.allocate_ips_for_port_and_store(
+                context, {'port': port_copy}, port_copy['id'])
             getattr(db_port, 'fixed_ips')  # refresh relationship before return


@@ -262,8 +262,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
         new_hosts = [agent['host'] for agent in new_agents]
         router = self.get_router(context, router_id)
         for host in set(old_hosts) - set(new_hosts):
-            retain_router = self._check_router_retain_needed(context,
-                router, host)
+            retain_router = self._check_router_retain_needed(
+                context, router, host)
             if retain_router:
                 l3_notifier.routers_updated_on_host(
                     context, [router_id], host)
@@ -362,7 +362,8 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
         record_objs = rb_obj.RouterL3AgentBinding.get_objects(
             context, router_id=router_ids)
         if admin_state_up is not None:
-            l3_agents = ag_obj.Agent.get_objects(context,
+            l3_agents = ag_obj.Agent.get_objects(
+                context,
                 id=[obj.l3_agent_id for obj in record_objs],
                 admin_state_up=admin_state_up)
         else:


@@ -471,7 +471,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
     def _create_gw_port(self, context, router_id, router, new_network_id,
                         ext_ips):
         new_valid_gw_port_attachment = (
-            new_network_id and (not router.gw_port or
+            new_network_id and
+            (not router.gw_port or
              router.gw_port['network_id'] != new_network_id))
         if new_valid_gw_port_attachment:
             subnets = self._core_plugin.get_subnets_by_network(context,
@@ -840,9 +841,11 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
             if port:
                 fixed_ips = list(map(dict, port['port']['fixed_ips']))
                 fixed_ips.append(fixed_ip)
-                return self._core_plugin.update_port(context,
-                    port['port_id'], {'port':
-                        {'fixed_ips': fixed_ips}}), [subnet], False
+                return (self._core_plugin.update_port(
+                            context, port['port_id'],
+                            {'port': {'fixed_ips': fixed_ips}}),
+                        [subnet],
+                        False)
 
         port_data = {'tenant_id': router.tenant_id,
                      'network_id': subnet['network_id'],
@@ -1041,9 +1044,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
                     # multiple prefix port - delete prefix from port
                     fixed_ips = [dict(fip) for fip in p['fixed_ips']
                                  if fip['subnet_id'] != subnet_id]
-                    self._core_plugin.update_port(context, p['id'],
-                        {'port':
-                            {'fixed_ips': fixed_ips}})
+                    self._core_plugin.update_port(
+                        context, p['id'], {'port': {'fixed_ips': fixed_ips}})
                     return (p, [subnet])
                 elif subnet_id in port_subnets:
                     # only one subnet on port - delete the port
@@ -1133,8 +1135,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
                                       internal_subnet_id,
                                       external_network_id):
         subnet = self._core_plugin.get_subnet(context, internal_subnet_id)
-        return self.get_router_for_floatingip(context,
-            internal_port, subnet, external_network_id)
+        return self.get_router_for_floatingip(
+            context, internal_port, subnet, external_network_id)
 
     # NOTE(yamamoto): This method is an override point for plugins
     # inheriting this class. Do not optimize this out.
@@ -1282,7 +1284,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
                 fixed_ip_address=netaddr.IPAddress(internal_ip_address))
             if fip_exists:
                 floating_ip_address = (str(floatingip_obj.floating_ip_address)
-                    if floatingip_obj.floating_ip_address else None)
+                                       if floatingip_obj.floating_ip_address
+                                       else None)
                 raise l3_exc.FloatingIPPortAlreadyAssociated(
                     port_id=fip['port_id'],
                     fip_id=floatingip_obj.id,


@@ -1098,7 +1098,7 @@ class _DVRAgentInterfaceMixin(object):
     def _get_address_pair_active_port_with_fip(
             self, context, port_dict, port_addr_pair_ip):
         port_valid_state = (port_dict['admin_state_up'] or
-            (port_dict['status'] == const.PORT_STATUS_ACTIVE))
+                            port_dict['status'] == const.PORT_STATUS_ACTIVE)
         if not port_valid_state:
             return
         fips = l3_obj.FloatingIP.get_objects(


@@ -234,7 +234,9 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
     def get_number_of_agents_for_scheduling(self, context):
         """Return number of agents on which the router will be scheduled."""
 
-        num_agents = len(self.get_l3_agents(context, active=True,
+        num_agents = len(
+            self.get_l3_agents(
+                context, active=True,
                 filters={'agent_modes': [constants.L3_AGENT_MODE_LEGACY,
                          constants.L3_AGENT_MODE_DVR_SNAT]}))
         max_agents = cfg.CONF.max_l3_agents_per_router
@@ -257,8 +259,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             port_id=port_id,
             router_id=router_id,
             port_type=constants.DEVICE_OWNER_ROUTER_HA_INTF).create()
-        portbinding = l3_hamode.L3HARouterAgentPortBinding(context,
-            port_id=port_id, router_id=router_id)
+        portbinding = l3_hamode.L3HARouterAgentPortBinding(
+            context, port_id=port_id, router_id=router_id)
         portbinding.create()
 
         return portbinding
@@ -671,8 +673,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
             sync_data = self._get_dvr_sync_data(context, host, agent,
                                                 router_ids, active)
         else:
-            sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(context,
-                router_ids, active)
+            sync_data = super(L3_HA_NAT_db_mixin, self).get_sync_data(
+                context, router_ids, active)
         return self._process_sync_ha_data(
             context, sync_data, host, dvr_agent_mode)
@@ -703,7 +705,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
                            constants.DEVICE_OWNER_ROUTER_SNAT,
                            constants.DEVICE_OWNER_ROUTER_GW]}
         ports = self._core_plugin.get_ports(admin_ctx, filters=device_filter)
-        active_ports = (port for port in ports
+        active_ports = (
+            port for port in ports
             if states[port['device_id']] == n_const.HA_ROUTER_STATE_ACTIVE)
 
         for port in active_ports:


@@ -196,7 +196,8 @@ def upgrade():
         sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
         sa.PrimaryKeyConstraint('router_id'))
 
-    op.create_table('cisco_hosting_devices',
+    op.create_table(
+        'cisco_hosting_devices',
         sa.Column('tenant_id', sa.String(length=255), nullable=True,
                   index=True),
         sa.Column('id', sa.String(length=36), nullable=False),
@@ -213,7 +214,8 @@ def upgrade():
                                 ondelete='SET NULL'),
         sa.PrimaryKeyConstraint('id')
     )
-    op.create_table('cisco_port_mappings',
+    op.create_table(
+        'cisco_port_mappings',
         sa.Column('logical_resource_id', sa.String(length=36), nullable=False),
         sa.Column('logical_port_id', sa.String(length=36), nullable=False),
         sa.Column('port_type', sa.String(length=32), nullable=True),
@@ -227,7 +229,8 @@ def upgrade():
                                 ondelete='CASCADE'),
         sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id')
     )
-    op.create_table('cisco_router_mappings',
+    op.create_table(
+        'cisco_router_mappings',
         sa.Column('router_id', sa.String(length=36), nullable=False),
         sa.Column('auto_schedule', sa.Boolean(), nullable=False),
         sa.Column('hosting_device_id', sa.String(length=36), nullable=True),


@@ -43,6 +43,7 @@ def upgrade():
                                 constants.INGRESS_DIRECTION,
                                 name='directions'),
                   nullable=False, server_default=constants.EGRESS_DIRECTION),
-        sa.UniqueConstraint('qos_policy_id', 'direction',
+        sa.UniqueConstraint(
+            'qos_policy_id', 'direction',
             name='qos_minimum_bandwidth_rules0qos_policy_id0direction')
     )


@@ -22,7 +22,8 @@ down_revision = '45f8dd33480b'
 
 def upgrade():
-    op.create_table('trunks',
+    op.create_table(
+        'trunks',
         sa.Column('admin_state_up', sa.Boolean(),
                   nullable=False, server_default=sql.true()),
         sa.Column('tenant_id', sa.String(length=255), nullable=True,
@@ -42,7 +43,8 @@ def upgrade():
         sa.UniqueConstraint('port_id'),
         sa.UniqueConstraint('standard_attr_id')
     )
-    op.create_table('subports',
+    op.create_table(
+        'subports',
         sa.Column('port_id', sa.String(length=36)),
         sa.Column('trunk_id', sa.String(length=36), nullable=False),
         sa.Column('segmentation_type', sa.String(length=32), nullable=False),
@@ -51,6 +53,7 @@ def upgrade():
         sa.ForeignKeyConstraint(['trunk_id'], ['trunks.id'],
                                 ondelete='CASCADE'),
         sa.PrimaryKeyConstraint('port_id'),
-        sa.UniqueConstraint('trunk_id', 'segmentation_type', 'segmentation_id',
+        sa.UniqueConstraint(
+            'trunk_id', 'segmentation_type', 'segmentation_id',
             name='uniq_subport0trunk_id0segmentation_type0segmentation_id')
     )


@@ -30,7 +30,8 @@ down_revision = '030a959ceafa'
 
 def upgrade():
-    op.create_table('subnet_service_types',
+    op.create_table(
+        'subnet_service_types',
         sa.Column('subnet_id', sa.String(length=36)),
         sa.Column('service_type', sa.String(length=255)),
         sa.ForeignKeyConstraint(['subnet_id'], ['subnets.id'],


@@ -31,7 +31,8 @@ down_revision = '0ff9e3881597'
 
 def upgrade():
-    op.create_table('securitygrouprbacs',
+    op.create_table(
+        'securitygrouprbacs',
         sa.Column('project_id', sa.String(length=255), nullable=True),
         sa.Column('id', sa.String(length=36), nullable=False),
         sa.Column('target_tenant', sa.String(length=255), nullable=False),


@@ -24,7 +24,8 @@ class Flavor(model_base.BASEV2, model_base.HasId):
     # Make it True for multi-type flavors
     service_type = sa.Column(sa.String(36), nullable=True)
     service_profiles = orm.relationship("FlavorServiceProfileBinding",
-        cascade="all, delete-orphan", lazy="subquery")
+                                        cascade="all, delete-orphan",
+                                        lazy="subquery")
 
 class ServiceProfile(model_base.BASEV2, model_base.HasId):


@@ -188,8 +188,8 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
                                                    fields)
             if (fields is None or len(fields) == 0 or
                     'security_group_rules' in fields):
-                rules = self.get_security_group_rules(context,
-                    {'security_group_id': [id]})
+                rules = self.get_security_group_rules(
+                    context, {'security_group_id': [id]})
                 ret['security_group_rules'] = rules
 
         finally:
@@ -318,8 +318,8 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
     def _create_port_security_group_binding(self, context, port_id,
                                             security_group_id):
         with db_api.CONTEXT_WRITER.using(context):
-            db = sg_models.SecurityGroupPortBinding(port_id=port_id,
-                security_group_id=security_group_id)
+            db = sg_models.SecurityGroupPortBinding(
+                port_id=port_id, security_group_id=security_group_id)
             context.session.add(db)
 
     def _get_port_security_group_bindings(self, context,
@@ -427,7 +427,8 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
                 id=sg_rule.id)
             res_rule_dict = self._make_security_group_rule_dict(sg_rule.db_obj)
             kwargs['security_group_rule'] = res_rule_dict
-            self._registry_notify(resources.SECURITY_GROUP_RULE,
+            self._registry_notify(
+                resources.SECURITY_GROUP_RULE,
                 events.PRECOMMIT_CREATE,
                 exc_cls=ext_sg.SecurityGroupConflict, **kwargs)
         return res_rule_dict
@@ -800,10 +801,12 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
         sg_objs = sg_obj.SecurityGroup.get_objects(context, id=port_sg)
 
-        valid_groups = set(g.id for g in sg_objs
-                           if not tenant_id or g.tenant_id == tenant_id or
-                           sg_obj.SecurityGroup.is_shared_with_tenant(context,
-                               g.id, tenant_id))
+        valid_groups = set(
+            g.id for g in sg_objs
+            if (not tenant_id or g.tenant_id == tenant_id or
+                sg_obj.SecurityGroup.is_shared_with_tenant(
+                    context, g.id, tenant_id))
+        )
 
         requested_groups = set(port_sg)
         port_sg_missing = requested_groups - valid_groups


@@ -44,9 +44,8 @@ class ServiceTypeManager(object):
     def get_service_providers(self, context, filters=None, fields=None):
         if filters and 'service_type' in filters:
-            return list(
-                chain.from_iterable(self.config[svc_type].
-                    get_service_providers(filters, fields)
+            return list(chain.from_iterable(
+                self.config[svc_type].get_service_providers(filters, fields)
                 for svc_type in filters['service_type']
                 if svc_type in self.config)
             )
@@ -76,7 +75,8 @@ class ServiceTypeManager(object):
     def add_resource_association(self, context, service_type, provider_name,
                                  resource_id):
         r = self.get_service_providers(context,
-            filters={'service_type': [service_type], 'name': [provider_name]})
+            filters={'service_type': [service_type],
+                     'name': [provider_name]})
         if not r:
             raise pconf.ServiceProviderNotFound(provider=provider_name,
                                                 service_type=service_type)


@@ -20,7 +20,8 @@ class UplinkStatusPropagationMixin(object):
     """Mixin class to add uplink propagation to a port"""
 
     def _process_create_port(self, context, data, res):
-        obj = usp_obj.PortUplinkStatusPropagation(context, port_id=res['id'],
+        obj = usp_obj.PortUplinkStatusPropagation(
+            context, port_id=res['id'],
             propagate_uplink_status=data[usp.PROPAGATE_UPLINK_STATUS])
         obj.create()
         res[usp.PROPAGATE_UPLINK_STATUS] = data[usp.PROPAGATE_UPLINK_STATUS]


@@ -58,12 +58,13 @@ validators.add_validator('type:validate_subnet_service_types',
 
 EXTENDED_ATTRIBUTES_2_0 = {
     subnet_def.COLLECTION_NAME: {
-        'service_types': {'allow_post': True,
-                          'allow_put': True,
-                          'default': constants.ATTR_NOT_SPECIFIED,
-                          'validate': {'type:validate_subnet_service_types':
-                                       None},
-                          'is_visible': True, },
+        'service_types': {
+            'allow_post': True,
+            'allow_put': True,
+            'default': constants.ATTR_NOT_SPECIFIED,
+            'validate': {'type:validate_subnet_service_types': None},
+            'is_visible': True,
+        },
     },
 }


@@ -64,8 +64,8 @@ class IpamSubnetManager(object):
         :param context: neutron api request context
         :param neutron_subnet_id: neutron subnet id associated with ipam subnet
         """
-        return ipam_objs.IpamSubnet.delete_objects(context,
-            neutron_subnet_id=neutron_subnet_id)
+        return ipam_objs.IpamSubnet.delete_objects(
+            context, neutron_subnet_id=neutron_subnet_id)
 
     def create_pool(self, context, pool_start, pool_end):
         """Create an allocation pool for the subnet.


@@ -187,8 +187,8 @@ class NeutronDbSubnet(ipam_base.Subnet):
                 allocated_num_addresses = requested_num_addresses
 
             if prefer_next:
-                allocated_ip_pool = list(itertools.islice(av_set,
-                    allocated_num_addresses))
+                allocated_ip_pool = list(itertools.islice(
+                    av_set, allocated_num_addresses))
                 allocated_ips.extend([str(allocated_ip)
                                       for allocated_ip in allocated_ip_pool])


@@ -292,8 +292,8 @@ class Network(rbac_db.NeutronRbacObject):
     def _set_dns_domain(self, dns_domain):
         NetworkDNSDomain.delete_objects(self.obj_context, network_id=self.id)
         if dns_domain:
-            NetworkDNSDomain(self.obj_context,
-                network_id=self.id, dns_domain=dns_domain).create()
+            NetworkDNSDomain(self.obj_context, network_id=self.id,
+                             dns_domain=dns_domain).create()
         self.dns_domain = dns_domain
         self.obj_reset_changes(['dns_domain'])


@@ -279,13 +279,16 @@ class FloatingIP(base.NeutronDbObject):
     def get_scoped_floating_ips(cls, context, router_ids):
         query = context.session.query(l3.FloatingIP,
                                       models_v2.SubnetPool.address_scope_id)
-        query = query.join(models_v2.Port,
+        query = query.join(
+            models_v2.Port,
             l3.FloatingIP.fixed_port_id == models_v2.Port.id)
 
         # Outer join of Subnet can cause each ip to have more than one row.
-        query = query.outerjoin(models_v2.Subnet,
+        query = query.outerjoin(
+            models_v2.Subnet,
             models_v2.Subnet.network_id == models_v2.Port.network_id)
         query = query.filter(models_v2.Subnet.ip_version == 4)
-        query = query.outerjoin(models_v2.SubnetPool,
+        query = query.outerjoin(
+            models_v2.SubnetPool,
             models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id)
 
         # Filter out on router_ids


@@ -60,19 +60,22 @@ def _apply_tag_filters(model, query, filters):
     if 'tags' in filters:
         tags = _get_tag_list(filters.pop('tags'))
         first_tag = tags.pop(0)
-        query = query.join(tag_model.Tag,
+        query = query.join(
+            tag_model.Tag,
             model.standard_attr_id == tag_model.Tag.standard_attr_id)
         query = query.filter(tag_model.Tag.tag == first_tag)
 
         for tag in tags:
             tag_alias = aliased(tag_model.Tag)
-            query = query.join(tag_alias,
+            query = query.join(
+                tag_alias,
                 model.standard_attr_id == tag_alias.standard_attr_id)
             query = query.filter(tag_alias.tag == tag)
 
     if 'tags-any' in filters:
         tags = _get_tag_list(filters.pop('tags-any'))
-        query = query.join(tag_model.Tag,
+        query = query.join(
+            tag_model.Tag,
             model.standard_attr_id == tag_model.Tag.standard_attr_id)
         query = query.filter(tag_model.Tag.tag.in_(tags))
 
@@ -84,7 +87,8 @@ def _apply_tag_filters(model, query, filters):
         for tag in tags:
             tag_alias = aliased(tag_model.Tag)
-            subq = subq.join(tag_alias,
+            subq = subq.join(
+                tag_alias,
                 tag_model.Tag.standard_attr_id == tag_alias.standard_attr_id)
             subq = subq.filter(tag_alias.tag == tag)


@@ -29,8 +29,8 @@ class ExceptionTranslationHook(hooks.PecanHook):
             language = None
             if state.request.accept_language:
                 all_languages = oslo_i18n.get_available_languages('neutron')
-                language = state.request.accept_language.lookup(all_languages,
-                    default='fake_LANG')
+                language = state.request.accept_language.lookup(
+                    all_languages, default='fake_LANG')
                 if language == 'fake_LANG':
                     language = None
             exc = api_common.convert_exception_to_http_exc(e, faults.FAULT_MAP,


@@ -264,7 +264,8 @@ def generate_distributed_port_status(context, port_id):
 def get_distributed_port_binding_by_host(context, port_id, host):
     with db_api.CONTEXT_READER.using(context):
-        binding = (context.session.query(models.DistributedPortBinding).
+        binding = (
+            context.session.query(models.DistributedPortBinding).
             filter(models.DistributedPortBinding.port_id.startswith(port_id),
                    models.DistributedPortBinding.host == host).first())
     if not binding:


@@ -74,7 +74,8 @@ def get_agent_by_host(context, agent_host):
 def _get_active_network_ports(context, network_id):
     query = context.session.query(ml2_models.PortBinding,
                                   agent_model.Agent)
-    query = query.join(agent_model.Agent,
+    query = query.join(
+        agent_model.Agent,
         agent_model.Agent.host == ml2_models.PortBinding.host)
     query = query.join(models_v2.Port)
     query = query.options(orm.subqueryload(ml2_models.PortBinding.port))
@@ -85,7 +86,8 @@ def _get_active_network_ports(context, network_id):
 def _ha_router_interfaces_on_network_query(context, network_id):
     query = context.session.query(models_v2.Port)
-    query = query.join(l3ha_model.L3HARouterAgentPortBinding,
+    query = query.join(
+        l3ha_model.L3HARouterAgentPortBinding,
         l3ha_model.L3HARouterAgentPortBinding.router_id ==
         models_v2.Port.device_id)
     return query.filter(


@@ -522,8 +522,9 @@ class LinuxBridgeManager(amb.CommonAgentManagerBase):
                 ctx.reraise = False
                 return False
 
-    def _add_tap_interface(self, network_id, network_type, physical_network,
-            segmentation_id, tap_device_name, device_owner, mtu):
+    def _add_tap_interface(self, network_id,
+                           network_type, physical_network, segmentation_id,
+                           tap_device_name, device_owner, mtu):
         """Add tap interface.
 
         If a VIF has been plugged into a network, this function will


@@ -380,7 +380,8 @@ class ESwitchManager(object):
             # We don't know about this device at the moment, so add to the map.
             if PciOsWrapper.pf_device_exists(dev_name):
-                self._create_emb_switch(phys_net, dev_name,
+                self._create_emb_switch(
+                    phys_net, dev_name,
                     exclude_devices.get(dev_name, set()))
 
     def discover_devices(self, device_mappings, exclude_devices):
@@ -426,7 +427,8 @@ class ESwitchManager(object):
         Clear the "rate" configuration from VF by setting it to 0.
 
         @param pci_slot: VF PCI slot
         """
-        self._clear_rate(pci_slot,
+        self._clear_rate(
+            pci_slot,
             ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_RATE)
 
     def clear_min_tx_rate(self, pci_slot):
@@ -435,7 +437,8 @@ class ESwitchManager(object):
         Clear the "min_tx_rate" configuration from VF by setting it to 0.
 
        @param pci_slot: VF PCI slot
        """
-        self._clear_rate(pci_slot,
+        self._clear_rate(
+            pci_slot,
            ip_link_support.IpLinkConstants.IP_LINK_CAPABILITY_MIN_TX_RATE)
 
     def _clear_rate(self, pci_slot, rate_type):


@@ -153,8 +153,8 @@ class SriovNicSwitchAgent(object):
         self.context = context.get_admin_context_without_session()
         self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
         self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
-        self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(self.context,
-            self.sg_plugin_rpc)
+        self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(
+            self.context, self.sg_plugin_rpc)
         self._setup_rpc()
         self.ext_manager = self._create_agent_extension_manager(
             self.connection)


@@ -35,16 +35,16 @@ class OVSDVRProcessMixin(object):
     def install_dvr_process_ipv4(self, vlan_tag, gateway_ip):
         # block ARP
         (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_ipv4_match(ofp, ofpp,
-            vlan_tag=vlan_tag, gateway_ip=gateway_ip)
+        match = self._dvr_process_ipv4_match(ofp, ofpp, vlan_tag=vlan_tag,
+                                             gateway_ip=gateway_ip)
         self.install_drop(table_id=self.dvr_process_table_id,
                           priority=3,
                           match=match)
 
     def delete_dvr_process_ipv4(self, vlan_tag, gateway_ip):
         (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_ipv4_match(ofp, ofpp,
-            vlan_tag=vlan_tag, gateway_ip=gateway_ip)
+        match = self._dvr_process_ipv4_match(ofp, ofpp, vlan_tag=vlan_tag,
+                                             gateway_ip=gateway_ip)
         self.uninstall_flows(table_id=self.dvr_process_table_id,
                              match=match)
 
@@ -59,15 +59,15 @@ class OVSDVRProcessMixin(object):
     def install_dvr_process_ipv6(self, vlan_tag, gateway_mac):
         # block RA
         (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_ipv6_match(ofp, ofpp,
-            vlan_tag=vlan_tag, gateway_mac=gateway_mac)
+        match = self._dvr_process_ipv6_match(ofp, ofpp, vlan_tag=vlan_tag,
+                                             gateway_mac=gateway_mac)
         self.install_drop(table_id=self.dvr_process_table_id, priority=3,
                           match=match)
 
     def delete_dvr_process_ipv6(self, vlan_tag, gateway_mac):
         (_dp, ofp, ofpp) = self._get_dp()
-        match = self._dvr_process_ipv6_match(ofp, ofpp,
-            vlan_tag=vlan_tag, gateway_mac=gateway_mac)
+        match = self._dvr_process_ipv6_match(ofp, ofpp, vlan_tag=vlan_tag,
+                                             gateway_mac=gateway_mac)
         self.uninstall_flows(table_id=self.dvr_process_table_id,
                              match=match)


@@ -62,5 +62,6 @@ class OVSPhysicalBridge(ovs_bridge.OVSAgentBridge,
     def remove_dvr_mac_vlan(self, mac):
         # REVISIT(yamamoto): match in_port as well?
-        self.uninstall_flows(table_id=constants.DVR_NOT_LEARN_VLAN,
+        self.uninstall_flows(
+            table_id=constants.DVR_NOT_LEARN_VLAN,
             eth_src=mac)


@@ -995,8 +995,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
             # Install protection only when prefix is not zero because a /0
             # prefix allows any address anyway and the nd_target can only
             # match on /1 or more.
-            bridge.install_icmpv6_na_spoofing_protection(port=vif.ofport,
-                ip_addresses=ipv6_addresses)
+            bridge.install_icmpv6_na_spoofing_protection(
+                port=vif.ofport, ip_addresses=ipv6_addresses)
 
         ipv4_addresses = {ip for ip in addresses
                           if netaddr.IPNetwork(ip).version == 4}
@@ -1222,8 +1222,8 @@ class OVSNeutronAgent(l2population_rpc.L2populationRpcCallBackTunnelMixin,
         # be same, so check only one of them.
         # Not logging error here, as the interface may not exist yet.
         # Type check is done to cleanup wrong interface if any.
-        int_type = self.int_br.db_get_val("Interface",
-            int_if_name, "type", log_errors=False)
+        int_type = self.int_br.db_get_val("Interface", int_if_name, "type",
+                                          log_errors=False)
         if self.use_veth_interconnection:
             # Drop ports if the interface types doesn't match the
             # configuration value.


@@ -55,9 +55,11 @@ class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
     def __init__(self):
         sg_enabled = securitygroups_rpc.is_firewall_enabled()
-        hybrid_plug_required = (not cfg.CONF.SECURITYGROUP.firewall_driver or
-            cfg.CONF.SECURITYGROUP.firewall_driver in (
-                IPTABLES_FW_DRIVER_FULL, 'iptables_hybrid')) and sg_enabled
+        hybrid_plug_required = (
+            not cfg.CONF.SECURITYGROUP.firewall_driver or
+            cfg.CONF.SECURITYGROUP.firewall_driver in (
+                IPTABLES_FW_DRIVER_FULL, 'iptables_hybrid')
+        ) and sg_enabled
         vif_details = {portbindings.CAP_PORT_FILTER: sg_enabled,
                        portbindings.OVS_HYBRID_PLUG: hybrid_plug_required}
         # NOTE(moshele): Bind DIRECT (SR-IOV) port allows
@@ -163,9 +165,10 @@ class OpenvswitchMechanismDriver(mech_agent.SimpleAgentMechanismDriverBase):
             if bridge_name:
                 vif_details[portbindings.VIF_DETAILS_BRIDGE_NAME] = bridge_name
 
-        registry.publish(a_const.OVS_BRIDGE_NAME, events.BEFORE_READ,
-            set_bridge_name_inner, payload=events.EventPayload(
-                None, metadata={'port': port}))
+        registry.publish(
+            a_const.OVS_BRIDGE_NAME, events.BEFORE_READ,
+            set_bridge_name_inner,
+            payload=events.EventPayload(None, metadata={'port': port}))
 
     def _pre_get_vif_details(self, agent, context):
         a_config = agent['configurations']


@@ -550,8 +550,8 @@ class TunnelRpcCallbackMixin(object):
                                   host, ip_endpoint.ip_address)
             elif (host_endpoint and host_endpoint.ip_address != tunnel_ip):
                 # Notify all other listening agents to delete stale tunnels
-                self._notifier.tunnel_delete(rpc_context,
-                    host_endpoint.ip_address, tunnel_type)
+                self._notifier.tunnel_delete(
+                    rpc_context, host_endpoint.ip_address, tunnel_type)
                 driver.obj.delete_endpoint(host_endpoint.ip_address)
 
         tunnel = driver.obj.add_endpoint(tunnel_ip, host)


@@ -139,9 +139,11 @@ class DNSExtensionDriver(api.ExtensionDriver):
             plugin_context,
             port_id=db_data['id'])
         if dns_data_db:
-            is_dns_name_changed = (dns_name is not None and
+            is_dns_name_changed = (
+                dns_name is not None and
                 dns_data_db[dns_apidef.DNSNAME] != dns_name)
-            is_dns_domain_changed = (dns_domain is not None and
+            is_dns_domain_changed = (
+                dns_domain is not None and
                 dns_data_db[dns_apidef.DNSDOMAIN] != dns_domain)
             if (is_dns_name_changed or is_dns_domain_changed or
                     (has_fixed_ips and dns_data_db['current_dns_name'])):
@@ -159,8 +161,8 @@ class DNSExtensionDriver(api.ExtensionDriver):
                 dns_data_db.update()
             return dns_data_db
         if dns_name or dns_domain:
-            dns_data_db = self._create_port_dns_record(plugin_context,
-                request_data, db_data, network, dns_name or '')
+            dns_data_db = self._create_port_dns_record(
+                plugin_context, request_data, db_data, network, dns_name or '')
         return dns_data_db
 
     def _populate_previous_external_dns_data(self, dns_data_db):


@@ -1371,8 +1371,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             if self._check_update_has_allowed_address_pairs(port):
                 # has address pairs in request
                 raise addr_exc.AddressPairAndPortSecurityRequired()
-            elif (not
-                  self._check_update_deletes_allowed_address_pairs(port)):
+            elif not self._check_update_deletes_allowed_address_pairs(port):
                 # not a request for deleting the address-pairs
                 updated_port[addr_apidef.ADDRESS_PAIRS] = (
                     self.get_allowed_address_pairs(context, id))
@@ -1386,8 +1385,7 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
             # security groups, port security is set
             if self._check_update_has_security_groups(port):
                 raise psec_exc.PortSecurityAndIPRequiredForSecurityGroups()
-            elif (not
-                  self._check_update_deletes_security_groups(port)):
+            elif not self._check_update_deletes_security_groups(port):
                 if not extensions.is_extension_supported(self, 'security-group'):
                     return
                 # Update did not have security groups passed in. Check
@@ -1578,7 +1576,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                                                          id, host)
             device_id = attrs and attrs.get('device_id')
             router_id = binding and binding.get('router_id')
-            update_required = (not binding or
+            update_required = (
+                not binding or
                 binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED or
                 router_id != device_id)
             if update_required:
@@ -1591,8 +1590,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
                     network = self.get_network(context,
                                                orig_port['network_id'])
                     levels = db.get_binding_level_objs(context, id, host)
-                    mech_context = driver_context.PortContext(self,
-                        context, orig_port, network,
+                    mech_context = driver_context.PortContext(
+                        self, context, orig_port, network,
                         binding, levels, original_port=orig_port)
                 self._process_distributed_port_binding(
                     mech_context, context, attrs)
@@ -2059,7 +2058,8 @@ class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
         # change in segments could affect resulting network mtu, so let's
        # recalculate it
         network_db = self._get_network(context, network_id)
-        network_db.mtu = self._get_network_mtu(network_db,
+        network_db.mtu = self._get_network_mtu(
+            network_db,
             validate=(event != events.PRECOMMIT_DELETE))
         network_db.save(session=context.session)


@@ -176,8 +176,9 @@ class DhcpFilter(base_resource_filter.BaseResourceFilter):
             # DetachedInstanceError
             agent_id = agent.id
             try:
-                network.NetworkDhcpAgentBinding(context,
-                    dhcp_agent_id=agent_id, network_id=network_id).create()
+                network.NetworkDhcpAgentBinding(
+                    context, dhcp_agent_id=agent_id,
+                    network_id=network_id).create()
             except exceptions.NeutronDbObjectDuplicateEntry:
                 # it's totally ok, someone just did our job!
                 bound_agents.remove(agent)


@@ -66,8 +66,8 @@ def _ensure_external_network_default_value_callback(
     if is_default:
         # ensure only one default external network at any given time
         pager = base_obj.Pager(limit=1)
-        objs = net_obj.ExternalNetwork.get_objects(context,
-            _pager=pager, is_default=True)
+        objs = net_obj.ExternalNetwork.get_objects(context, _pager=pager,
+                                                   is_default=True)
         if objs:
             if objs[0] and network['id'] != objs[0].network_id:
                 raise exceptions.DefaultExternalNetworkExists(


@@ -45,8 +45,9 @@ class NetworkIPAvailabilityPlugin(ip_availability_db.IpAvailabilityMixin,
     def get_network_ip_availabilities(self, context, filters=None,
                                       fields=None):
         """Returns ip availability data for a collection of networks."""
-        net_ip_availabilities = super(NetworkIPAvailabilityPlugin,
-            self).get_network_ip_availabilities(context, filters)
+        net_ip_availabilities = super(
+            NetworkIPAvailabilityPlugin, self
+        ).get_network_ip_availabilities(context, filters)
         return [db_utils.resource_fields(net_ip_availability, fields)
                 for net_ip_availability in net_ip_availabilities]


@@ -379,7 +379,8 @@ class NovaSegmentNotifier(object):
             if segment_id:
                 if event == events.AFTER_DELETE:
                     ipv4_subnets_number = -ipv4_subnets_number
-                self.batch_notifier.queue_event(Event(self._update_nova_inventory,
+                self.batch_notifier.queue_event(
+                    Event(self._update_nova_inventory,
                           segment_id, reserved=ipv4_subnets_number))
 
     @registry.receives(resources.PORT, [events.AFTER_UPDATE])


@@ -116,6 +116,6 @@ class TagPlugin(common_db_mixin.CommonDbMixin, tagging.TagPluginBase):
     @log_helpers.log_method_call
     def delete_tag(self, context, resource, resource_id, tag):
         res = self._get_resource(context, resource, resource_id)
-        if not tag_obj.Tag.delete_objects(context,
-            tag=tag, standard_attr_id=res.standard_attr_id):
+        if not tag_obj.Tag.delete_objects(
+                context, tag=tag, standard_attr_id=res.standard_attr_id):
             raise tagging.TagNotFound(tag=tag)


@@ -120,7 +120,8 @@ class L3AgentTestFramework(base.BaseSudoTestCase):
                                               enable_snat=enable_snat,
                                               num_internal_ports=(
                                                   num_internal_ports),
-                                              enable_floating_ip=enable_fip,
+                                              enable_floating_ip=(
+                                                  enable_fip),
                                               enable_ha=enable_ha,
                                               extra_routes=extra_routes,
                                               dual_stack=dual_stack,
@@ -375,7 +376,9 @@ class L3AgentTestFramework(base.BaseSudoTestCase):
                              ipv6_subnet_modes=None,
                              interface_id=None):
         return l3_test_common.router_append_subnet(router, count,
-            ip_version, ipv6_subnet_modes, interface_id)
+                                                   ip_version,
+                                                   ipv6_subnet_modes,
+                                                   interface_id)
 
     def _namespace_exists(self, namespace):
         return ip_lib.network_namespace_exists(namespace)


@@ -402,8 +402,7 @@ class KeepalivedTrackScriptTestCase(base.BaseTestCase):
             keepalived.KeepalivedVirtualRoute('12.0.0.0/24', '10.0.0.1'), ]
         self.assertEqual('''    track_script {
        ha_health_check_1
-    }''',
-                         ts.get_config_str())
+    }''', ts.get_config_str())
 
     def test_get_script_str(self):
         ts = keepalived.KeepalivedTrackScript(