Fix some pylint indentation warnings
Running with a stricter .pylintrc generates a lot of C0330 warnings (hanging/continued indentation). Fix the ones in neutron/db.

Trivialfix

Change-Id: I9311cfe5efc51552008072d84aa238e5d0c9de60
This commit is contained in:
parent ba795c6692
commit 55b16d7b7c
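Pylint's C0330 check ("bad-continuation") flags continuation lines that are neither aligned with their opening bracket nor indented as a conventional hanging indent. As a rough sketch of the style being enforced (hypothetical names, not code from this patch):

# Flagged by C0330: the continuation line matches neither the opening
# bracket column nor a standard hanging indent.
ports = query_ports(network_id,
      device_owner=owner)

# Accepted: continuation aligned under the opening bracket.
ports = query_ports(network_id,
                    device_owner=owner)

# Also accepted: hanging indent, with the arguments on their own line.
ports = query_ports(
    network_id, device_owner=owner)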
@@ -146,7 +146,7 @@ class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase):
 'name': k[0], 'resource': k[1],
 'tenant_id': context.tenant_id}
 for k, v in self._list_availability_zones(
-context, filters).items()
+context, filters).items()
 if not filter_states or v in filter_states]
 else:
 # NOTE(hichihara): 'tenant_id' is dummy for policy check.
@@ -155,7 +155,7 @@ class AgentAvailabilityZoneMixin(az_ext.AvailabilityZonePluginBase):
 'name': k[0], 'resource': k[1],
 'tenant_id': context.tenant_id}
 for k, v in self._list_availability_zones(
-context, filters).items()]
+context, filters).items()]

 @db_api.retry_if_session_inactive()
 def validate_availability_zones(self, context, resource_type,
@@ -219,7 +219,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
 agent_dead_limit = datetime.timedelta(
 seconds=self.agent_dead_limit_seconds())
 network_count = network.NetworkDhcpAgentBinding.count(
-context, dhcp_agent_id=agent['id'])
+context, dhcp_agent_id=agent['id'])
 # amount of networks assigned to agent affect amount of time we give
 # it so startup. Tests show that it's more or less sage to assume
 # that DHCP agent processes each network in less than 2 seconds.
@@ -367,7 +367,7 @@ class DhcpAgentSchedulerDbMixin(dhcpagentscheduler
 # get all the NDAB objects, which will also fetch (from DB)
 # the related dhcp_agent objects because of the synthetic field
 bindings = network.NetworkDhcpAgentBinding.get_objects(
-context, network_id=network_ids)
+context, network_id=network_ids)
 # get the already fetched dhcp_agent objects
 agent_objs = [binding.db_obj.dhcp_agent for binding in bindings]
 # filter the dhcp_agent objects on admin_state_up
@@ -103,7 +103,7 @@ class AllowedAddressPairsMixin(object):

 def _has_address_pairs(self, port):
 return (validators.is_attr_set(
-port['port'][addr_apidef.ADDRESS_PAIRS]) and
+port['port'][addr_apidef.ADDRESS_PAIRS]) and
 port['port'][addr_apidef.ADDRESS_PAIRS] != [])

 def _check_update_has_allowed_address_pairs(self, port):
@@ -47,4 +47,4 @@ class DataPlaneStatusMixin(object):

 if port_db.get(dps_lib.DATA_PLANE_STATUS):
 port_res[dps_lib.DATA_PLANE_STATUS] = (
-port_db[dps_lib.DATA_PLANE_STATUS].data_plane_status)
+port_db[dps_lib.DATA_PLANE_STATUS].data_plane_status)
@@ -168,7 +168,7 @@ class DbBasePluginCommon(object):
 if isinstance(subnet, subnet_obj.Subnet):
 res['cidr'] = str(subnet.cidr)
 res['allocation_pools'] = [{'start': str(pool.start),
-'end': str(pool.end)}
+'end': str(pool.end)}
 for pool in subnet.allocation_pools]
 res['host_routes'] = [{'destination': str(route.destination),
 'nexthop': str(route.nexthop)}
@@ -182,7 +182,7 @@ class DbBasePluginCommon(object):
 else:
 res['cidr'] = subnet['cidr']
 res['allocation_pools'] = [{'start': pool['first_ip'],
-'end': pool['last_ip']}
+'end': pool['last_ip']}
 for pool in subnet['allocation_pools']]
 res['host_routes'] = [{'destination': route['destination'],
 'nexthop': route['nexthop']}
@@ -239,7 +239,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
 with db_api.CONTEXT_READER.using(elevated):
 ports = model_query.query_with_hooks(
 elevated, models_v2.Port).filter(
-models_v2.Port.network_id == network_id)
+models_v2.Port.network_id == network_id)
 if tenant_id == '*':
 # for the wildcard we need to get all of the rbac entries to
 # see if any allow the remaining ports on the network.
@@ -476,8 +476,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
 def _ensure_network_not_in_use(self, context, net_id):
 non_auto_ports = context.session.query(
 models_v2.Port.id).filter_by(network_id=net_id).filter(
-~models_v2.Port.device_owner.in_(
-_constants.AUTO_DELETE_PORT_OWNERS))
+~models_v2.Port.device_owner.in_(
+_constants.AUTO_DELETE_PORT_OWNERS))
 if non_auto_ports.count():
 ports = [port.id for port in non_auto_ports.all()]
 reason = _("There are one or more ports still in use on the "
@@ -493,8 +493,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
 self._ensure_network_not_in_use(context, id)
 auto_delete_port_ids = [p.id for p in context.session.query(
 models_v2.Port.id).filter_by(network_id=id).filter(
-models_v2.Port.device_owner.in_(
-_constants.AUTO_DELETE_PORT_OWNERS))]
+models_v2.Port.device_owner.in_(
+_constants.AUTO_DELETE_PORT_OWNERS))]
 for port_id in auto_delete_port_ids:
 try:
 self.delete_port(context.elevated(), port_id)
@@ -748,7 +748,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
 fips[0]['ip_address']).version == subnet['ip_version']:
 return
 external_gateway_info['external_fixed_ips'].append(
-{'subnet_id': subnet['id']})
+{'subnet_id': subnet['id']})
 info = {'router': {'external_gateway_info': external_gateway_info}}
 l3plugin.update_router(ctx_admin, router_id, info)

@@ -1053,7 +1053,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
 # Do not allow a subnet to be deleted if a router is attached to it
 sid = subnet['id']
 self._subnet_check_ip_allocations_internal_router_ports(
-context, sid)
+context, sid)
 is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
 if not is_auto_addr_subnet:
 # we only automatically remove IP addresses from user ports if
@@ -1354,9 +1354,9 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
 raise exc.SubnetPoolNotFound(subnetpool_id=id)

 subnets_to_onboard = subnet_obj.Subnet.get_objects(
-context,
-network_id=network_id,
-ip_version=subnetpool.ip_version)
+context,
+network_id=network_id,
+ip_version=subnetpool.ip_version)

 self._onboard_network_subnets(context, subnets_to_onboard, subnetpool)

@@ -1378,8 +1378,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
 subnetpool):
 allocated_prefix_set = netaddr.IPSet(
 [x.cidr for x in subnet_obj.Subnet.get_objects(
-context,
-subnetpool_id=subnetpool.id)])
+context,
+subnetpool_id=subnetpool.id)])
 prefixes_to_add = []

 for subnet in subnets_to_onboard:
@@ -1405,8 +1405,8 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,

 def _check_mac_addr_update(self, context, port, new_mac, device_owner):
 if (device_owner and
-device_owner.startswith(
-constants.DEVICE_OWNER_NETWORK_PREFIX)):
+device_owner.startswith(
+constants.DEVICE_OWNER_NETWORK_PREFIX)):
 raise exc.UnsupportedPortDeviceOwner(
 op=_("mac address update"), port_id=id,
 device_owner=device_owner)
@@ -136,7 +136,7 @@ class External_net_db_mixin(object):
 net_obj.ExternalNetwork.delete_objects(
 context, network_id=net_id)
 net_obj.NetworkRBAC.delete_objects(
-context, object_id=net_id, action='access_as_external')
+context, object_id=net_id, action='access_as_external')
 net_data[extnet_apidef.EXTERNAL] = False

 def _process_l3_delete(self, context, network_id):
@@ -62,7 +62,7 @@ class ExtraDhcpOptMixin(object):

 def _get_port_extra_dhcp_opts_binding(self, context, port_id):
 opts = obj_extra_dhcp.ExtraDhcpOpt.get_objects(
-context, port_id=port_id)
+context, port_id=port_id)
 # TODO(mhickey): When port serilization is available then
 # the object list should be returned instead
 return [{'opt_name': r.opt_name, 'opt_value': r.opt_value,
@@ -155,7 +155,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
 subnet):
 super(ExtraRoute_dbonly_mixin,
 self)._confirm_router_interface_not_in_use(
-context, router_id, subnet)
+context, router_id, subnet)
 subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
 extra_routes = self._get_extra_routes_by_router_id(context, router_id)
 for route in extra_routes:
@@ -224,8 +224,8 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
 context,
 router_id,
 {'router':
-{'routes':
-self._add_extra_routes(old_routes, routes)}})
+{'routes':
+self._add_extra_routes(old_routes, routes)}})
 return {'router': router}

 @db_api.retry_if_session_inactive()
@@ -241,8 +241,8 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
 context,
 router_id,
 {'router':
-{'routes':
-self._remove_extra_routes(old_routes, routes)}})
+{'routes':
+self._remove_extra_routes(old_routes, routes)}})
 return {'router': router}

@@ -196,7 +196,7 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
 return updated_types

 def update_db_subnet(self, context, subnet_id, s, oldpools,
-subnet_obj=None):
+subnet_obj=None):
 changes = {}
 if "dns_nameservers" in s:
 changes['dns_nameservers'] = (
@@ -243,8 +243,8 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
 str(subnet['cidr']) != const.PROVISIONAL_IPV6_PD_PREFIX):
 # don't give out details of the overlapping subnet
 err_msg = (_("Requested subnet with cidr: %(cidr)s for "
-"network: %(network_id)s overlaps with another "
-"subnet") %
+"network: %(network_id)s overlaps with another "
+"subnet") %
 {'cidr': new_subnet_cidr,
 'network_id': network.id})
 LOG.info("Validation for CIDR: %(new_cidr)s failed - "
@@ -369,9 +369,9 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
 to_create_subnet_id = None

 segments = subnet_obj.Subnet.get_subnet_segment_ids(
-context, network_id,
-ignored_service_type=const.DEVICE_OWNER_ROUTED,
-subnet_id=to_create_subnet_id)
+context, network_id,
+ignored_service_type=const.DEVICE_OWNER_ROUTED,
+subnet_id=to_create_subnet_id)

 associated_segments = set(segments)
 if None in associated_segments and len(associated_segments) > 1:
@@ -427,9 +427,9 @@ class IpamBackendMixin(db_base_plugin_common.DbBasePluginCommon):
 raise exc.InvalidInput(error_message=msg)
 # Ensure that the IP is valid on the subnet
 if ('ip_address' in fixed and
-not ipam_utils.check_subnet_ip(subnet['cidr'],
-fixed['ip_address'],
-fixed['device_owner'])):
+not ipam_utils.check_subnet_ip(subnet['cidr'],
+fixed['ip_address'],
+fixed['device_owner'])):
 raise exc.InvalidIpForSubnet(ip_address=fixed['ip_address'])
 return subnet

@@ -63,13 +63,13 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
 def reschedule_routers_from_down_agents(self):
 """Reschedule routers from down l3 agents if admin state is up."""
 self.reschedule_resources_from_down_agents(
-agent_type='L3',
-get_down_bindings=self.get_down_router_bindings,
-agent_id_attr='l3_agent_id',
-resource_id_attr='router_id',
-resource_name='router',
-reschedule_resource=self.reschedule_router,
-rescheduling_failed=l3agentscheduler.RouterReschedulingFailed)
+agent_type='L3',
+get_down_bindings=self.get_down_router_bindings,
+agent_id_attr='l3_agent_id',
+resource_id_attr='router_id',
+resource_name='router',
+reschedule_resource=self.reschedule_router,
+rescheduling_failed=l3agentscheduler.RouterReschedulingFailed)

 def get_down_router_bindings(self, context, agent_dead_limit):
 cutoff = self.get_cutoff_time(agent_dead_limit)
@@ -225,7 +225,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,

 def _unbind_router(self, context, router_id, agent_id):
 rb_obj.RouterL3AgentBinding.delete_objects(
-context, router_id=router_id, l3_agent_id=agent_id)
+context, router_id=router_id, l3_agent_id=agent_id)

 def _unschedule_router(self, context, router_id, agents_ids):
 with db_api.CONTEXT_WRITER.using(context):
@@ -284,7 +284,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,

 def list_routers_on_l3_agent(self, context, agent_id):
 binding_objs = rb_obj.RouterL3AgentBinding.get_objects(
-context, l3_agent_id=agent_id)
+context, l3_agent_id=agent_id)

 router_ids = [item.router_id for item in binding_objs]
 if router_ids:
@@ -366,7 +366,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
 if not router_ids:
 return []
 record_objs = rb_obj.RouterL3AgentBinding.get_objects(
-context, router_id=router_ids)
+context, router_id=router_ids)
 if admin_state_up is not None:
 l3_agents = ag_obj.Agent.get_objects(
 context,
@@ -456,7 +456,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
 agent_mode = agent_conf.get(constants.L3_AGENT_MODE,
 constants.L3_AGENT_MODE_LEGACY)
 if (agent_mode == constants.L3_AGENT_MODE_DVR or
-agent_mode == constants.L3_AGENT_MODE_DVR_NO_EXTERNAL or
+agent_mode == constants.L3_AGENT_MODE_DVR_NO_EXTERNAL or
 (agent_mode == constants.L3_AGENT_MODE_LEGACY and
 is_router_distributed)):
 continue
@@ -494,7 +494,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,
 if not agent_ids:
 return None
 agents = ag_obj.Agent.get_l3_agent_with_min_routers(
-context, agent_ids)
+context, agent_ids)
 return agents

 def get_hosts_to_notify(self, context, router_id):
@@ -519,7 +519,7 @@ class L3AgentSchedulerDbMixin(l3agentscheduler.L3AgentSchedulerPluginBase,

 pager = base_obj.Pager(sorts=[('binding_index', True)])
 bindings = rb_obj.RouterL3AgentBinding.get_objects(
-context, _pager=pager, router_id=router_id)
+context, _pager=pager, router_id=router_id)
 return base_scheduler.get_vacant_binding_index(
 num_agents, bindings, rb_model.LOWEST_BINDING_INDEX,
 force_scheduling=is_manual_scheduling)
@@ -118,7 +118,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 if (subnet['ip_version'] == 6 and subnet['ipv6_ra_mode'] is None and
 subnet['ipv6_address_mode'] is not None):
 msg = (_('IPv6 subnet %s configured to receive RAs from an '
-'external router cannot be added to Neutron Router.') %
+'external router cannot be added to Neutron Router.') %
 subnet['id'])
 raise n_exc.BadRequest(resource='router', msg=msg)

@@ -483,7 +483,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 new_valid_gw_port_attachment = (
 new_network_id and
 (not router.gw_port or
-router.gw_port['network_id'] != new_network_id))
+router.gw_port['network_id'] != new_network_id))
 if new_valid_gw_port_attachment:
 subnets = self._core_plugin.get_subnets_by_network(context,
 new_network_id)
@@ -611,7 +611,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 # TODO(ralonsoh): move this section (port deletion) out of the DB
 # transaction.
 router_ports_ids = (rp.port_id for rp in
-l3_obj.RouterPort.get_objects(context, router_id=id))
+l3_obj.RouterPort.get_objects(context,
+router_id=id))
 if db_api.is_session_active(context.session):
 context.GUARD_TRANSACTION = False
 for rp_id in router_ports_ids:
@@ -842,12 +843,12 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 if self._port_has_ipv6_address(port):
 for existing_port in (rp.port for rp in router.attached_ports):
 if (existing_port["id"] != port["id"] and
-existing_port["network_id"] == port["network_id"] and
+existing_port["network_id"] == port["network_id"] and
 self._port_has_ipv6_address(existing_port)):
 msg = _("Router already contains IPv6 port %(p)s "
-"belonging to network id %(nid)s. Only one IPv6 port "
-"from the same network subnet can be connected to a "
-"router.")
+"belonging to network id %(nid)s. Only one IPv6 "
+"port from the same network subnet can be "
+"connected to a router.")
 raise n_exc.BadRequest(resource='router', msg=msg % {
 'p': existing_port['id'],
 'nid': existing_port['network_id']})
@@ -884,7 +885,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 if context.project_id not in rbac_allowed_projects:
 msg = (_('Cannot add interface to router because subnet '
 '%s is not owned by project making the request')
-% subnet_id)
+% subnet_id)
 raise n_exc.BadRequest(resource='router', msg=msg)
 self._validate_subnet_address_mode(subnet)
 self._check_for_dup_router_subnets(context, router,
@@ -902,8 +903,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 fixed_ips = list(map(dict, port['port']['fixed_ips']))
 fixed_ips.append(fixed_ip)
 return (self._core_plugin.update_port(
-context, port['port_id'],
-{'port': {'fixed_ips': fixed_ips}}),
+context, port['port_id'],
+{'port': {'fixed_ips': fixed_ips}}),
 [subnet],
 False)

@@ -952,7 +953,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 # _validate_interface_info ensures that either of add_by_* is True.
 else:
 port, subnets, new_router_intf = self._add_interface_by_subnet(
-context, router, interface_info['subnet_id'], device_owner)
+context, router, interface_info['subnet_id'], device_owner)
 cleanup_port = new_router_intf  # only cleanup port we created
 revert_value = {'device_id': '',
 'device_owner': port['device_owner']}
@@ -1114,7 +1115,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 subnets = subnet_obj.Subnet.get_objects(context, id=port_subnet_ids)
 for subnet in subnets:
 self._confirm_router_interface_not_in_use(
-context, router_id, subnet)
+context, router_id, subnet)
 self._core_plugin.delete_port(context, port['id'],
 l3_port_check=False)
 return port, subnets
@@ -1162,7 +1163,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 device_owner)
 else:
 port, subnets = self._remove_interface_by_subnet(
-context, router_id, subnet_id, device_owner)
+context, router_id, subnet_id, device_owner)

 gw_network_id = None
 gw_ips = []
@@ -1185,7 +1186,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 port['id'], port['network_id'],
 subnets[0]['id'],
 [subnet['id'] for subnet in
-subnets])
+subnets])

 def _get_floatingip(self, context, id):
 floatingip = l3_obj.FloatingIP.get_object(context, id=id)
@@ -1248,16 +1249,21 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 RouterPort = l3_models.RouterPort
 gw_port = orm.aliased(models_v2.Port, name="gw_port")
 # TODO(lujinluo): Need IPAllocation and Port object
-routerport_qry = context.session.query(
-RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
-RouterPort.port, models_v2.Port.fixed_ips).filter(
-models_v2.Port.network_id == internal_port['network_id'],
-RouterPort.port_type.in_(constants.ROUTER_INTERFACE_OWNERS),
-models_v2.IPAllocation.subnet_id == internal_subnet['id']
-).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
-gw_port.network_id == external_network_id,
-gw_port.device_owner == DEVICE_OWNER_ROUTER_GW
-).distinct()
+routerport_qry = (context.session.query(
+RouterPort.router_id, models_v2.IPAllocation.ip_address).
+join(RouterPort.port, models_v2.Port.fixed_ips).
+filter(models_v2.Port.network_id ==
+internal_port['network_id'],
+RouterPort.port_type.in_(
+constants.ROUTER_INTERFACE_OWNERS),
+models_v2.IPAllocation.subnet_id ==
+internal_subnet['id']).
+join(gw_port,
+gw_port.device_id == RouterPort.router_id).
+filter(gw_port.network_id == external_network_id,
+gw_port.device_owner ==
+DEVICE_OWNER_ROUTER_GW).
+distinct())

 first_router_id = None
 for router_id, interface_ip in routerport_qry:
@@ -1336,7 +1342,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 """
 (internal_port, internal_subnet_id,
 internal_ip_address) = self._internal_fip_assoc_data(
-context, fip, floatingip_obj.project_id)
+context, fip, floatingip_obj.project_id)
 router_id = self._get_router_for_floatingip(
 context, internal_port,
 internal_subnet_id, floatingip_obj.floating_network_id)
@@ -1366,10 +1372,10 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 return port_id, internal_ip_address, router_id

 fip_exists = l3_obj.FloatingIP.objects_exist(
-context,
-fixed_port_id=fip['port_id'],
-floating_network_id=floatingip_obj.floating_network_id,
-fixed_ip_address=netaddr.IPAddress(internal_ip_address))
+context,
+fixed_port_id=fip['port_id'],
+floating_network_id=floatingip_obj.floating_network_id,
+fixed_ip_address=netaddr.IPAddress(internal_ip_address))
 if fip_exists:
 floating_ip_address = (str(floatingip_obj.floating_ip_address)
 if floatingip_obj.floating_ip_address
@@ -1627,7 +1633,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
 if (port['device_owner'] ==
 constants.DEVICE_OWNER_FLOATINGIP):
 registry.publish(resources.FLOATING_IP, events.PRECOMMIT_DELETE,
-self, payload)
+self, payload)

 def _delete_floatingip(self, context, id):
 floatingip = self._get_floatingip(context, id)
@@ -2083,10 +2089,10 @@ class L3RpcNotifierMixin(object):
 subnet_id = updated['id']
 with db_api.CONTEXT_READER.using(context):
 query = context.session.query(models_v2.Port.device_id).filter_by(
-network_id=network_id,
-device_owner=DEVICE_OWNER_ROUTER_GW)
+network_id=network_id,
+device_owner=DEVICE_OWNER_ROUTER_GW)
 query = query.join(models_v2.Port.fixed_ips).filter(
-models_v2.IPAllocation.subnet_id == subnet_id)
+models_v2.IPAllocation.subnet_id == subnet_id)
 router_ids = set(port.device_id for port in query)
 for router_id in router_ids:
 l3plugin.notify_router_updated(context, router_id)
@@ -64,9 +64,10 @@ _IS_ADMIN_STATE_DOWN_NECESSARY = None
 def is_admin_state_down_necessary():
 global _IS_ADMIN_STATE_DOWN_NECESSARY
 if _IS_ADMIN_STATE_DOWN_NECESSARY is None:
-_IS_ADMIN_STATE_DOWN_NECESSARY = \
-router_admin_state_down_before_update.ALIAS in (extensions.
-PluginAwareExtensionManager.get_instance().extensions)
+_IS_ADMIN_STATE_DOWN_NECESSARY = (
+router_admin_state_down_before_update.ALIAS in (
+extensions.PluginAwareExtensionManager.get_instance().
+extensions))
 return _IS_ADMIN_STATE_DOWN_NECESSARY

@@ -621,8 +622,8 @@ class DVRResourceOperationHandler(object):
 if cs_port:
 fixed_ips = (
 [fixedip for fixedip in
-cs_port['fixed_ips']
-if fixedip['subnet_id'] != subnet_id])
+cs_port['fixed_ips']
+if fixedip['subnet_id'] != subnet_id])

 if len(fixed_ips) == len(cs_port['fixed_ips']):
 # The subnet being detached from router is not part of
@@ -1039,9 +1040,9 @@ class _DVRAgentInterfaceMixin(object):
 # agent on re-syncs then we need to add the appropriate
 # port['agent'] before updating the dict.
 if (l3_agent_mode == (
-const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and
-requesting_agent_mode == (
-const.L3_AGENT_MODE_DVR_NO_EXTERNAL)):
+const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and
+requesting_agent_mode == (
+const.L3_AGENT_MODE_DVR_NO_EXTERNAL)):
 port['agent'] = (
 const.L3_AGENT_MODE_DVR_NO_EXTERNAL)

@@ -1053,9 +1054,9 @@ class _DVRAgentInterfaceMixin(object):
 # the portbinding host resides in dvr_no_external
 # agent then include the port.
 if (l3_agent_mode == (
-const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and
-requesting_agent_mode == (
-const.L3_AGENT_MODE_DVR_SNAT)):
+const.L3_AGENT_MODE_DVR_NO_EXTERNAL) and
+requesting_agent_mode == (
+const.L3_AGENT_MODE_DVR_SNAT)):
 port['agent'] = (
 const.L3_AGENT_MODE_DVR_NO_EXTERNAL)
 port_dict.update({port['id']: port})
@@ -325,7 +325,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
 are bound
 """
 subnet_ids = self.get_subnet_ids_on_router(context, router_id,
-keep_gateway_port=False)
+keep_gateway_port=False)
 hosts = self._get_dvr_hosts_for_subnets(context, subnet_ids)
 LOG.debug('Hosts for router %s: %s', router_id, hosts)
 return hosts
@@ -420,7 +420,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
 with_dvr=True):
 result_set = set(super(L3_DVRsch_db_mixin,
 self)._get_router_ids_for_agent(
-context, agent_db, router_ids, with_dvr))
+context, agent_db, router_ids, with_dvr))
 if not with_dvr:
 return result_set
 LOG.debug("Routers %(router_ids)s bound to L3 agent in host %(host)s",
@@ -435,9 +435,9 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
 # dvr routers are not explicitly scheduled to agents on hosts with
 # dvr serviceable ports, so need special handling
 if (self._get_agent_mode(agent_db) in
-[n_const.L3_AGENT_MODE_DVR,
-n_const.L3_AGENT_MODE_DVR_NO_EXTERNAL,
-n_const.L3_AGENT_MODE_DVR_SNAT]):
+[n_const.L3_AGENT_MODE_DVR,
+n_const.L3_AGENT_MODE_DVR_NO_EXTERNAL,
+n_const.L3_AGENT_MODE_DVR_SNAT]):
 dvr_routers = self._get_dvr_router_ids_for_host(context,
 agent_db['host'])
 if not router_ids:
@@ -448,10 +448,10 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
 context, router_id, keep_gateway_port=False)
 if (subnet_ids and (
 self._check_dvr_serviceable_ports_on_host(
-context, agent_db['host'],
-list(subnet_ids)) or
+context, agent_db['host'],
+list(subnet_ids)) or
 self._is_router_related_to_dvr_routers(
-context, router_id, dvr_routers))):
+context, router_id, dvr_routers))):
 result_set.add(router_id)

 LOG.debug("Routers %(router_ids)s are scheduled or have "
@@ -557,7 +557,7 @@ def _notify_port_delete(event, resource, trigger, payload):
 context = payload.context
 port = payload.latest_state
 get_related_hosts_info = payload.metadata.get(
-"get_related_hosts_info", True)
+"get_related_hosts_info", True)
 l3plugin = directory.get_plugin(plugin_constants.L3)
 if port:
 port_host = port.get(portbindings.HOST_ID)
@@ -605,7 +605,7 @@ def _notify_l3_agent_port_update(resource, event, trigger, payload):
 dest_host = new_port_profile.get('migrating_to')
 if is_new_port_binding_changed or is_bound_port_moved or dest_host:
 fips = l3plugin._get_floatingips_by_port_id(
-context, port_id=original_port['id'])
+context, port_id=original_port['id'])
 fip = fips[0] if fips else None
 if fip:
 fip_router_id = fip['router_id']
@@ -72,7 +72,7 @@ class L3_gw_ip_qos_dbonly_mixin(l3_gwmode_db.L3_NAT_dbonly_mixin):
 # Calls superclass, pass router db object for avoiding re-loading
 router = super(L3_gw_ip_qos_dbonly_mixin,
 self)._update_router_gw_info(
-context, router_id, info, request_body, router)
+context, router_id, info, request_body, router)

 if not self._is_gw_ip_qos_supported:
 return router
@@ -389,7 +389,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
 # net was deleted, throw a retry to start over to create another
 raise db_exc.RetryRequest(
 l3ha_exc.HANetworkConcurrentDeletion(
-tenant_id=router['tenant_id']))
+tenant_id=router['tenant_id']))

 @registry.receives(resources.ROUTER, [events.AFTER_CREATE],
 priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE)
@@ -456,8 +456,8 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
 'ha', requested_ha_state)
 return
 self._migrate_router_ports(
-payload.context, payload.desired_state,
-old_owner=old_owner, new_owner=new_owner)
+payload.context, payload.desired_state,
+old_owner=old_owner, new_owner=new_owner)
 self.set_extra_attr_value(
 payload.context, payload.desired_state, 'ha', requested_ha_state)

@@ -95,13 +95,9 @@ def _contracts(context, directive, phase):
 def _alter_column(context, directive, phase):
 is_expand = phase == 'expand'

-if is_expand and (
-directive.modify_nullable is True
-):
+if is_expand and directive.modify_nullable is True:
 return directive
-elif not is_expand and (
-directive.modify_nullable is False
-):
+elif not is_expand and directive.modify_nullable is False:
 return directive
 else:
 raise NotImplementedError(
@@ -49,7 +49,7 @@ class NetworkSegmentRange(standard_attr.HasStandardAttributes,
 constants.TYPE_GRE,
 constants.TYPE_GENEVE,
 name='network_segment_range_network_type'),
-nullable=False)
+nullable=False)

 # network segment range physical network, only applicable for VLAN.
 physical_network = sa.Column(sa.String(64))
@@ -30,7 +30,7 @@ class SubnetServiceType(model_base.BASEV2):
 sa.ForeignKey('subnets.id', ondelete="CASCADE"))
 # Service types must be valid device owners, therefore share max length
 service_type = sa.Column(sa.String(
-length=db_const.DEVICE_OWNER_FIELD_SIZE))
+length=db_const.DEVICE_OWNER_FIELD_SIZE))
 subnet = orm.relationship(models_v2.Subnet, load_on_pending=True,
 backref=orm.backref('service_types',
 lazy='subquery',
@@ -110,8 +110,8 @@ class IpAvailabilityMixin(object):
 query = query.outerjoin(mod.Subnet,
 mod.Network.id == mod.Subnet.network_id)
 query = query.outerjoin(
-mod.IPAllocationPool,
-mod.Subnet.id == mod.IPAllocationPool.subnet_id)
+mod.IPAllocationPool,
+mod.Subnet.id == mod.IPAllocationPool.subnet_id)
 return cls._adjust_query_for_filters(query, filters)

 @classmethod
@@ -130,13 +130,13 @@ class IpAvailabilityMixin(object):
 # Add IPAllocationPool data
 if row.last_ip:
 pool_total = netaddr.IPRange(
-netaddr.IPAddress(row.first_ip),
-netaddr.IPAddress(row.last_ip)).size
+netaddr.IPAddress(row.first_ip),
+netaddr.IPAddress(row.last_ip)).size
 cur_total = subnet_totals_dict.get(row.subnet_id, 0)
 subnet_totals_dict[row.subnet_id] = cur_total + pool_total
 else:
 subnet_totals_dict[row.subnet_id] = netaddr.IPNetwork(
-row.cidr, version=row.ip_version).size
+row.cidr, version=row.ip_version).size

 return subnet_totals_dict
@@ -156,7 +156,7 @@ def get_revision_row(context, resource_uuid):
 with db_api.CONTEXT_READER.using(context):
 return context.session.query(
 ovn_models.OVNRevisionNumbers).filter_by(
-resource_uuid=resource_uuid).one()
+resource_uuid=resource_uuid).one()
 except exc.NoResultFound:
 pass

@@ -37,8 +37,8 @@ class QuotaUsageInfo(collections.namedtuple(


 class ReservationInfo(collections.namedtuple(
-'ReservationInfo', ['reservation_id', 'project_id',
-'expiration', 'deltas'])):
+'ReservationInfo', ['reservation_id', 'project_id',
+'expiration', 'deltas'])):
 """Information about a resource reservation."""

@@ -176,7 +176,7 @@ def create_reservation(context, project_id, deltas, expiration=None):
 # This method is usually called from within another transaction.
 # Consider using begin_nested
 expiration = expiration or (
-utcnow() + datetime.timedelta(0, RESERVATION_EXPIRATION_TIMEOUT))
+utcnow() + datetime.timedelta(0, RESERVATION_EXPIRATION_TIMEOUT))
 delta_objs = []
 for (resource, delta) in deltas.items():
 delta_objs.append(quota_obj.ResourceDelta(
@@ -66,7 +66,7 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI):

 # init with defaults
 project_quota = dict((key, resource.default)
-for key, resource in resources.items())
+for key, resource in resources.items())

 # update with project specific limits
 quota_objs = quota_obj.Quota.get_objects(context,
@@ -135,7 +135,7 @@ class DbQuotaDriver(nlib_quota_api.QuotaDriverAPI):
 resourcekey2: ...
 """
 project_default = dict((key, resource.default)
-for key, resource in resources.items())
+for key, resource in resources.items())

 all_project_quotas = {}

@@ -51,8 +51,7 @@ class Quota(model_base.BASEV2, model_base.HasId, model_base.HasProject):
 'project_id',
 'resource',
 name='uniq_quotas0project_id0resource'),
-model_base.BASEV2.__table_args__
-)
+model_base.BASEV2.__table_args__)


 class QuotaUsage(model_base.BASEV2, model_base.HasProjectPrimaryKeyIndex):
@@ -207,11 +207,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
 try:
 with db_api.CONTEXT_READER.using(context):
 ret = self._make_security_group_dict(self._get_security_group(
-context, id,
-fields=fields),
-fields)
+context, id, fields=fields), fields)
 if (fields is None or len(fields) == 0 or
-'security_group_rules' in fields):
+'security_group_rules' in fields):
 rules = self.get_security_group_rules(
 context_lib.get_admin_context(),
 {'security_group_id': [id]})
@@ -311,13 +309,13 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
 sg.update()
 sg_dict = self._make_security_group_dict(sg)
 self._registry_publish(
-resources.SECURITY_GROUP,
-events.PRECOMMIT_UPDATE,
-exc_cls=ext_sg.SecurityGroupConflict,
-payload=events.DBEventPayload(
-context, request_body=s,
-states=(original_security_group,),
-resource_id=id, desired_state=sg_dict))
+resources.SECURITY_GROUP,
+events.PRECOMMIT_UPDATE,
+exc_cls=ext_sg.SecurityGroupConflict,
+payload=events.DBEventPayload(
+context, request_body=s,
+states=(original_security_group,),
+resource_id=id, desired_state=sg_dict))
 registry.publish(resources.SECURITY_GROUP, events.AFTER_UPDATE, self,
 payload=events.DBEventPayload(
 context, request_body=s,
@@ -411,9 +409,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
 res = self._create_security_group_rule(context, security_group_rule)
 registry.publish(resources.SECURITY_GROUP_RULE, events.AFTER_CREATE,
 self, payload=events.DBEventPayload(
-context,
-resource_id=res['id'],
-states=(res,)))
+context,
+resource_id=res['id'],
+states=(res,)))

 return res

@@ -592,7 +590,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
 str(constants.PROTO_NUM_IPV6_ROUTE)]:
 if rule['ethertype'] == constants.IPv4:
 raise ext_sg.SecurityGroupEthertypeConflictWithProtocol(
-ethertype=rule['ethertype'], protocol=rule['protocol'])
+ethertype=rule['ethertype'], protocol=rule['protocol'])

 def _validate_single_tenant_and_group(self, security_group_rules):
 """Check that all rules belong to the same security group and tenant
@@ -725,7 +723,7 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
 return none_char
 elif key == 'protocol':
 return str(self._get_ip_proto_name_and_num(
-value, ethertype=rule.get('ethertype')))
+value, ethertype=rule.get('ethertype')))
 return str(value)

 comparison_keys = [
@@ -1001,9 +999,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
 is either [] or not is_attr_set, otherwise return False
 """
 if (ext_sg.SECURITYGROUPS in port['port'] and
-not (validators.is_attr_set(
-port['port'][ext_sg.SECURITYGROUPS]) and
-port['port'][ext_sg.SECURITYGROUPS] != [])):
+not (validators.is_attr_set(
+port['port'][ext_sg.SECURITYGROUPS]) and
+port['port'][ext_sg.SECURITYGROUPS] != [])):
 return True
 return False

@@ -1013,8 +1011,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
 This method is called both for port create and port update.
 """
 if (ext_sg.SECURITYGROUPS in port['port'] and
-(validators.is_attr_set(port['port'][ext_sg.SECURITYGROUPS]) and
-port['port'][ext_sg.SECURITYGROUPS] != [])):
+(validators.is_attr_set(
+port['port'][ext_sg.SECURITYGROUPS]) and
+port['port'][ext_sg.SECURITYGROUPS] != [])):
 return True
 return False

@@ -1030,9 +1029,9 @@ class SecurityGroupDbMixin(ext_sg.SecurityGroupPluginBase,
 need_notify = False
 port_updates = port['port']
 if (ext_sg.SECURITYGROUPS in port_updates and
-not helpers.compare_elements(
-original_port.get(ext_sg.SECURITYGROUPS),
-port_updates[ext_sg.SECURITYGROUPS])):
+not helpers.compare_elements(
+original_port.get(ext_sg.SECURITYGROUPS),
+port_updates[ext_sg.SECURITYGROUPS])):
 # delete the port binding and read it with the new rules
 sgs = self._get_security_groups_on_port(context, port)
 port_updates[ext_sg.SECURITYGROUPS] = [sg.id for sg in sgs]
@@ -100,10 +100,10 @@ class SecurityGroupServerNotifierRpcMixin(sg_db.SecurityGroupDbMixin):
 """
 need_notify = False
 if (original_port['fixed_ips'] != updated_port['fixed_ips'] or
-original_port['mac_address'] != updated_port['mac_address'] or
-not helpers.compare_elements(
-original_port.get(ext_sg.SECURITYGROUPS),
-updated_port.get(ext_sg.SECURITYGROUPS))):
+original_port['mac_address'] != updated_port['mac_address'] or
+not helpers.compare_elements(
+original_port.get(ext_sg.SECURITYGROUPS),
+updated_port.get(ext_sg.SECURITYGROUPS))):
 need_notify = True
 return need_notify
@@ -189,8 +189,8 @@ class SecurityGroupInfoAPIMixin(object):

 if remote_gid:
 if (remote_gid
-not in sg_info['devices'][port_id][
-'security_group_source_groups']):
+not in sg_info['devices'][port_id][
+'security_group_source_groups']):
 sg_info['devices'][port_id][
 'security_group_source_groups'].append(remote_gid)
 if remote_gid not in remote_security_group_info:
@@ -200,11 +200,11 @@ class SecurityGroupInfoAPIMixin(object):
 remote_security_group_info[remote_gid][ethertype] = set()
 elif remote_ag_id:
 if (remote_ag_id
-not in sg_info['devices'][port_id][
-'security_group_remote_address_groups']):
+not in sg_info['devices'][port_id][
+'security_group_remote_address_groups']):
 sg_info['devices'][port_id][
 'security_group_remote_address_groups'].append(
-remote_ag_id)
+remote_ag_id)
 if remote_ag_id not in remote_address_group_info:
 remote_address_group_info[remote_ag_id] = {}
 if ethertype not in remote_address_group_info[remote_ag_id]:
@@ -47,8 +47,7 @@ class ServiceTypeManager(object):
 return list(chain.from_iterable(
 self.config[svc_type].get_service_providers(filters, fields)
 for svc_type in filters['service_type']
-if svc_type in self.config)
-)
+if svc_type in self.config))
 return list(
 chain.from_iterable(
 self.config[p].get_service_providers(filters, fields)