[sqlalchemy-20] Add missing DB contexts in L3 methods
The goal of this series of patches is to make the Neutron code compliant with SQLAlchemy 2.0. This patch adds the missing database contexts in the execution of some queries in the L3 code, reported in the following bugs. This patch also refactors the ``neutron.db.l3_db`` methods, using the method decorators instead of the inline context builders. Closes-Bug: #1974144 Closes-Bug: #1974142 Related-Bug: #1964575 Change-Id: I7e18db7f1fa6b8878c13df09895c993704401815
This commit is contained in:
parent
5c6eff7e15
commit
59973d29c3
|
@ -46,6 +46,7 @@ dvr_mac_db.register_db_dvr_mac_opts()
|
|||
l3_dvr_db.register_db_l3_dvr_opts()
|
||||
|
||||
|
||||
@db_api.CONTEXT_READER
|
||||
def get_ports_query_by_subnet_and_ip(context, subnet, ip_addresses=None):
|
||||
query = context.session.query(models_v2.Port)
|
||||
query = query.join(models_v2.IPAllocation)
|
||||
|
@ -55,7 +56,7 @@ def get_ports_query_by_subnet_and_ip(context, subnet, ip_addresses=None):
|
|||
if ip_addresses:
|
||||
query = query.filter(
|
||||
models_v2.IPAllocation.ip_address.in_(ip_addresses))
|
||||
return query
|
||||
return query.all()
|
||||
|
||||
|
||||
@registry.has_registry_receivers
|
||||
|
@ -214,9 +215,8 @@ class DVRDbMixin(ext_dvr.DVRMacAddressPluginBase):
|
|||
else:
|
||||
ip_address = subnet_info['gateway_ip']
|
||||
|
||||
query = get_ports_query_by_subnet_and_ip(
|
||||
internal_gateway_ports = get_ports_query_by_subnet_and_ip(
|
||||
context, subnet, [ip_address])
|
||||
internal_gateway_ports = query.all()
|
||||
|
||||
if not internal_gateway_ports:
|
||||
LOG.error("Could not retrieve gateway port "
|
||||
|
|
|
@ -301,19 +301,19 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
|
|||
states=(new_router,)))
|
||||
return new_router
|
||||
|
||||
@db_api.CONTEXT_WRITER
|
||||
def _update_router_db(self, context, router_id, data):
|
||||
"""Update the DB object."""
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
router_db = self._get_router(context, router_id)
|
||||
old_router = self._make_router_dict(router_db)
|
||||
if data:
|
||||
router_db.update(self._get_stripped_router(data))
|
||||
registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self,
|
||||
payload=events.DBEventPayload(
|
||||
context, request_body=data,
|
||||
states=(old_router,), resource_id=router_id,
|
||||
desired_state=router_db))
|
||||
return router_db
|
||||
router_db = self._get_router(context, router_id)
|
||||
old_router = self._make_router_dict(router_db)
|
||||
if data:
|
||||
router_db.update(self._get_stripped_router(data))
|
||||
registry.publish(resources.ROUTER, events.PRECOMMIT_UPDATE, self,
|
||||
payload=events.DBEventPayload(
|
||||
context, request_body=data,
|
||||
states=(old_router,), resource_id=router_id,
|
||||
desired_state=router_db))
|
||||
return router_db
|
||||
|
||||
@db_api.retry_if_session_inactive()
|
||||
def update_router(self, context, id, router):
|
||||
|
@ -456,23 +456,23 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
|
|||
metadata=metadata,
|
||||
resource_id=router_id))
|
||||
|
||||
@db_api.CONTEXT_WRITER
|
||||
def _delete_router_gw_port_db(self, context, router, request_body):
|
||||
with db_api.CONTEXT_WRITER.using(context):
|
||||
router.gw_port = None
|
||||
if router not in context.session:
|
||||
context.session.add(router)
|
||||
try:
|
||||
registry.publish(resources.ROUTER_GATEWAY,
|
||||
events.BEFORE_DELETE, self,
|
||||
payload=events.DBEventPayload(
|
||||
context, states=(router,),
|
||||
request_body=request_body,
|
||||
resource_id=router.id))
|
||||
except exceptions.CallbackFailure as e:
|
||||
# NOTE(armax): preserve old check's behavior
|
||||
if len(e.errors) == 1:
|
||||
raise e.errors[0].error
|
||||
raise l3_exc.RouterInUse(router_id=router.id, reason=e)
|
||||
router.gw_port = None
|
||||
if router not in context.session:
|
||||
context.session.add(router)
|
||||
try:
|
||||
registry.publish(resources.ROUTER_GATEWAY,
|
||||
events.BEFORE_DELETE, self,
|
||||
payload=events.DBEventPayload(
|
||||
context, states=(router,),
|
||||
request_body=request_body,
|
||||
resource_id=router.id))
|
||||
except exceptions.CallbackFailure as e:
|
||||
# NOTE(armax): preserve old check's behavior
|
||||
if len(e.errors) == 1:
|
||||
raise e.errors[0].error
|
||||
raise l3_exc.RouterInUse(router_id=router.id, reason=e)
|
||||
|
||||
def _create_gw_port(self, context, router_id, router, new_network_id,
|
||||
ext_ips):
|
||||
|
@ -761,43 +761,43 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
|
|||
{'port': port['id'], 'subnet': subnet_id})
|
||||
raise n_exc.BadRequest(resource='router', msg=msg)
|
||||
|
||||
@db_api.CONTEXT_READER
|
||||
def _validate_router_port_info(self, context, router, port_id):
|
||||
with db_api.CONTEXT_READER.using(context):
|
||||
# check again within transaction to mitigate race
|
||||
port = self._check_router_port(context, port_id, router.id)
|
||||
# check again within transaction to mitigate race
|
||||
port = self._check_router_port(context, port_id, router.id)
|
||||
|
||||
# Only allow one router port with IPv6 subnets per network id
|
||||
if self._port_has_ipv6_address(port):
|
||||
for existing_port in (rp.port for rp in router.attached_ports):
|
||||
if (existing_port['network_id'] == port['network_id'] and
|
||||
self._port_has_ipv6_address(existing_port)):
|
||||
msg = _("Cannot have multiple router ports with the "
|
||||
"same network id if both contain IPv6 "
|
||||
"subnets. Existing port %(p)s has IPv6 "
|
||||
"subnet(s) and network id %(nid)s")
|
||||
raise n_exc.BadRequest(resource='router', msg=msg % {
|
||||
'p': existing_port['id'],
|
||||
'nid': existing_port['network_id']})
|
||||
# Only allow one router port with IPv6 subnets per network id
|
||||
if self._port_has_ipv6_address(port):
|
||||
for existing_port in (rp.port for rp in router.attached_ports):
|
||||
if (existing_port['network_id'] == port['network_id'] and
|
||||
self._port_has_ipv6_address(existing_port)):
|
||||
msg = _("Cannot have multiple router ports with the "
|
||||
"same network id if both contain IPv6 "
|
||||
"subnets. Existing port %(p)s has IPv6 "
|
||||
"subnet(s) and network id %(nid)s")
|
||||
raise n_exc.BadRequest(resource='router', msg=msg % {
|
||||
'p': existing_port['id'],
|
||||
'nid': existing_port['network_id']})
|
||||
|
||||
fixed_ips = list(port['fixed_ips'])
|
||||
subnets = []
|
||||
for fixed_ip in fixed_ips:
|
||||
subnet = self._core_plugin.get_subnet(context,
|
||||
fixed_ip['subnet_id'])
|
||||
subnets.append(subnet)
|
||||
fixed_ips = list(port['fixed_ips'])
|
||||
subnets = []
|
||||
for fixed_ip in fixed_ips:
|
||||
subnet = self._core_plugin.get_subnet(context,
|
||||
fixed_ip['subnet_id'])
|
||||
subnets.append(subnet)
|
||||
|
||||
if subnets:
|
||||
self._check_for_dup_router_subnets(context, router,
|
||||
port['network_id'],
|
||||
subnets)
|
||||
if subnets:
|
||||
self._check_for_dup_router_subnets(context, router,
|
||||
port['network_id'],
|
||||
subnets)
|
||||
|
||||
# Keep the restriction against multiple IPv4 subnets
|
||||
if len([s for s in subnets if s['ip_version'] == 4]) > 1:
|
||||
msg = _("Cannot have multiple "
|
||||
"IPv4 subnets on router port")
|
||||
raise n_exc.BadRequest(resource='router', msg=msg)
|
||||
self._validate_port_in_range_or_admin(context, subnets, port)
|
||||
return port, subnets
|
||||
# Keep the restriction against multiple IPv4 subnets
|
||||
if len([s for s in subnets if s['ip_version'] == 4]) > 1:
|
||||
msg = _("Cannot have multiple "
|
||||
"IPv4 subnets on router port")
|
||||
raise n_exc.BadRequest(resource='router', msg=msg)
|
||||
self._validate_port_in_range_or_admin(context, subnets, port)
|
||||
return port, subnets
|
||||
|
||||
def _notify_attaching_interface(self, context, router_db, port,
|
||||
interface_info):
|
||||
|
@ -2018,12 +2018,13 @@ class L3RpcNotifierMixin(object):
|
|||
return
|
||||
network_id = updated['network_id']
|
||||
subnet_id = updated['id']
|
||||
query = context.session.query(models_v2.Port.device_id).filter_by(
|
||||
network_id=network_id,
|
||||
device_owner=DEVICE_OWNER_ROUTER_GW)
|
||||
query = query.join(models_v2.Port.fixed_ips).filter(
|
||||
models_v2.IPAllocation.subnet_id == subnet_id)
|
||||
router_ids = set(port.device_id for port in query)
|
||||
with db_api.CONTEXT_READER.using(context):
|
||||
query = context.session.query(models_v2.Port.device_id).filter_by(
|
||||
network_id=network_id,
|
||||
device_owner=DEVICE_OWNER_ROUTER_GW)
|
||||
query = query.join(models_v2.Port.fixed_ips).filter(
|
||||
models_v2.IPAllocation.subnet_id == subnet_id)
|
||||
router_ids = set(port.device_id for port in query)
|
||||
for router_id in router_ids:
|
||||
l3plugin.notify_router_updated(context, router_id)
|
||||
|
||||
|
|
|
@ -436,6 +436,7 @@ class DVRResourceOperationHandler(object):
|
|||
self.l3plugin.l3_rpc_notifier.delete_fipnamespace_for_ext_net(
|
||||
payload.context, payload.resource_id)
|
||||
|
||||
@db_api.CONTEXT_READER
|
||||
def _get_ports_for_allowed_address_pair_ip(self, context, network_id,
|
||||
fixed_ip):
|
||||
"""Return all active ports associated with the allowed_addr_pair ip."""
|
||||
|
@ -1428,8 +1429,8 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
|
|||
return False
|
||||
|
||||
def get_ports_under_dvr_connected_subnet(self, context, subnet_id):
|
||||
query = dvr_mac_db.get_ports_query_by_subnet_and_ip(context, subnet_id)
|
||||
ports = [p for p in query.all() if is_port_bound(p)]
|
||||
ports = dvr_mac_db.get_ports_query_by_subnet_and_ip(context, subnet_id)
|
||||
ports = [p for p in ports if is_port_bound(p)]
|
||||
# TODO(slaweq): if there would be way to pass to neutron-lib only
|
||||
# list of extensions which actually should be processed, then setting
|
||||
# process_extensions=True below could avoid that second loop and
|
||||
|
|
|
@ -19,6 +19,7 @@ from neutron_lib.callbacks import events
|
|||
from neutron_lib.callbacks import registry
|
||||
from neutron_lib.callbacks import resources
|
||||
from neutron_lib import constants as n_const
|
||||
from neutron_lib.db import api as db_api
|
||||
from neutron_lib.exceptions import l3 as l3_exc
|
||||
from neutron_lib.plugins import constants as plugin_constants
|
||||
from neutron_lib.plugins import directory
|
||||
|
@ -348,6 +349,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
|
|||
{'router_id': router_id, 'dvr_hosts': dvr_hosts})
|
||||
return dvr_hosts
|
||||
|
||||
@db_api.CONTEXT_READER
|
||||
def _get_dvr_hosts_for_subnets(self, context, subnet_ids):
|
||||
"""Get a list of hosts with DVR servicable ports on subnet_ids."""
|
||||
host_dvr_dhcp = cfg.CONF.host_dvr_for_dhcp
|
||||
|
@ -367,6 +369,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
|
|||
hosts = [item[0] for item in query if item[0] != '']
|
||||
return hosts
|
||||
|
||||
@db_api.CONTEXT_READER
|
||||
def _get_dvr_subnet_ids_on_host_query(self, context, host):
|
||||
host_dvr_dhcp = cfg.CONF.host_dvr_for_dhcp
|
||||
query = context.session.query(
|
||||
|
@ -382,6 +385,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
|
|||
query = query.filter(owner_filter)
|
||||
return query
|
||||
|
||||
@db_api.CONTEXT_READER
|
||||
def _get_dvr_router_ids_for_host(self, context, host):
|
||||
subnet_ids_on_host_query = self._get_dvr_subnet_ids_on_host_query(
|
||||
context, host)
|
||||
|
@ -395,6 +399,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
|
|||
LOG.debug('DVR routers on host %s: %s', host, router_ids)
|
||||
return router_ids
|
||||
|
||||
@db_api.CONTEXT_READER
|
||||
def _get_other_dvr_router_ids_connected_router(self, context, router_id):
|
||||
# TODO(slaweq): move this method to RouterPort OVO object
|
||||
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
|
||||
|
@ -466,6 +471,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
|
|||
return list(result_set)
|
||||
|
||||
@log_helpers.log_method_call
|
||||
@db_api.CONTEXT_READER
|
||||
def _check_dvr_serviceable_ports_on_host(self, context, host, subnet_ids):
|
||||
"""Check for existence of dvr serviceable ports on host
|
||||
|
||||
|
|
Loading…
Reference in New Issue