Set system_scope='all' in elevated context

When enforce_new_defaults is set to True and the new policy rules are
used, the context.is_admin flag no longer works the way it did with
the old rules.
But when an elevated context is needed, what is really needed is a
context with full rights to the system. So we should also set the
"system_scope" parameter to "all" to make sure that system-scoped
queries can always be done with such an elevated context.
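
For reference, the helper this patch adds to neutron/common/utils.py
looks as follows (a sketch; cfg is the oslo_config module handle and
enforce_new_defaults is the oslo.policy option mentioned above):

    from oslo_config import cfg

    def get_elevated_context(context):
        admin_context = context.elevated()
        # With the new policy defaults an elevated context also has
        # to be system scoped, otherwise its queries stay scoped to
        # a single project.
        if cfg.CONF.oslo_policy.enforce_new_defaults:
            admin_context.system_scope = 'all'
        return admin_context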

This is needed e.g. when an elevated context is used to get data from
the db. In that case the db query must not be scoped to a single
project_id, and with the new defaults that requires system_scope to
be set to "all".
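
Callers then use this helper instead of calling context.elevated()
directly, for example (an illustrative sketch mirroring the changes
below; "filters" stands for whatever port filter the caller builds):

    from neutron.common import utils as common_utils

    admin_ctx = common_utils.get_elevated_context(context)
    # with system_scope='all' the query is not limited to the single
    # project_id of the original context
    ports = self._core_plugin.get_ports(admin_ctx, filters)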

The proper fix should be made in neutron-lib and is already proposed
in [1], but as the neutron-lib version is already frozen for
stable/wallaby, this neutron patch is a temporary fix for the issue.
We can revert this patch as soon as we are in the Xena development
cycle and [1] has been merged and released.

[1] https://review.opendev.org/c/openstack/neutron-lib/+/781625

Related-Bug: #1920001
Change-Id: I0068c1de09f5c6fae5bb5cd0d6f26f451e701939
Slawek Kaplonski 2021-03-19 12:05:56 +01:00
parent 80dfbb0371
commit 062336e59b
29 changed files with 154 additions and 97 deletions

View File

@@ -28,6 +28,7 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from neutron.common import utils as common_utils
# Priorities - lower value is higher priority
PRIORITY_NETWORK_CREATE = 0
@@ -194,7 +195,8 @@ class DhcpAgentNotifyAPI(object):
if fanout_required:
self._fanout_message(context, method, payload)
elif cast_required:
admin_ctx = (context if context.is_admin else context.elevated())
admin_ctx = (context if context.is_admin else
common_utils.get_elevated_context(context))
network = self.plugin.get_network(admin_ctx, network_id)
if 'subnet' in payload and payload['subnet'].get('segment_id'):
# if segment_id exists then the segment service plugin

View File

@@ -25,6 +25,7 @@ from oslo_log import log as logging
import oslo_messaging
from neutron.api.rpc.agentnotifiers import utils as ag_utils
from neutron.common import utils as common_utils
LOG = logging.getLogger(__name__)
@@ -55,7 +56,9 @@ class L3AgentNotifyAPI(object):
def _agent_notification(self, context, method, router_ids, operation,
shuffle_agents):
"""Notify changed routers to hosting l3 agents."""
adminContext = context if context.is_admin else context.elevated()
adminContext = (
context if context.is_admin else
common_utils.get_elevated_context(context))
plugin = directory.get_plugin(plugin_constants.L3)
for router_id in router_ids:
hosts = plugin.get_hosts_to_notify(adminContext, router_id)
@@ -92,8 +95,9 @@ class L3AgentNotifyAPI(object):
return
if extensions.is_extension_supported(
plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
adminContext = (context.is_admin and
context or context.elevated())
adminContext = (
context.is_admin and
context or common_utils.get_elevated_context(context))
if schedule_routers:
plugin.schedule_routers(adminContext, router_ids)
self._agent_notification(

View File

@@ -21,6 +21,7 @@ from neutron_lib import rpc as n_rpc
from oslo_log import log as logging
import oslo_messaging
from neutron.common import utils as common_utils
from neutron.db import agentschedulers_db
LOG = logging.getLogger(__name__)
@@ -36,7 +37,9 @@ class MeteringAgentNotifyAPI(object):
def _agent_notification(self, context, method, routers):
"""Notify l3 metering agents hosted by l3 agent hosts."""
adminContext = context if context.is_admin else context.elevated()
adminContext = (
context if context.is_admin else
common_utils.get_elevated_context(context))
plugin = directory.get_plugin(plugin_constants.L3)
l3_routers = {}

View File

@@ -1037,3 +1037,13 @@ def with_metaclass(meta, *bases):
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
def get_elevated_context(context):
admin_context = context.elevated()
# NOTE(slaweq): system_scope='all' is needed if new policies are
# enforced. This should be set in context.elevated() method in the
# neutron-lib but as a temporary workaround it is done here
if cfg.CONF.oslo_policy.enforce_new_defaults:
admin_context.system_scope = 'all'
return admin_context

View File

@@ -19,6 +19,7 @@ from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.services.qos import constants as qos_consts
from neutron.common import utils as common_utils
from neutron.core_extensions import base
from neutron.objects.qos import policy as policy_object
@@ -47,7 +48,7 @@ class QosCoreResourceExtension(base.CoreResourceExtension):
def _update_port_policy(self, context, port, port_changes):
old_policy = policy_object.QosPolicy.get_port_policy(
context.elevated(), port['id'])
common_utils.get_elevated_context(context), port['id'])
if old_policy:
self._check_policy_change_permission(context, old_policy)
old_policy.detach_port(port['id'])
@@ -75,7 +76,7 @@ class QosCoreResourceExtension(base.CoreResourceExtension):
def _update_network_policy(self, context, network, network_changes):
old_policy = policy_object.QosPolicy.get_network_policy(
context.elevated(), network['id'])
common_utils.get_elevated_context(context), network['id'])
if old_policy:
self._check_policy_change_permission(context, old_policy)
old_policy.detach_network(network['id'])

View File

@@ -21,6 +21,7 @@ from neutron_lib.exceptions import address_group as ag_exc
from oslo_utils import uuidutils
from neutron._i18n import _
from neutron.common import utils as common_utils
from neutron.extensions import address_group as ag_ext
from neutron.objects import address_group as ag_obj
from neutron.objects import base as base_obj
@@ -196,8 +197,9 @@ class AddressGroupDbMixin(ag_ext.AddressGroupPluginBase):
]
def delete_address_group(self, context, id):
if sg_obj.SecurityGroupRule.get_objects(context.elevated(),
remote_address_group_id=id):
if sg_obj.SecurityGroupRule.get_objects(
common_utils.get_elevated_context(context),
remote_address_group_id=id):
# TODO(hangyang): use exception from neutron_lib
raise AddressGroupInUse(address_group_id=id)
ag = self._get_address_group(context, id)

View File

@@ -485,7 +485,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
_constants.AUTO_DELETE_PORT_OWNERS))]
for port_id in auto_delete_port_ids:
try:
self.delete_port(context.elevated(), port_id)
self.delete_port(utils.get_elevated_context(context), port_id)
except exc.PortNotFound:
# Don't raise if something else concurrently deleted the port
LOG.debug("Ignoring PortNotFound when deleting port '%s'. "
@@ -710,7 +710,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
def _update_router_gw_port(self, context, router_id, network, subnet):
l3plugin = directory.get_plugin(plugin_constants.L3)
ctx_admin = context.elevated()
ctx_admin = utils.get_elevated_context(context)
ext_subnets_dict = {s['id']: s for s in network['subnets']}
router = l3plugin.get_router(ctx_admin, router_id)
external_gateway_info = router['external_gateway_info']
@@ -1586,7 +1586,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
if device_id:
if hasattr(self, 'get_router'):
try:
ctx_admin = context.elevated()
ctx_admin = utils.get_elevated_context(context)
router = self.get_router(ctx_admin, device_id)
except l3_exc.RouterNotFound:
return
@@ -1594,7 +1594,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
l3plugin = directory.get_plugin(plugin_constants.L3)
if l3plugin:
try:
ctx_admin = context.elevated()
ctx_admin = utils.get_elevated_context(context)
router = l3plugin.get_router(ctx_admin,
device_id)
except l3_exc.RouterNotFound:

View File

@@ -29,6 +29,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import utils as common_utils
from neutron.conf.db import extraroute_db
from neutron.db import l3_db
from neutron.objects import router as l3_obj
@@ -92,7 +93,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
router_id=router_id,
quota=cfg.CONF.max_routes)
context = context.elevated()
context = common_utils.get_elevated_context(context)
filters = {'device_id': [router_id]}
ports = self._core_plugin.get_ports(context, filters)
cidrs = []

View File

@@ -320,17 +320,19 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
# the current transaction.
context.GUARD_TRANSACTION = False
gw_port = plugin_utils.create_port(
self._core_plugin, context.elevated(), {'port': port_data})
self._core_plugin, utils.get_elevated_context(context),
{'port': port_data})
if not gw_port['fixed_ips']:
LOG.debug('No IPs available for external network %s',
network_id)
with plugin_utils.delete_port_on_error(
self._core_plugin, context.elevated(), gw_port['id']):
self._core_plugin, utils.get_elevated_context(context),
gw_port['id']):
with db_api.CONTEXT_WRITER.using(context):
router = self._get_router(context, router['id'])
router.gw_port = self._core_plugin._get_port(
context.elevated(), gw_port['id'])
utils.get_elevated_context(context), gw_port['id'])
router_port = l3_obj.RouterPort(
context,
router_id=router.id,
@@ -373,7 +375,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
router.gw_port and router.gw_port['network_id'] != new_network_id)
if not port_requires_deletion:
return
admin_ctx = context.elevated()
admin_ctx = utils.get_elevated_context(context)
old_network_id = router.gw_port['network_id']
if self.router_gw_port_has_floating_ips(admin_ctx, router_id):
@@ -462,8 +464,9 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
resource_id=router_id))
def _update_current_gw_port(self, context, router_id, router, ext_ips):
self._core_plugin.update_port(context.elevated(), router.gw_port['id'],
{'port': {'fixed_ips': ext_ips}})
self._core_plugin.update_port(
utils.get_elevated_context(context), router.gw_port['id'],
{'port': {'fixed_ips': ext_ips}})
def _update_router_gw_info(self, context, router_id, info, router=None):
router = router or self._get_router(context, router_id)
@@ -532,8 +535,9 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
if context.session.is_active:
context.GUARD_TRANSACTION = False
for rp_id in router_ports_ids:
self._core_plugin.delete_port(context.elevated(), rp_id,
l3_port_check=False)
self._core_plugin.delete_port(
utils.get_elevated_context(context), rp_id,
l3_port_check=False)
router = self._get_router(context, id)
registry.notify(resources.ROUTER, events.PRECOMMIT_DELETE,
@@ -586,7 +590,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
raise n_exc.BadRequest(resource='router', msg=msg)
if p.get('device_owner') == DEVICE_OWNER_ROUTER_GW:
ext_subts = self._core_plugin.get_subnets(
context.elevated(),
utils.get_elevated_context(context),
filters={'network_id': [p['network_id']]})
for sub in ext_subts:
router_subnets.append(sub['id'])
@@ -597,8 +601,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
new_subnets = [s for s in new_subnets
if s['cidr'] != constants.PROVISIONAL_IPV6_PD_PREFIX]
id_filter = {'id': router_subnets}
subnets = self._core_plugin.get_subnets(context.elevated(),
filters=id_filter)
subnets = self._core_plugin.get_subnets(
utils.get_elevated_context(context), filters=id_filter)
for sub in subnets:
cidr = sub['cidr']
ipnet = netaddr.IPNetwork(cidr)
@@ -1340,11 +1344,11 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
# 'status' in port dict could not be updated by default, use
# check_allow_post to stop the verification of system
external_port = plugin_utils.create_port(
self._core_plugin, context.elevated(),
self._core_plugin, utils.get_elevated_context(context),
{'port': port}, check_allow_post=False)
with plugin_utils.delete_port_on_error(
self._core_plugin, context.elevated(),
self._core_plugin, utils.get_elevated_context(context),
external_port['id']),\
db_api.CONTEXT_WRITER.using(context):
# Ensure IPv4 addresses are allocated on external port
@@ -1381,7 +1385,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
floatingip_db=floatingip_obj.db_obj)
self._core_plugin.update_port(
context.elevated(), external_port['id'],
utils.get_elevated_context(context), external_port['id'],
{'port': {'device_id': fip_id,
'project_id': fip['tenant_id']}})
registry.notify(resources.FLOATING_IP,
@@ -1505,7 +1509,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
# floating IP record once the port is deleted. We can't start
# a transaction first to remove it ourselves because the delete_port
# method will yield in its post-commit activities.
self._core_plugin.delete_port(context.elevated(),
self._core_plugin.delete_port(utils.get_elevated_context(context),
floatingip.floating_port_id,
l3_port_check=False)
registry.notify(resources.FLOATING_IP, events.AFTER_DELETE,
@@ -1595,8 +1599,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
"%(port_id)s no longer exists, allowing deletion.",
{'f_id': port['device_id'], 'port_id': port['id']})
return
elif not l3_obj.Router.objects_exist(context.elevated(),
id=port['device_id']):
elif not l3_obj.Router.objects_exist(
utils.get_elevated_context(context), id=port['device_id']):
LOG.debug("Router %(router_id)s corresponding to port "
"%(port_id)s no longer exists, allowing deletion.",
{'router_id': port['device_id'],

View File

@@ -211,7 +211,7 @@ class DVRResourceOperationHandler(object):
if (old_router and old_router['distributed'] and not
router['distributed']):
self._core_plugin.delete_distributed_port_bindings_by_router_id(
context.elevated(), router_db['id'])
n_utils.get_elevated_context(context), router_db['id'])
@registry.receives(resources.ROUTER, [events.AFTER_UPDATE],
priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE)
@@ -224,7 +224,7 @@ class DVRResourceOperationHandler(object):
old_router = kwargs['old_router']
if old_router and old_router['distributed']:
self.delete_csnat_router_interface_ports(
context.elevated(), router_db)
n_utils.get_elevated_context(context), router_db)
@registry.receives(resources.ROUTER,
[events.AFTER_CREATE, events.AFTER_UPDATE],
@@ -246,8 +246,8 @@ class DVRResourceOperationHandler(object):
not old_router.get(l3_apidef.EXTERNAL_GW_INFO))
if not do_create:
return
if not self._create_snat_intf_ports_if_not_exists(context.elevated(),
router_db):
if not self._create_snat_intf_ports_if_not_exists(
n_utils.get_elevated_context(context), router_db):
LOG.debug("SNAT interface ports not created: %s",
router_db['id'])
return router_db
@@ -280,9 +280,10 @@ class DVRResourceOperationHandler(object):
msg = _("Unable to create the SNAT Interface Port")
raise n_exc.BadRequest(resource='router', msg=msg)
with plugin_utils.delete_port_on_error(self.l3plugin._core_plugin,
context.elevated(),
snat_port['id']):
with plugin_utils.delete_port_on_error(
self.l3plugin._core_plugin,
n_utils.get_elevated_context(context),
snat_port['id']):
l3_obj.RouterPort(
context,
port_id=snat_port['id'],
@@ -357,7 +358,8 @@ class DVRResourceOperationHandler(object):
return
if not payload.metadata.get('new_network_id'):
self.delete_csnat_router_interface_ports(
payload.context.elevated(), payload.latest_state)
n_utils.get_elevated_context(payload.context),
payload.latest_state)
network_id = payload.metadata.get('network_id')
# NOTE(Swami): Delete the Floatingip agent gateway port
@@ -366,10 +368,11 @@ class DVRResourceOperationHandler(object):
filters = {'network_id': [network_id],
'device_owner': [const.DEVICE_OWNER_ROUTER_GW]}
ext_net_gw_ports = self._core_plugin.get_ports(
payload.context.elevated(), filters)
n_utils.get_elevated_context(payload.context), filters)
if not ext_net_gw_ports:
self.delete_floatingip_agent_gateway_port(
payload.context.elevated(), None, network_id)
n_utils.get_elevated_context(payload.context),
None, network_id)
# Send the information to all the L3 Agent hosts
# to clean up the fip namespace as it is no longer required.
self.l3plugin.l3_rpc_notifier.delete_fipnamespace_for_ext_net(
@@ -443,7 +446,7 @@ class DVRResourceOperationHandler(object):
floatingIP association happens.
"""
if association_event and router_id:
admin_ctx = context.elevated()
admin_ctx = n_utils.get_elevated_context(context)
router_dict = self.get_router(admin_ctx, router_id)
# Check if distributed router and then create the
# FloatingIP agent gateway port
@@ -500,7 +503,7 @@ class DVRResourceOperationHandler(object):
"""Event handler to for csnat port creation on interface creation."""
if not router_db.extra_attributes.distributed or not router_db.gw_port:
return
admin_context = context.elevated()
admin_context = n_utils.get_elevated_context(context)
self._add_csnat_router_interface_port(
admin_context, router_db, port['network_id'],
[{'subnet_id': port['fixed_ips'][-1]['subnet_id']}])
@@ -524,7 +527,7 @@ class DVRResourceOperationHandler(object):
# IPv6 subnet
# Add new prefix to an existing ipv6 csnat port with the
# same network id if one exists
admin_ctx = context.elevated()
admin_ctx = n_utils.get_elevated_context(context)
router = self.l3plugin._get_router(admin_ctx, router_id)
cs_port = self._find_v6_router_port_by_network_and_device_owner(
router, subnet['network_id'], const.DEVICE_OWNER_ROUTER_SNAT)
@@ -603,7 +606,7 @@ class DVRResourceOperationHandler(object):
if fixed_ips:
# multiple prefix port - delete prefix from port
self.l3plugin._core_plugin.update_port(
context.elevated(),
n_utils.get_elevated_context(context),
cs_port['id'], {'port': {'fixed_ips': fixed_ips}})
return True
return False
@@ -693,7 +696,8 @@ class DVRResourceOperationHandler(object):
if not is_multiple_prefix_csport:
# Single prefix port - go ahead and delete the port
self.delete_csnat_router_interface_ports(
context.elevated(), router, subnet_id=sub_id)
n_utils.get_elevated_context(context),
router, subnet_id=sub_id)
def _cleanup_related_hosts_after_interface_removal(
self, context, router_id, subnet_id):
@@ -895,7 +899,8 @@ class _DVRAgentInterfaceMixin(object):
return []
filters = {'device_id': [fip_agent_id],
'device_owner': [const.DEVICE_OWNER_AGENT_GW]}
ports = self._core_plugin.get_ports(context.elevated(), filters)
ports = self._core_plugin.get_ports(
n_utils.get_elevated_context(context), filters)
LOG.debug("Return the FIP ports: %s ", ports)
return ports
@@ -1053,7 +1058,7 @@ class _DVRAgentInterfaceMixin(object):
fip = fips[0]
network_id = fip.get('floating_network_id')
self.create_fip_agent_gw_port_if_not_exists(
context.elevated(), network_id, host)
n_utils.get_elevated_context(context), network_id, host)
def create_fip_agent_gw_port_if_not_exists(self, context, network_id,
host):
@@ -1265,7 +1270,8 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
try:
# using admin context as router may belong to admin tenant
router = self._get_router(context.elevated(), router_id)
router = self._get_router(n_utils.get_elevated_context(context),
router_id)
except l3_exc.RouterNotFound:
LOG.warning("Router %s was not found. "
"Skipping agent notification.",
@@ -1299,7 +1305,7 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
context, [router_id], dest_host)
else:
centralized_agent_list = self.list_l3_agents_hosting_router(
context.elevated(), router_id)['agents']
n_utils.get_elevated_context(context), router_id)['agents']
for agent in centralized_agent_list:
self.l3_rpc_notifier.routers_updated_on_host(
context, [router_id], agent['host'])
@@ -1325,7 +1331,8 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
def is_router_distributed(self, context, router_id):
if router_id:
return is_distributed_router(
self.get_router(context.elevated(), router_id))
self.get_router(n_utils.get_elevated_context(context),
router_id))
return False
def get_ports_under_dvr_connected_subnet(self, context, subnet_id):

View File

@@ -205,7 +205,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
if not n_utils.is_dvr_serviced(deleted_port['device_owner']):
return []
admin_context = context.elevated()
admin_context = n_utils.get_elevated_context(context)
port_host = deleted_port[portbindings.HOST_ID]
subnet_ids = [ip['subnet_id'] for ip in deleted_port['fixed_ips']]
router_ids = self.get_dvr_routers_by_subnet_ids(admin_context,
@@ -280,7 +280,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
int_ports = self._core_plugin.get_ports(
context.elevated(), filters=filter_rtr)
n_utils.get_elevated_context(context), filters=filter_rtr)
for port in int_ports:
dvr_binding = (ml2_db.
get_distributed_port_binding_by_host(
@@ -304,7 +304,8 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
"""Returns all hosts to send notification about router update"""
hosts = super(L3_DVRsch_db_mixin, self).get_hosts_to_notify(
context, router_id)
router = self.get_router(context.elevated(), router_id)
router = self.get_router(n_utils.get_elevated_context(context),
router_id)
if router.get('distributed', False):
dvr_hosts = self._get_dvr_hosts_for_router(context, router_id)
dvr_hosts = set(dvr_hosts) - set(hosts)
@@ -398,7 +399,8 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
# TODO(slaweq): move this method to RouterPort OVO object
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
RouterPort = l3_models.RouterPort
query = context.elevated().session.query(RouterPort.router_id)
query = n_utils.get_elevated_context(context).session.query(
RouterPort.router_id)
query = query.join(models_v2.Port)
query = query.join(
models_v2.Subnet,

View File

@@ -18,6 +18,7 @@ from neutron_lib import constants as lib_const
from neutron_lib.db import utils as lib_db_utils
from neutron_lib.plugins import directory
from neutron.common import utils as common_utils
from neutron.extensions import floatingip_pools as fip_pools_ext
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
@@ -49,7 +50,7 @@ class FloatingIPPoolsDbMixin(object):
# NOTE(hongbin): Use elevated context to make sure we have enough
# permission to retrieve subnets that are not in current tenant
# but belongs to external networks shared with current tenant.
admin_context = context.elevated()
admin_context = common_utils.get_elevated_context(context)
subnet_objs = subnet_obj.Subnet.get_objects(admin_context,
_pager=pager,
network_id=net_ids)

View File

@@ -214,7 +214,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
cfg.CONF.l3_ha_network_physical_name)
def _create_ha_network(self, context, tenant_id):
admin_ctx = context.elevated()
admin_ctx = n_utils.get_elevated_context(context)
args = {'network':
{'name': constants.HA_NETWORK_NAME % tenant_id,
@@ -311,7 +311,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
return binding.db_obj
def _delete_ha_interfaces(self, context, router_id):
admin_ctx = context.elevated()
admin_ctx = n_utils.get_elevated_context(context)
device_filter = {'device_id': [router_id],
'device_owner':
[constants.DEVICE_OWNER_ROUTER_HA_INTF]}
@@ -322,7 +322,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
l3_port_check=False)
def delete_ha_interfaces_on_host(self, context, router_id, host):
admin_ctx = context.elevated()
admin_ctx = n_utils.get_elevated_context(context)
port_ids = (binding.port_id for binding
in self.get_ha_router_port_bindings(admin_ctx,
[router_id], host))
@@ -497,7 +497,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
self._notify_router_updated(context, router_db.id)
def _delete_ha_network(self, context, net):
admin_ctx = context.elevated()
admin_ctx = n_utils.get_elevated_context(context)
self._core_plugin.delete_network(admin_ctx, net.network_id)
def safe_delete_ha_network(self, context, ha_network, tenant_id):
@@ -707,7 +707,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
self._update_router_port_bindings(context, states, host)
def _update_router_port_bindings(self, context, states, host):
admin_ctx = context.elevated()
admin_ctx = n_utils.get_elevated_context(context)
device_filter = {'device_id': list(states.keys()),
'device_owner':
[constants.DEVICE_OWNER_HA_REPLICATED_INT,
@@ -740,7 +740,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
gateway_port_status = gateway_port['status']
gateway_port_binding_host = gateway_port[portbindings.HOST_ID]
admin_ctx = context.elevated()
admin_ctx = n_utils.get_elevated_context(context)
router_id = router['id']
ha_bindings = self.get_l3_bindings_hosting_router_with_ha_states(
admin_ctx, router_id)

View File

@@ -75,7 +75,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
@classmethod
def is_shared_with_tenant(cls, context, obj_id, tenant_id):
ctx = context.elevated()
ctx = utils.get_elevated_context(context)
with cls.db_context_reader(ctx):
return cls.get_shared_with_tenant(ctx, cls.rbac_db_cls,
obj_id, tenant_id)
@@ -105,7 +105,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
@classmethod
def _validate_rbac_policy_delete(cls, context, obj_id, target_tenant):
ctx_admin = context.elevated()
ctx_admin = utils.get_elevated_context(context)
rb_model = cls.rbac_db_cls.db_model
bound_tenant_ids = cls.get_bound_tenant_ids(ctx_admin, obj_id)
db_obj_sharing_entries = cls._get_db_obj_rbac_entries(
@@ -148,7 +148,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
return
target_tenant = policy['target_tenant']
db_obj = obj_db_api.get_object(
cls, context.elevated(), id=policy['object_id'])
cls, utils.get_elevated_context(context), id=policy['object_id'])
if db_obj.tenant_id == target_tenant:
return
cls._validate_rbac_policy_delete(context=context,
@@ -200,7 +200,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
if object_type != cls.rbac_db_cls.db_model.object_type:
return
db_obj = obj_db_api.get_object(
cls, context.elevated(), id=policy['object_id'])
cls, utils.get_elevated_context(context), id=policy['object_id'])
if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE):
if (not context.is_admin and
db_obj['tenant_id'] != context.tenant_id):
@@ -224,7 +224,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
return self.create_rbac_policy(self.obj_context, rbac_policy)
def update_shared(self, is_shared_new, obj_id):
admin_context = self.obj_context.elevated()
admin_context = utils.get_elevated_context(self.obj_context)
shared_prev = obj_db_api.get_object(self.rbac_db_cls, admin_context,
object_id=obj_id,
target_tenant='*',
@@ -266,7 +266,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
# instantiated and without DB interaction (get_object(s), update,
# create), it should be rare case to load 'shared' by that method
shared = self.get_shared_with_tenant(
self.obj_context.elevated(),
utils.get_elevated_context(self.obj_context),
self.rbac_db_cls,
self.id,
self.project_id

View File

@@ -22,6 +22,7 @@ from oslo_versionedobjects import fields as obj_fields
from sqlalchemy import and_, or_
from sqlalchemy.sql import exists
from neutron.common import utils as common_utils
from neutron.db.models import dns as dns_models
from neutron.db.models import segment as segment_model
from neutron.db.models import subnet_service_type
@@ -273,10 +274,11 @@ class Subnet(base.NeutronDbObject):
# instantiated and without DB interaction (get_object(s), update,
# create), it should be rare case to load 'shared' by that method
shared = (rbac_db.RbacNeutronDbObjectMixin.
get_shared_with_tenant(self.obj_context.elevated(),
network.NetworkRBAC,
self.network_id,
self.project_id))
get_shared_with_tenant(
common_utils.get_elevated_context(self.obj_context),
network.NetworkRBAC,
self.network_id,
self.project_id))
setattr(self, 'shared', shared)
self.obj_reset_changes(['shared'])

View File

@@ -20,6 +20,7 @@ from oslo_versionedobjects import fields as obj_fields
import sqlalchemy as sa
from neutron._i18n import _
from neutron.common import utils as common_utils
from neutron.db import models_v2 as models
from neutron.db import rbac_db_models
from neutron.extensions import rbac as ext_rbac
@@ -116,7 +117,8 @@ class SubnetPool(rbac_db.NeutronRbacObject):
policy = payload.request_body
db_obj = obj_db_api.get_object(
cls, context.elevated(), id=policy['object_id'])
cls, common_utils.get_elevated_context(context),
id=policy['object_id'])
if not db_obj["address_scope_id"]:
# Nothing to validate

View File

@@ -28,6 +28,7 @@ from sqlalchemy import or_
from sqlalchemy.orm import exc
from neutron._i18n import _
from neutron.common import utils as common_utils
from neutron.db.models import securitygroup as sg_models
from neutron.db import models_v2
from neutron.objects import base as objects_base
@@ -339,7 +340,8 @@ def _prevent_segment_delete_with_port_bound(resource, event, trigger,
plugin = directory.get_plugin()
for port_id in auto_delete_port_ids:
try:
plugin.delete_port(payload.context.elevated(), port_id)
plugin.delete_port(
common_utils.get_elevated_context(payload.context), port_id)
except nlib_exc.PortNotFound:
# Don't raise if something else concurrently deleted the port
LOG.debug("Ignoring PortNotFound when deleting port '%s'. "

View File

@@ -263,7 +263,7 @@ class L3Scheduler(object, metaclass=abc.ABCMeta):
def create_ha_port_and_bind(self, plugin, context, router_id,
tenant_id, agent, is_manual_scheduling=False):
"""Creates and binds a new HA port for this agent."""
ctxt = context.elevated()
ctxt = utils.get_elevated_context(context)
router_db = plugin._get_router(ctxt, router_id)
creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
plugin, ctxt, router_db, tenant_id)

View File

@@ -33,6 +33,7 @@ from oslo_utils import excutils
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import extensions
from neutron.common.ovn import utils
from neutron.common import utils as common_utils
from neutron.db.availability_zone import router as router_az_db
from neutron.db import dns_db
from neutron.db import extraroute_db
@@ -286,7 +287,7 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase,
return fip
def disassociate_floatingips(self, context, port_id, do_notify=True):
fips = self.get_floatingips(context.elevated(),
fips = self.get_floatingips(common_utils.get_elevated_context(context),
filters={'port_id': [port_id]})
router_ids = super(OVNL3RouterPlugin, self).disassociate_floatingips(
context, port_id, do_notify)

View File

@@ -142,8 +142,9 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase):
# dvr_no_external host to one dvr host. So we just do not allow
# all dvr router's floating IP to be binded to a port which
# already has port forwarding.
router = self.l3_plugin.get_router(payload.context.elevated(),
pf_objs[0].router_id)
router = self.l3_plugin.get_router(
utils.get_elevated_context(payload.context),
pf_objs[0].router_id)
if l3_dvr_db.is_distributed_router(router):
raise pf_exc.PortHasPortForwarding(port_id=port_id)
@@ -210,7 +211,7 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase):
# context to check if the floatingip or port forwarding resources
# are owned by other tenants.
if not context.is_admin:
context = context.elevated()
context = utils.get_elevated_context(context)
# If the logic arrives here, that means we have got update_ip_set and
# its value is not None. So we need to get all port forwarding
# resources based on the request port_id for preparing the next
@@ -330,7 +331,7 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase):
def _check_port_has_binding_floating_ip(self, context, port_forwarding):
port_id = port_forwarding['internal_port_id']
floatingip_objs = l3_obj.FloatingIP.get_objects(
context.elevated(),
utils.get_elevated_context(context),
fixed_port_id=port_id)
if floatingip_objs:
floating_ip_address = floatingip_objs[0].floating_ip_address

View File

@@ -41,6 +41,7 @@ from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import utils as common_utils
from neutron.db import db_base_plugin_common
from neutron.extensions import qos
from neutron.objects import base as base_obj
@@ -250,7 +251,7 @@ class QoSPlugin(qos.QoSPluginBase):
return
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
common_utils.get_elevated_context(context), id=policy_id)
self.validate_policy_for_port(context, policy, port)
def _check_port_for_placement_allocation_change(self, resource, event,
@@ -269,9 +270,10 @@ class QoSPlugin(qos.QoSPluginBase):
if (nl_constants.DEVICE_OWNER_COMPUTE_PREFIX in
orig_port['device_owner']):
original_policy = policy_object.QosPolicy.get_object(
context.elevated(), id=original_policy_id)
common_utils.get_elevated_context(context),
id=original_policy_id)
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
common_utils.get_elevated_context(context), id=policy_id)
self._change_placement_allocation(original_policy, policy,
orig_port)
@@ -343,7 +345,7 @@ class QoSPlugin(qos.QoSPluginBase):
updated_port = ports_object.Port.get_object(
context, id=payload.desired_state['id'])
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
common_utils.get_elevated_context(context), id=policy_id)
self.validate_policy_for_port(context, policy, updated_port)
@@ -358,7 +360,7 @@ class QoSPlugin(qos.QoSPluginBase):
return
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
common_utils.get_elevated_context(context), id=policy_id)
self.validate_policy_for_network(context, policy, network_id)
def _validate_update_network_callback(self, resource, event, trigger,
@@ -374,7 +376,7 @@ class QoSPlugin(qos.QoSPluginBase):
return
policy = policy_object.QosPolicy.get_object(
context.elevated(), id=policy_id)
common_utils.get_elevated_context(context), id=policy_id)
self.validate_policy_for_network(
context, policy, network_id=updated_network['id'])

View File

@@ -29,6 +29,7 @@ from oslo_db import exception as db_exc
from oslo_log import helpers as log_helpers
from oslo_utils import uuidutils
from neutron.common import utils as common_utils
from neutron.db import segments_db as db
from neutron.extensions import segment as extension
from neutron import manager
@@ -333,7 +334,7 @@ def _add_segment_host_mapping_for_segment(resource, event, trigger,
def _delete_segments_for_network(resource, event, trigger,
context, network_id):
admin_ctx = context.elevated()
admin_ctx = common_utils.get_elevated_context(context)
global segments_plugin
if not segments_plugin:
segments_plugin = manager.NeutronManager.load_class_for_provider(

View File

@@ -28,6 +28,7 @@ from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks.producer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import utils as common_utils
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk.rpc import constants
@@ -82,7 +83,7 @@ class TrunkSkeleton(object):
@log_helpers.log_method_call
def update_subport_bindings(self, context, subports):
"""Update subport bindings to match trunk host binding."""
el = context.elevated()
el = common_utils.get_elevated_context(context)
ports_by_trunk_id = collections.defaultdict(list)
updated_ports = collections.defaultdict(list)

View File

@@ -25,6 +25,7 @@ from neutron_lib.plugins.ml2 import api
from neutron_lib.services.trunk import constants
from neutron._i18n import _
from neutron.common import utils as common_utils
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import utils
@@ -212,7 +213,8 @@ class SubPortsValidator(object):
for p in ports:
network_port_map[p['network_id']].append({'port_id': p['id']})
networks = core_plugin.get_networks(
context.elevated(), filters={'id': network_port_map})
common_utils.get_elevated_context(context),
filters={'id': network_port_map})
subport_mtus = {}
for net in networks:

View File

@@ -15,6 +15,8 @@
from neutron_lib.api import extensions
from neutron_lib.plugins import directory
from neutron.common import utils as common_utils
def get_agent_types_by_host(context, host):
"""Return the agent types registered on the host."""
@@ -22,7 +24,8 @@ def get_agent_types_by_host(context, host):
core_plugin = directory.get_plugin()
if extensions.is_extension_supported(core_plugin, 'agent'):
agents = core_plugin.get_agents(
context.elevated(), filters={'host': [host]})
common_utils.get_elevated_context(context),
filters={'host': [host]})
agent_types = [a['agent_type'] for a in agents]
return agent_types

View File

@@ -102,7 +102,7 @@ class RequestTestCase(base.BaseTestCase):
user_context = context.Context(
'fake_user', 'fake_project', is_admin=False)
self.assertFalse(user_context.is_admin)
admin_context = user_context.elevated()
admin_context = utils.get_elevated_context(user_context)
self.assertFalse(user_context.is_admin)
self.assertTrue(admin_context.is_admin)
self.assertNotIn('admin', user_context.roles)

View File

@@ -30,6 +30,7 @@ from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as plugin_utils
from oslo_utils import uuidutils
from neutron.common import utils as common_utils
from neutron.db import agents_db
from neutron.db import l3_dvr_db
from neutron.db import l3_dvrscheduler_db
@@ -327,9 +328,9 @@ class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
check_for_fip_and_create_agent_gw_port_on_host_if_not_exists(
ctxt, port, 'host'))
if fip:
c_fip.assert_called_once_with(ctxt.elevated(),
fip['floating_network_id'],
'host')
c_fip.assert_called_once_with(
common_utils.get_elevated_context(ctxt),
fip['floating_network_id'], 'host')
else:
c_fip.assert_not_called()

View File

@@ -25,6 +25,7 @@ from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.common import utils as common_utils
from neutron.db import l3_fip_pools_db
from neutron.extensions import l3
from neutron.objects import network as net_obj
@@ -128,7 +129,7 @@ class FloatingIPPoolsDBIntTestCase(test_l3.L3BaseForIntTests,
self.setup_notification_driver()
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.admin_ctxt = self.ctxt.elevated()
self.admin_ctxt = common_utils.get_elevated_context(self.ctxt)
class FloatingIPPoolsDBSepTestCase(test_l3.L3BaseForSepTests,
@@ -154,4 +155,4 @@ class FloatingIPPoolsDBSepTestCase(test_l3.L3BaseForSepTests,
self.setup_notification_driver()
self.plugin = directory.get_plugin(plugin_constants.L3)
self.ctxt = context.Context('fake_user', 'fake_tenant')
self.admin_ctxt = self.ctxt.elevated()
self.admin_ctxt = common_utils.get_elevated_context(self.ctxt)

View File

@@ -18,6 +18,7 @@ from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils
from oslo_versionedobjects import exception
from neutron.common import utils as common_utils
from neutron.objects.db import api as db_api
from neutron.objects import network as net_obj
from neutron.objects import ports as port_obj
@@ -145,7 +146,7 @@ class QosPolicyObjectTestCase(test_base.BaseObjectIfaceTestCase):
test_to_dict_makes_primitive_field_value())
def test_get_policy_obj_not_found(self):
context = self.context.elevated()
context = common_utils.get_elevated_context(self.context)
self.assertRaises(qos_exc.QosPolicyNotFound,
policy.QosPolicy.get_policy_obj,
context, "fake_id")