Revert "Set system_scope='all' in elevated context"

This reverts commit 062336e59b.

Now that neutron-lib contains a proper fix for setting
system_scope='all' in the elevated context, we can revert the
temporary fix made at the end of the Wallaby cycle.

Related-Bug: #1920001

Conflicts:
    neutron/api/rpc/agentnotifiers/dhcp_rpc_agent_api.py
    neutron/common/utils.py
    neutron/db/address_group_db.py
    neutron/services/segments/db.py

Change-Id: Ife9b647b403bdd76a8a99984ea8858bf95c96bc3
Slawek Kaplonski 2021-04-26 22:10:34 +02:00
parent 02451f381b
commit d7371e13e4
29 changed files with 96 additions and 155 deletions


@@ -28,7 +28,6 @@ from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
- from neutron.common import utils as common_utils
# Priorities - lower value is higher priority
PRIORITY_NETWORK_CREATE = 0
@@ -174,8 +173,7 @@ class DhcpAgentNotifyAPI(object):
num_ports = self.plugin.get_ports_count(
context, {'network_id': [network_id]})
if not network:
- admin_ctx = (context if context.is_admin else
- common_utils.get_elevated_context(context))
+ admin_ctx = context if context.is_admin else context.elevated()
network = self.plugin.get_network(admin_ctx, network_id)
notification_required = (
num_ports > 0 and len(network['subnets']) >= 1)
@@ -224,8 +222,7 @@ class DhcpAgentNotifyAPI(object):
method == 'port_create_end' and
not self._is_reserved_dhcp_port(payload['port']))
if schedule_required:
- admin_ctx = (context if context.is_admin else
- common_utils.get_elevated_context(context))
+ admin_ctx = context if context.is_admin else context.elevated()
network = network or self.plugin.get_network(
admin_ctx, network_id)
if candidate_hosts:


@@ -25,7 +25,6 @@ from oslo_log import log as logging
import oslo_messaging
from neutron.api.rpc.agentnotifiers import utils as ag_utils
- from neutron.common import utils as common_utils
LOG = logging.getLogger(__name__)
@@ -56,9 +55,7 @@ class L3AgentNotifyAPI(object):
def _agent_notification(self, context, method, router_ids, operation,
shuffle_agents):
"""Notify changed routers to hosting l3 agents."""
- adminContext = (
- context if context.is_admin else
- common_utils.get_elevated_context(context))
+ adminContext = context if context.is_admin else context.elevated()
plugin = directory.get_plugin(plugin_constants.L3)
for router_id in router_ids:
hosts = plugin.get_hosts_to_notify(adminContext, router_id)
@@ -95,9 +92,8 @@ class L3AgentNotifyAPI(object):
return
if extensions.is_extension_supported(
plugin, constants.L3_AGENT_SCHEDULER_EXT_ALIAS):
- adminContext = (
- context.is_admin and
- context or common_utils.get_elevated_context(context))
+ adminContext = (context.is_admin and
+ context or context.elevated())
if schedule_routers:
plugin.schedule_routers(adminContext, router_ids)
self._agent_notification(


@@ -21,7 +21,6 @@ from neutron_lib import rpc as n_rpc
from oslo_log import log as logging
import oslo_messaging
- from neutron.common import utils as common_utils
from neutron.db import agentschedulers_db
LOG = logging.getLogger(__name__)
@@ -37,9 +36,7 @@ class MeteringAgentNotifyAPI(object):
def _agent_notification(self, context, method, routers):
"""Notify l3 metering agents hosted by l3 agent hosts."""
- adminContext = (
- context if context.is_admin else
- common_utils.get_elevated_context(context))
+ adminContext = context if context.is_admin else context.elevated()
plugin = directory.get_plugin(plugin_constants.L3)
l3_routers = {}


@@ -971,16 +971,6 @@ def with_metaclass(meta, *bases):
return metaclass('temporary_class', None, {})
- def get_elevated_context(context):
- admin_context = context.elevated()
- # NOTE(slaweq): system_scope='all' is needed if new policies are
- # enforced. This should be set in context.elevated() method in the
- # neutron-lib but as a temporary workaround it is done here
- if cfg.CONF.oslo_policy.enforce_new_defaults:
- admin_context.system_scope = 'all'
- return admin_context
def get_sql_random_method(sql_dialect_name):
"""Return the SQL random method supported depending on the dialect."""
# NOTE(ralonsoh): this method is a good candidate to be implemented in


@@ -19,7 +19,6 @@ from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.services.qos import constants as qos_consts
- from neutron.common import utils as common_utils
from neutron.core_extensions import base
from neutron.objects.qos import policy as policy_object
@@ -48,7 +47,7 @@ class QosCoreResourceExtension(base.CoreResourceExtension):
def _update_port_policy(self, context, port, port_changes):
old_policy = policy_object.QosPolicy.get_port_policy(
- common_utils.get_elevated_context(context), port['id'])
+ context.elevated(), port['id'])
if old_policy:
self._check_policy_change_permission(context, old_policy)
old_policy.detach_port(port['id'])
@@ -76,7 +75,7 @@ class QosCoreResourceExtension(base.CoreResourceExtension):
def _update_network_policy(self, context, network, network_changes):
old_policy = policy_object.QosPolicy.get_network_policy(
- common_utils.get_elevated_context(context), network['id'])
+ context.elevated(), network['id'])
if old_policy:
self._check_policy_change_permission(context, old_policy)
old_policy.detach_network(network['id'])


@@ -20,7 +20,6 @@ from neutron_lib.db import utils as db_utils
from neutron_lib.exceptions import address_group as ag_exc
from oslo_utils import uuidutils
- from neutron.common import utils as common_utils
from neutron.extensions import address_group as ag_ext
from neutron.objects import address_group as ag_obj
from neutron.objects import base as base_obj
@@ -176,9 +175,8 @@ class AddressGroupDbMixin(ag_ext.AddressGroupPluginBase):
]
def delete_address_group(self, context, id):
- if sg_obj.SecurityGroupRule.get_objects(
- common_utils.get_elevated_context(context),
- remote_address_group_id=id):
+ if sg_obj.SecurityGroupRule.get_objects(context.elevated(),
+ remote_address_group_id=id):
raise ag_exc.AddressGroupInUse(address_group_id=id)
ag = self._get_address_group(context, id)
ag.delete()


@@ -488,7 +488,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
_constants.AUTO_DELETE_PORT_OWNERS))]
for port_id in auto_delete_port_ids:
try:
- self.delete_port(utils.get_elevated_context(context), port_id)
+ self.delete_port(context.elevated(), port_id)
except exc.PortNotFound:
# Don't raise if something else concurrently deleted the port
LOG.debug("Ignoring PortNotFound when deleting port '%s'. "
@@ -716,7 +716,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
def _update_router_gw_port(self, context, router_id, network, subnet):
l3plugin = directory.get_plugin(plugin_constants.L3)
- ctx_admin = utils.get_elevated_context(context)
+ ctx_admin = context.elevated()
ext_subnets_dict = {s['id']: s for s in network['subnets']}
router = l3plugin.get_router(ctx_admin, router_id)
external_gateway_info = router['external_gateway_info']
@@ -1599,7 +1599,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
if device_id:
if hasattr(self, 'get_router'):
try:
- ctx_admin = utils.get_elevated_context(context)
+ ctx_admin = context.elevated()
router = self.get_router(ctx_admin, device_id)
except l3_exc.RouterNotFound:
return
@@ -1607,7 +1607,7 @@ class NeutronDbPluginV2(db_base_plugin_common.DbBasePluginCommon,
l3plugin = directory.get_plugin(plugin_constants.L3)
if l3plugin:
try:
- ctx_admin = utils.get_elevated_context(context)
+ ctx_admin = context.elevated()
router = l3plugin.get_router(ctx_admin,
device_id)
except l3_exc.RouterNotFound:


@@ -29,7 +29,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
- from neutron.common import utils as common_utils
from neutron.conf.db import extraroute_db
from neutron.db import l3_db
from neutron.objects import router as l3_obj
@@ -93,7 +92,7 @@ class ExtraRoute_dbonly_mixin(l3_db.L3_NAT_dbonly_mixin):
router_id=router_id,
quota=cfg.CONF.max_routes)
- context = common_utils.get_elevated_context(context)
+ context = context.elevated()
filters = {'device_id': [router_id]}
ports = self._core_plugin.get_ports(context, filters)
cidrs = []


@@ -322,19 +322,17 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
# the current transaction.
context.GUARD_TRANSACTION = False
gw_port = plugin_utils.create_port(
- self._core_plugin, utils.get_elevated_context(context),
- {'port': port_data})
+ self._core_plugin, context.elevated(), {'port': port_data})
if not gw_port['fixed_ips']:
LOG.debug('No IPs available for external network %s',
network_id)
with plugin_utils.delete_port_on_error(
- self._core_plugin, utils.get_elevated_context(context),
- gw_port['id']):
+ self._core_plugin, context.elevated(), gw_port['id']):
with db_api.CONTEXT_WRITER.using(context):
router = self._get_router(context, router['id'])
router.gw_port = self._core_plugin._get_port(
- utils.get_elevated_context(context), gw_port['id'])
+ context.elevated(), gw_port['id'])
router_port = l3_obj.RouterPort(
context,
router_id=router.id,
@@ -377,7 +375,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
router.gw_port and router.gw_port['network_id'] != new_network_id)
if not port_requires_deletion:
return
- admin_ctx = utils.get_elevated_context(context)
+ admin_ctx = context.elevated()
old_network_id = router.gw_port['network_id']
if self.router_gw_port_has_floating_ips(admin_ctx, router_id):
@@ -466,9 +464,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
resource_id=router_id))
def _update_current_gw_port(self, context, router_id, router, ext_ips):
- self._core_plugin.update_port(
- utils.get_elevated_context(context), router.gw_port['id'],
- {'port': {'fixed_ips': ext_ips}})
+ self._core_plugin.update_port(context.elevated(), router.gw_port['id'],
+ {'port': {'fixed_ips': ext_ips}})
def _update_router_gw_info(self, context, router_id, info, router=None):
router = router or self._get_router(context, router_id)
@@ -537,9 +534,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
if context.session.is_active:
context.GUARD_TRANSACTION = False
for rp_id in router_ports_ids:
- self._core_plugin.delete_port(
- utils.get_elevated_context(context), rp_id,
- l3_port_check=False)
+ self._core_plugin.delete_port(context.elevated(), rp_id,
+ l3_port_check=False)
router = self._get_router(context, id)
registry.notify(resources.ROUTER, events.PRECOMMIT_DELETE,
@@ -592,7 +588,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
raise n_exc.BadRequest(resource='router', msg=msg)
if p.get('device_owner') == DEVICE_OWNER_ROUTER_GW:
ext_subts = self._core_plugin.get_subnets(
- utils.get_elevated_context(context),
+ context.elevated(),
filters={'network_id': [p['network_id']]})
for sub in ext_subts:
router_subnets.append(sub['id'])
@@ -603,8 +599,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
new_subnets = [s for s in new_subnets
if s['cidr'] != constants.PROVISIONAL_IPV6_PD_PREFIX]
id_filter = {'id': router_subnets}
- subnets = self._core_plugin.get_subnets(
- utils.get_elevated_context(context), filters=id_filter)
+ subnets = self._core_plugin.get_subnets(context.elevated(),
+ filters=id_filter)
for sub in subnets:
cidr = sub['cidr']
ipnet = netaddr.IPNetwork(cidr)
@@ -1347,11 +1343,11 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
# 'status' in port dict could not be updated by default, use
# check_allow_post to stop the verification of system
external_port = plugin_utils.create_port(
- self._core_plugin, utils.get_elevated_context(context),
+ self._core_plugin, context.elevated(),
{'port': port}, check_allow_post=False)
with plugin_utils.delete_port_on_error(
- self._core_plugin, utils.get_elevated_context(context),
+ self._core_plugin, context.elevated(),
external_port['id']),\
db_api.CONTEXT_WRITER.using(context):
# Ensure IPv4 addresses are allocated on external port
@@ -1388,7 +1384,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
floatingip_db=floatingip_obj.db_obj)
self._core_plugin.update_port(
- utils.get_elevated_context(context), external_port['id'],
+ context.elevated(), external_port['id'],
{'port': {'device_id': fip_id,
'project_id': fip['tenant_id']}})
registry.notify(resources.FLOATING_IP,
@@ -1512,7 +1508,7 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
# floating IP record once the port is deleted. We can't start
# a transaction first to remove it ourselves because the delete_port
# method will yield in its post-commit activities.
- self._core_plugin.delete_port(utils.get_elevated_context(context),
+ self._core_plugin.delete_port(context.elevated(),
floatingip.floating_port_id,
l3_port_check=False)
registry.notify(resources.FLOATING_IP, events.AFTER_DELETE,
@@ -1601,8 +1597,8 @@ class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
"%(port_id)s no longer exists, allowing deletion.",
{'f_id': port['device_id'], 'port_id': port['id']})
return
- elif not l3_obj.Router.objects_exist(
- utils.get_elevated_context(context), id=port['device_id']):
+ elif not l3_obj.Router.objects_exist(context.elevated(),
+ id=port['device_id']):
LOG.debug("Router %(router_id)s corresponding to port "
"%(port_id)s no longer exists, allowing deletion.",
{'router_id': port['device_id'],


@@ -211,7 +211,7 @@ class DVRResourceOperationHandler(object):
if (old_router and old_router['distributed'] and not
router['distributed']):
self._core_plugin.delete_distributed_port_bindings_by_router_id(
- n_utils.get_elevated_context(context), router_db['id'])
+ context.elevated(), router_db['id'])
@registry.receives(resources.ROUTER, [events.AFTER_UPDATE],
priority_group.PRIORITY_ROUTER_EXTENDED_ATTRIBUTE)
@@ -224,7 +224,7 @@ class DVRResourceOperationHandler(object):
old_router = kwargs['old_router']
if old_router and old_router['distributed']:
self.delete_csnat_router_interface_ports(
- n_utils.get_elevated_context(context), router_db)
+ context.elevated(), router_db)
@registry.receives(resources.ROUTER,
[events.AFTER_CREATE, events.AFTER_UPDATE],
@@ -246,8 +246,8 @@ class DVRResourceOperationHandler(object):
not old_router.get(l3_apidef.EXTERNAL_GW_INFO))
if not do_create:
return
- if not self._create_snat_intf_ports_if_not_exists(
- n_utils.get_elevated_context(context), router_db):
+ if not self._create_snat_intf_ports_if_not_exists(context.elevated(),
+ router_db):
LOG.debug("SNAT interface ports not created: %s",
router_db['id'])
return router_db
@@ -280,10 +280,9 @@ class DVRResourceOperationHandler(object):
msg = _("Unable to create the SNAT Interface Port")
raise n_exc.BadRequest(resource='router', msg=msg)
- with plugin_utils.delete_port_on_error(
- self.l3plugin._core_plugin,
- n_utils.get_elevated_context(context),
- snat_port['id']):
+ with plugin_utils.delete_port_on_error(self.l3plugin._core_plugin,
+ context.elevated(),
+ snat_port['id']):
l3_obj.RouterPort(
context,
port_id=snat_port['id'],
@@ -358,8 +357,7 @@ class DVRResourceOperationHandler(object):
return
if not payload.metadata.get('new_network_id'):
self.delete_csnat_router_interface_ports(
- n_utils.get_elevated_context(payload.context),
- payload.latest_state)
+ payload.context.elevated(), payload.latest_state)
network_id = payload.metadata.get('network_id')
# NOTE(Swami): Delete the Floatingip agent gateway port
@@ -368,11 +366,10 @@ class DVRResourceOperationHandler(object):
filters = {'network_id': [network_id],
'device_owner': [const.DEVICE_OWNER_ROUTER_GW]}
ext_net_gw_ports = self._core_plugin.get_ports(
- n_utils.get_elevated_context(payload.context), filters)
+ payload.context.elevated(), filters)
if not ext_net_gw_ports:
self.delete_floatingip_agent_gateway_port(
- n_utils.get_elevated_context(payload.context),
- None, network_id)
+ payload.context.elevated(), None, network_id)
# Send the information to all the L3 Agent hosts
# to clean up the fip namespace as it is no longer required.
self.l3plugin.l3_rpc_notifier.delete_fipnamespace_for_ext_net(
@@ -447,7 +444,7 @@ class DVRResourceOperationHandler(object):
floatingIP association happens.
"""
if association_event and router_id:
- admin_ctx = n_utils.get_elevated_context(context)
+ admin_ctx = context.elevated()
router_dict = self.get_router(admin_ctx, router_id)
# Check if distributed router and then create the
# FloatingIP agent gateway port
@@ -620,7 +617,7 @@ class DVRResourceOperationHandler(object):
if fixed_ips:
# multiple prefix port - delete prefix from port
self.l3plugin._core_plugin.update_port(
- n_utils.get_elevated_context(context),
+ context.elevated(),
cs_port['id'], {'port': {'fixed_ips': fixed_ips}})
return True
return False
@@ -711,8 +708,7 @@ class DVRResourceOperationHandler(object):
if not is_multiple_prefix_csport:
# Single prefix port - go ahead and delete the port
self.delete_csnat_router_interface_ports(
- n_utils.get_elevated_context(context),
- router, subnet_id=sub_id)
+ context.elevated(), router, subnet_id=sub_id)
@registry.receives(resources.ROUTER_INTERFACE, [events.AFTER_DELETE])
def _cleanup_after_interface_removal(self, resource, event, trigger,
@@ -921,8 +917,7 @@ class _DVRAgentInterfaceMixin(object):
return []
filters = {'device_id': [fip_agent_id],
'device_owner': [const.DEVICE_OWNER_AGENT_GW]}
- ports = self._core_plugin.get_ports(
- n_utils.get_elevated_context(context), filters)
+ ports = self._core_plugin.get_ports(context.elevated(), filters)
LOG.debug("Return the FIP ports: %s ", ports)
return ports
@@ -1080,7 +1075,7 @@ class _DVRAgentInterfaceMixin(object):
fip = fips[0]
network_id = fip.get('floating_network_id')
self.create_fip_agent_gw_port_if_not_exists(
- n_utils.get_elevated_context(context), network_id, host)
+ context.elevated(), network_id, host)
def create_fip_agent_gw_port_if_not_exists(self, context, network_id,
host):
@@ -1292,8 +1287,7 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
try:
# using admin context as router may belong to admin tenant
- router = self._get_router(n_utils.get_elevated_context(context),
- router_id)
+ router = self._get_router(context.elevated(), router_id)
except l3_exc.RouterNotFound:
LOG.warning("Router %s was not found. "
"Skipping agent notification.",
@@ -1327,7 +1321,7 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
context, [router_id], dest_host)
else:
centralized_agent_list = self.list_l3_agents_hosting_router(
- n_utils.get_elevated_context(context), router_id)['agents']
+ context.elevated(), router_id)['agents']
for agent in centralized_agent_list:
self.l3_rpc_notifier.routers_updated_on_host(
context, [router_id], agent['host'])
@@ -1353,8 +1347,7 @@ class L3_NAT_with_dvr_db_mixin(_DVRAgentInterfaceMixin,
def is_router_distributed(self, context, router_id):
if router_id:
return is_distributed_router(
- self.get_router(n_utils.get_elevated_context(context),
- router_id))
+ self.get_router(context.elevated(), router_id))
return False
def get_ports_under_dvr_connected_subnet(self, context, subnet_id):


@@ -205,7 +205,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
if not n_utils.is_dvr_serviced(deleted_port['device_owner']):
return []
- admin_context = n_utils.get_elevated_context(context)
+ admin_context = context.elevated()
port_host = deleted_port[portbindings.HOST_ID]
subnet_ids = [ip['subnet_id'] for ip in deleted_port['fixed_ips']]
router_ids = self.get_dvr_routers_by_subnet_ids(admin_context,
@@ -280,7 +280,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
'device_owner':
[n_const.DEVICE_OWNER_DVR_INTERFACE]}
int_ports = self._core_plugin.get_ports(
- n_utils.get_elevated_context(context), filters=filter_rtr)
+ context.elevated(), filters=filter_rtr)
for port in int_ports:
dvr_binding = (ml2_db.
get_distributed_port_binding_by_host(
@@ -304,8 +304,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
"""Returns all hosts to send notification about router update"""
hosts = super(L3_DVRsch_db_mixin, self).get_hosts_to_notify(
context, router_id)
- router = self.get_router(n_utils.get_elevated_context(context),
- router_id)
+ router = self.get_router(context.elevated(), router_id)
if router.get('distributed', False):
dvr_hosts = self._get_dvr_hosts_for_router(context, router_id)
dvr_hosts = set(dvr_hosts) - set(hosts)
@@ -399,8 +398,7 @@ class L3_DVRsch_db_mixin(l3agent_sch_db.L3AgentSchedulerDbMixin):
# TODO(slaweq): move this method to RouterPort OVO object
subnet_ids = self.get_subnet_ids_on_router(context, router_id)
RouterPort = l3_models.RouterPort
- query = n_utils.get_elevated_context(context).session.query(
- RouterPort.router_id)
+ query = context.elevated().session.query(RouterPort.router_id)
query = query.join(models_v2.Port)
query = query.join(
models_v2.Subnet,


@@ -18,7 +18,6 @@ from neutron_lib import constants as lib_const
from neutron_lib.db import utils as lib_db_utils
from neutron_lib.plugins import directory
- from neutron.common import utils as common_utils
from neutron.extensions import floatingip_pools as fip_pools_ext
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
@@ -50,7 +49,7 @@ class FloatingIPPoolsDbMixin(object):
# NOTE(hongbin): Use elevated context to make sure we have enough
# permission to retrieve subnets that are not in current tenant
# but belongs to external networks shared with current tenant.
- admin_context = common_utils.get_elevated_context(context)
+ admin_context = context.elevated()
subnet_objs = subnet_obj.Subnet.get_objects(admin_context,
_pager=pager,
network_id=net_ids)


@@ -45,7 +45,6 @@ from sqlalchemy import exc as sql_exc
from sqlalchemy import orm
from neutron._i18n import _
- from neutron.common import utils as n_utils
from neutron.conf.db import l3_hamode_db
from neutron.db import _utils as db_utils
from neutron.db.availability_zone import router as router_az_db
@@ -214,7 +213,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
cfg.CONF.l3_ha_network_physical_name)
def _create_ha_network(self, context, tenant_id):
- admin_ctx = n_utils.get_elevated_context(context)
+ admin_ctx = context.elevated()
args = {'network':
{'name': constants.HA_NETWORK_NAME % tenant_id,
@@ -311,7 +310,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
return binding.db_obj
def _delete_ha_interfaces(self, context, router_id):
- admin_ctx = n_utils.get_elevated_context(context)
+ admin_ctx = context.elevated()
device_filter = {'device_id': [router_id],
'device_owner':
[constants.DEVICE_OWNER_ROUTER_HA_INTF]}
@@ -322,7 +321,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
l3_port_check=False)
def delete_ha_interfaces_on_host(self, context, router_id, host):
- admin_ctx = n_utils.get_elevated_context(context)
+ admin_ctx = context.elevated()
port_ids = (binding.port_id for binding
in self.get_ha_router_port_bindings(admin_ctx,
[router_id], host))
@@ -483,7 +482,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
self._notify_router_updated(context, router_db.id)
def _delete_ha_network(self, context, net):
- admin_ctx = n_utils.get_elevated_context(context)
+ admin_ctx = context.elevated()
self._core_plugin.delete_network(admin_ctx, net.network_id)
def safe_delete_ha_network(self, context, ha_network, tenant_id):
@@ -693,7 +692,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
self._update_router_port_bindings(context, states, host)
def _update_router_port_bindings(self, context, states, host):
- admin_ctx = n_utils.get_elevated_context(context)
+ admin_ctx = context.elevated()
device_filter = {'device_id': list(states.keys()),
'device_owner':
[constants.DEVICE_OWNER_HA_REPLICATED_INT,
@@ -726,7 +725,7 @@ class L3_HA_NAT_db_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin,
gateway_port_status = gateway_port['status']
gateway_port_binding_host = gateway_port[portbindings.HOST_ID]
- admin_ctx = n_utils.get_elevated_context(context)
+ admin_ctx = context.elevated()
router_id = router['id']
ha_bindings = self.get_l3_bindings_hosting_router_with_ha_states(
admin_ctx, router_id)


@@ -75,7 +75,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
@classmethod
def is_shared_with_tenant(cls, context, obj_id, tenant_id):
- ctx = utils.get_elevated_context(context)
+ ctx = context.elevated()
with cls.db_context_reader(ctx):
return cls.get_shared_with_tenant(ctx, cls.rbac_db_cls,
obj_id, tenant_id)
@@ -105,7 +105,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
@classmethod
def _validate_rbac_policy_delete(cls, context, obj_id, target_tenant):
- ctx_admin = utils.get_elevated_context(context)
+ ctx_admin = context.elevated()
rb_model = cls.rbac_db_cls.db_model
bound_tenant_ids = cls.get_bound_tenant_ids(ctx_admin, obj_id)
db_obj_sharing_entries = cls._get_db_obj_rbac_entries(
@@ -148,7 +148,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
return
target_tenant = policy['target_tenant']
db_obj = obj_db_api.get_object(
- cls, utils.get_elevated_context(context), id=policy['object_id'])
+ cls, context.elevated(), id=policy['object_id'])
if db_obj.tenant_id == target_tenant:
return
cls._validate_rbac_policy_delete(context=context,
@@ -200,7 +200,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
if object_type != cls.rbac_db_cls.db_model.object_type:
return
db_obj = obj_db_api.get_object(
- cls, utils.get_elevated_context(context), id=policy['object_id'])
+ cls, context.elevated(), id=policy['object_id'])
if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE):
if (not context.is_admin and
db_obj['tenant_id'] != context.tenant_id):
@@ -224,7 +224,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
return self.create_rbac_policy(self.obj_context, rbac_policy)
def update_shared(self, is_shared_new, obj_id):
- admin_context = utils.get_elevated_context(self.obj_context)
+ admin_context = self.obj_context.elevated()
shared_prev = obj_db_api.get_object(self.rbac_db_cls, admin_context,
object_id=obj_id,
target_tenant='*',
@@ -266,7 +266,7 @@ class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
# instantiated and without DB interaction (get_object(s), update,
# create), it should be rare case to load 'shared' by that method
shared = self.get_shared_with_tenant(
- utils.get_elevated_context(self.obj_context),
+ self.obj_context.elevated(),
self.rbac_db_cls,
self.id,
self.project_id


@@ -22,7 +22,6 @@ from oslo_versionedobjects import fields as obj_fields
from sqlalchemy import and_, or_
from sqlalchemy.sql import exists
- from neutron.common import utils as common_utils
from neutron.db.models import dns as dns_models
from neutron.db.models import segment as segment_model
from neutron.db.models import subnet_service_type
@@ -274,11 +273,10 @@ class Subnet(base.NeutronDbObject):
# instantiated and without DB interaction (get_object(s), update,
# create), it should be rare case to load 'shared' by that method
shared = (rbac_db.RbacNeutronDbObjectMixin.
- get_shared_with_tenant(
- common_utils.get_elevated_context(self.obj_context),
- network.NetworkRBAC,
- self.network_id,
- self.project_id))
+ get_shared_with_tenant(self.obj_context.elevated(),
+ network.NetworkRBAC,
+ self.network_id,
+ self.project_id))
setattr(self, 'shared', shared)
self.obj_reset_changes(['shared'])


@@ -20,7 +20,6 @@ from oslo_versionedobjects import fields as obj_fields
import sqlalchemy as sa
from neutron._i18n import _
- from neutron.common import utils as common_utils
from neutron.db import models_v2 as models
from neutron.db import rbac_db_models
from neutron.extensions import rbac as ext_rbac
@@ -117,8 +116,7 @@ class SubnetPool(rbac_db.NeutronRbacObject):
policy = payload.request_body
db_obj = obj_db_api.get_object(
- cls, common_utils.get_elevated_context(context),
- id=policy['object_id'])
+ cls, context.elevated(), id=policy['object_id'])
if not db_obj["address_scope_id"]:
# Nothing to validate


@@ -28,7 +28,6 @@ from sqlalchemy import or_
from sqlalchemy.orm import exc
from neutron._i18n import _
- from neutron.common import utils as common_utils
from neutron.db.models import securitygroup as sg_models
from neutron.db import models_v2
from neutron.objects import base as objects_base
@@ -340,8 +339,7 @@ def _prevent_segment_delete_with_port_bound(resource, event, trigger,
plugin = directory.get_plugin()
for port_id in auto_delete_port_ids:
try:
- plugin.delete_port(
- common_utils.get_elevated_context(payload.context), port_id)
+ plugin.delete_port(payload.context.elevated(), port_id)
except nlib_exc.PortNotFound:
# Don't raise if something else concurrently deleted the port
LOG.debug("Ignoring PortNotFound when deleting port '%s'. "


@@ -24,7 +24,6 @@ from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import session as se
from neutron._i18n import _
- from neutron.common import utils as n_utils
from neutron.conf import quota as quota_conf
from neutron.db.quota import api as quota_api
@@ -350,7 +349,7 @@ class TrackedResource(BaseResource):
# TODO(ralonsoh): declare the OVO class instead the DB model and use
# ``NeutronDbObject.count`` with the needed filters and fields to
# retrieve ("project_id").
- admin_context = n_utils.get_elevated_context(context)
+ admin_context = context.elevated()
with db_api.CONTEXT_READER.using(admin_context):
query = admin_context.session.query(self._model_class.project_id)
query = query.filter(self._model_class.project_id == project_id)


@@ -263,7 +263,7 @@ class L3Scheduler(object, metaclass=abc.ABCMeta):
def create_ha_port_and_bind(self, plugin, context, router_id,
tenant_id, agent, is_manual_scheduling=False):
"""Creates and binds a new HA port for this agent."""
- ctxt = utils.get_elevated_context(context)
+ ctxt = context.elevated()
router_db = plugin._get_router(ctxt, router_id)
creator = functools.partial(self._add_port_from_net_and_ensure_vr_id,
plugin, ctxt, router_db, tenant_id)


@@ -33,7 +33,6 @@ from oslo_utils import excutils
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import extensions
from neutron.common.ovn import utils
- from neutron.common import utils as common_utils
from neutron.db.availability_zone import router as router_az_db
from neutron.db import dns_db
from neutron.db import extraroute_db
@@ -288,7 +287,7 @@ class OVNL3RouterPlugin(service_base.ServicePluginBase,
return fip
def disassociate_floatingips(self, context, port_id, do_notify=True):
- fips = self.get_floatingips(common_utils.get_elevated_context(context),
+ fips = self.get_floatingips(context.elevated(),
filters={'port_id': [port_id]})
router_ids = super(OVNL3RouterPlugin, self).disassociate_floatingips(
context, port_id, do_notify)


@@ -142,9 +142,8 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase):
# dvr_no_external host to one dvr host. So we just do not allow
# all dvr router's floating IP to be binded to a port which
# already has port forwarding.
- router = self.l3_plugin.get_router(
- utils.get_elevated_context(payload.context),
- pf_objs[0].router_id)
+ router = self.l3_plugin.get_router(payload.context.elevated(),
+ pf_objs[0].router_id)
if l3_dvr_db.is_distributed_router(router):
raise pf_exc.PortHasPortForwarding(port_id=port_id)
@@ -211,7 +210,7 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase):
# context to check if the floatingip or port forwarding resources
# are owned by other tenants.
if not context.is_admin:
- context = utils.get_elevated_context(context)
+ context = context.elevated()
# If the logic arrives here, that means we have got update_ip_set and
# its value is not None. So we need to get all port forwarding
# resources based on the request port_id for preparing the next
@@ -331,7 +330,7 @@ class PortForwardingPlugin(fip_pf.PortForwardingPluginBase):
def _check_port_has_binding_floating_ip(self, context, port_forwarding):
port_id = port_forwarding['internal_port_id']
floatingip_objs = l3_obj.FloatingIP.get_objects(
- utils.get_elevated_context(context),
+ context.elevated(),
fixed_port_id=port_id)
if floatingip_objs:
floating_ip_address = floatingip_objs[0].floating_ip_address


@@ -41,7 +41,6 @@ from oslo_config import cfg
from oslo_log import log as logging
from neutron._i18n import _
- from neutron.common import utils as common_utils
from neutron.db import db_base_plugin_common
from neutron.extensions import qos
from neutron.objects import base as base_obj
@@ -251,7 +250,7 @@ class QoSPlugin(qos.QoSPluginBase):
return
policy = policy_object.QosPolicy.get_object(
- common_utils.get_elevated_context(context), id=policy_id)
+ context.elevated(), id=policy_id)
self.validate_policy_for_port(context, policy, port)
def _check_port_for_placement_allocation_change(self, resource, event,
@@ -270,10 +269,9 @@ class QoSPlugin(qos.QoSPluginBase):
if (nl_constants.DEVICE_OWNER_COMPUTE_PREFIX in
orig_port['device_owner']):
original_policy = policy_object.QosPolicy.get_object(
- common_utils.get_elevated_context(context),
- id=original_policy_id)
+ context.elevated(), id=original_policy_id)
policy = policy_object.QosPolicy.get_object(
- common_utils.get_elevated_context(context), id=policy_id)
+ context.elevated(), id=policy_id)
self._change_placement_allocation(original_policy, policy,
orig_port)
@@ -345,7 +343,7 @@ class QoSPlugin(qos.QoSPluginBase):
updated_port = ports_object.Port.get_object(
context, id=payload.desired_state['id'])
policy = policy_object.QosPolicy.get_object(
- common_utils.get_elevated_context(context), id=policy_id)
+ context.elevated(), id=policy_id)
self.validate_policy_for_port(context, policy, updated_port)
@@ -360,7 +358,7 @@
return
policy = policy_object.QosPolicy.get_object(
- common_utils.get_elevated_context(context), id=policy_id)
+ context.elevated(), id=policy_id)
self.validate_policy_for_network(context, policy, network_id)
def _validate_update_network_callback(self, resource, event, trigger,
@@ -376,7 +374,7 @@
return
policy = policy_object.QosPolicy.get_object(
- common_utils.get_elevated_context(context), id=policy_id)
+ context.elevated(), id=policy_id)
self.validate_policy_for_network(
context, policy, network_id=updated_network['id'])


@@ -28,7 +28,6 @@ from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks.producer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
- from neutron.common import utils as common_utils
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk.rpc import constants
@@ -83,7 +82,7 @@ class TrunkSkeleton(object):
@log_helpers.log_method_call
def update_subport_bindings(self, context, subports):
"""Update subport bindings to match trunk host binding."""
- el = common_utils.get_elevated_context(context)
+ el = context.elevated()
ports_by_trunk_id = collections.defaultdict(list)
updated_ports = collections.defaultdict(list)


@@ -25,7 +25,6 @@ from neutron_lib.plugins.ml2 import api
from neutron_lib.services.trunk import constants
from neutron._i18n import _
- from neutron.common import utils as common_utils
from neutron.objects import trunk as trunk_objects
from neutron.services.trunk import exceptions as trunk_exc
from neutron.services.trunk import utils
@@ -213,8 +212,7 @@ class SubPortsValidator(object):
for p in ports:
network_port_map[p['network_id']].append({'port_id': p['id']})
networks = core_plugin.get_networks(
- common_utils.get_elevated_context(context),
- filters={'id': network_port_map})
+ context.elevated(), filters={'id': network_port_map})
subport_mtus = {}
for net in networks:


@@ -17,8 +17,6 @@ from neutron_lib.plugins import directory
from neutron_lib.utils import runtime
from oslo_config import cfg
- from neutron.common import utils as common_utils
def get_agent_types_by_host(context, host):
"""Return the agent types registered on the host."""
@@ -26,8 +24,7 @@ def get_agent_types_by_host(context, host):
core_plugin = directory.get_plugin()
if extensions.is_extension_supported(core_plugin, 'agent'):
agents = core_plugin.get_agents(
- common_utils.get_elevated_context(context),
- filters={'host': [host]})
+ context.elevated(), filters={'host': [host]})
agent_types = [a['agent_type'] for a in agents]
return agent_types


@@ -102,7 +102,7 @@ class RequestTestCase(base.BaseTestCase):
user_context = context.Context(
'fake_user', 'fake_project', is_admin=False)
self.assertFalse(user_context.is_admin)
- admin_context = utils.get_elevated_context(user_context)
+ admin_context = user_context.elevated()
self.assertFalse(user_context.is_admin)
self.assertTrue(admin_context.is_admin)
self.assertNotIn('admin', user_context.roles)


@@ -30,7 +30,6 @@ from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as plugin_utils
from oslo_utils import uuidutils
- from neutron.common import utils as common_utils
from neutron.db import agents_db
from neutron.db import l3_dvr_db
from neutron.db import l3_dvrscheduler_db
@@ -328,9 +327,9 @@ class L3DvrTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
check_for_fip_and_create_agent_gw_port_on_host_if_not_exists(
ctxt, port, 'host'))
if fip:
- c_fip.assert_called_once_with(
- common_utils.get_elevated_context(ctxt),
- fip['floating_network_id'], 'host')
+ c_fip.assert_called_once_with(ctxt.elevated(),
+ fip['floating_network_id'],
+ 'host')
else:
c_fip.assert_not_called()


@@ -25,7 +25,6 @@ from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import uuidutils
- from neutron.common import utils as common_utils
from neutron.db import l3_fip_pools_db
from neutron.extensions import l3
from neutron.objects import network as net_obj
@@ -129,7 +128,7 @@ class FloatingIPPoolsDBIntTestCase(test_l3.L3BaseForIntTests,
self.setup_notification_driver()
self.ctxt = context.Context('fake_user', 'fake_tenant')
- self.admin_ctxt = common_utils.get_elevated_context(self.ctxt)
+ self.admin_ctxt = self.ctxt.elevated()
class FloatingIPPoolsDBSepTestCase(test_l3.L3BaseForSepTests,
@@ -155,4 +154,4 @@ class FloatingIPPoolsDBSepTestCase(test_l3.L3BaseForSepTests,
self.setup_notification_driver()
self.plugin = directory.get_plugin(plugin_constants.L3)
self.ctxt = context.Context('fake_user', 'fake_tenant')
- self.admin_ctxt = common_utils.get_elevated_context(self.ctxt)
+ self.admin_ctxt = self.ctxt.elevated()


@@ -18,7 +18,6 @@ from neutron_lib.services.qos import constants as qos_consts
from oslo_utils import uuidutils
from oslo_versionedobjects import exception
- from neutron.common import utils as common_utils
from neutron.objects.db import api as db_api
from neutron.objects import network as net_obj
from neutron.objects import ports as port_obj
@@ -146,7 +145,7 @@ class QosPolicyObjectTestCase(test_base.BaseObjectIfaceTestCase):
test_to_dict_makes_primitive_field_value())
def test_get_policy_obj_not_found(self):
- context = common_utils.get_elevated_context(self.context)
+ context = self.context.elevated()
self.assertRaises(qos_exc.QosPolicyNotFound,
policy.QosPolicy.get_policy_obj,
context, "fake_id")