neutron/neutron/objects/rbac_db.py
Ihar Hrachyshka 10ada71486 objects: expose database model for NeutronDbObject instances
Sometimes object users need access to the corresponding models that are
used to persist object data. While this is not encouraged, and object
consumers should try to rely solely on the object API and fields, we
should fulfill this special need, at least for now.

One of the use cases for accessing the corresponding database model is
the functions registered by plugins to extend core resources. Those
functions are passed into register_dict_extend_funcs and expect the
model as one of their arguments.
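
For illustration, such an extension function looks roughly like the
following; the function body, the resource name, and MyPlugin are
hypothetical, only register_dict_extend_funcs and the result-dict-plus-
model calling convention come from the existing extension mechanism:

    def _extend_resource_dict(plugin, result, db_model):
        # 'result' is the API response dict; 'db_model' is the SQLAlchemy
        # model instance, which is what this patch keeps reachable from
        # the object
        result['example:attr'] = db_model.example_attr

    # registered on a CommonDbMixin-based plugin class, e.g.:
    MyPlugin.register_dict_extend_funcs('networks', [_extend_resource_dict])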

Later, when more objects are adopted in the base plugin code and we are
ready to switch extensions to objects, we can pass those functions
wrappers that trigger deprecation warnings on attempts to access
attributes that are not available on objects; after a while we can
finally switch to passing objects directly instead of those wrappers.
Of course, that will not happen overnight, and the path will take
several cycles.

To keep the stored reference to the model from influencing other code
that fetches from the session, we detach (expunge) the model from the
active database session on every fetch. We also refresh the model
before detaching it when the corresponding object had synthetic fields
changed, because that is usually an indication that some relationships
may be stale on the model.
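
A minimal sketch of that fetch-time handling; the helper name and the
flag are illustrative, while session.refresh() and session.expunge()
are the standard SQLAlchemy session calls the description refers to:

    def _detach_db_obj(context, db_obj, synthetic_fields_changed):
        # relationships loaded earlier may be stale once synthetic fields
        # were touched, so reload the row from the database first
        if synthetic_fields_changed:
            context.session.refresh(db_obj)
        # detach the model so the stored reference cannot leak state into
        # later queries issued on the same session
        context.session.expunge(db_obj)
        return db_obj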

Since we now consistently detach the model from the active session on
each fetch, we cannot reuse it. So every time we hit update, we need to
refetch the model from the session; otherwise we would hit an error
trying to refresh and/or detach an already detached model. Hence the
change in NeutronDbObject.update to always trigger update_object,
irrespective of whether any persistent fields were changed. This makes
the test_update_no_changes test case incorrect, hence its removal.

Due to the way the RBAC metaclass works, it may trigger cls.get_object
in the middle of object creation (to validate the newly created RBAC
entry against the object). This results in duplicate expunge calls for
the same object model (one during object creation, another when
fetching the same object to validate it for RBAC). To avoid that, the
RBAC code was switched from the objects API to direct
objects.db_api.get_object calls, which avoid triggering the whole model
expunge/refresh machinery.
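
Concretely, the validation callbacks in rbac_db.py now fetch the bare
model with the low-level API (excerpt from validate_rbac_policy_delete
below), instead of the equivalent objects-API call
cls.get_object(context.elevated(), id=policy['object_id']) that would
run the expunge/refresh logic:

    db_obj = obj_db_api.get_object(
        context.elevated(), cls.db_model, id=policy['object_id'])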

Now that models are stored on objects, the patch switches the plugin
code back to passing models in the places where we previously, by
mistake, passed objects into extensions.

Specifically, the switch for allowed address pairs occurred with
I3c937267ce789ed510373616713b3fa9517c18ac. For subnetpools, it happened
in I1415c7a29af86d377ed31cce40888631a34d4811. Neither of those was
released in Mitaka, so the mistake did not break anyone using major
releases. We have also not heard from anyone tracking trunk who was
affected by it.

There are no other objects used in the database code that we pass into
extensions, so we should be good.

Closes-Bug: #1621837
Change-Id: I130609194f15b89df89e5606fb8193849edd14d8
Partially-Implements: blueprint adopt-oslo-versioned-objects-for-db
2016-09-10 03:53:17 +00:00


# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import itertools

from neutron_lib import exceptions as lib_exc
from six import add_metaclass
from six import with_metaclass
from sqlalchemy import and_

from neutron._i18n import _
from neutron.callbacks import events
from neutron.callbacks import registry
from neutron.common import exceptions as n_exc
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.db import rbac_db_mixin
from neutron.db import rbac_db_models as models
from neutron.extensions import rbac as ext_rbac
from neutron.objects import base
from neutron.objects.db import api as obj_db_api


@add_metaclass(abc.ABCMeta)
class RbacNeutronDbObjectMixin(rbac_db_mixin.RbacPluginMixin,
                               base.NeutronDbObject):

    rbac_db_model = None

    @classmethod
    @abc.abstractmethod
    def get_bound_tenant_ids(cls, context, obj_id):
        """Returns ids of all tenants depending on this db object.

        Has to be implemented by classes using RbacNeutronMetaclass.
        The tenants are the ones that need the sharing or 'visibility' of the
        object to them. E.g: for QosPolicy that would be the tenants using the
        Networks and Ports with the shared QosPolicy applied to them.

        :returns: set -- a set of tenants' ids dependent on this object.
        """

    @staticmethod
    def is_network_shared(context, rbac_entries):
        # NOTE(korzen) this method is copied from db_base_plugin_common.
        # The shared attribute for a network now reflects if the network
        # is shared to the calling tenant via an RBAC entry.
        matches = ('*',) + ((context.tenant_id,) if context else ())
        for entry in rbac_entries:
            if (entry.action == models.ACCESS_SHARED and
                    entry.target_tenant in matches):
                return True
        return False

    @staticmethod
    def get_shared_with_tenant(context, rbac_db_model, obj_id, tenant_id):
        # NOTE(korzen) This method enables to query within already started
        # session
        return (common_db_mixin.model_query(context, rbac_db_model).filter(
                and_(rbac_db_model.object_id == obj_id,
                     rbac_db_model.action == models.ACCESS_SHARED,
                     rbac_db_model.target_tenant.in_(
                         ['*', tenant_id]))).count() != 0)

    @classmethod
    def is_shared_with_tenant(cls, context, obj_id, tenant_id):
        ctx = context.elevated()
        rbac_db_model = cls.rbac_db_model
        with ctx.session.begin(subtransactions=True):
            return cls.get_shared_with_tenant(ctx, rbac_db_model,
                                              obj_id, tenant_id)

    @classmethod
    def is_accessible(cls, context, db_obj):
        return (super(
            RbacNeutronDbObjectMixin, cls).is_accessible(context, db_obj) or
                cls.is_shared_with_tenant(context, db_obj.id,
                                          context.tenant_id))

    @classmethod
    def _get_db_obj_rbac_entries(cls, context, rbac_obj_id, rbac_action):
        rbac_db_model = cls.rbac_db_model
        return common_db_mixin.model_query(context, rbac_db_model).filter(
            and_(rbac_db_model.object_id == rbac_obj_id,
                 rbac_db_model.action == rbac_action))

    @classmethod
    def _get_tenants_with_shared_access_to_db_obj(cls, context, obj_id):
        return set(itertools.chain.from_iterable(context.session.query(
            cls.rbac_db_model.target_tenant).filter(
                and_(cls.rbac_db_model.object_id == obj_id,
                     cls.rbac_db_model.action == models.ACCESS_SHARED,
                     cls.rbac_db_model.target_tenant != '*'))))

    @classmethod
    def _validate_rbac_policy_delete(cls, context, obj_id, target_tenant):
        ctx_admin = context.elevated()
        rb_model = cls.rbac_db_model
        bound_tenant_ids = cls.get_bound_tenant_ids(ctx_admin, obj_id)
        db_obj_sharing_entries = cls._get_db_obj_rbac_entries(
            ctx_admin, obj_id, models.ACCESS_SHARED)

        def raise_policy_in_use():
            raise ext_rbac.RbacPolicyInUse(
                object_id=obj_id,
                details='tenant_id={}'.format(target_tenant))

        if target_tenant != '*':
            # if there is a wildcard rule, we can return early because it
            # shares the object globally
            wildcard_sharing_entries = db_obj_sharing_entries.filter(
                rb_model.target_tenant == '*')
            if wildcard_sharing_entries.count():
                return
            if target_tenant in bound_tenant_ids:
                raise_policy_in_use()
            return

        # for the wildcard we need to query all of the rbac entries to
        # see if any allow the object sharing
        other_target_tenants = cls._get_tenants_with_shared_access_to_db_obj(
            ctx_admin, obj_id)
        if not bound_tenant_ids.issubset(other_target_tenants):
            raise_policy_in_use()

    @classmethod
    def validate_rbac_policy_delete(cls, resource, event, trigger, context,
                                    object_type, policy, **kwargs):
        """Callback to handle RBAC_POLICY, BEFORE_DELETE callback.

        :raises: RbacPolicyInUse -- in case the policy is in use.
        """
        if policy['action'] != models.ACCESS_SHARED:
            return
        target_tenant = policy['target_tenant']
        db_obj = obj_db_api.get_object(
            context.elevated(), cls.db_model, id=policy['object_id'])
        if db_obj.tenant_id == target_tenant:
            return
        cls._validate_rbac_policy_delete(context=context,
                                         obj_id=policy['object_id'],
                                         target_tenant=target_tenant)

    @classmethod
    def validate_rbac_policy_update(cls, resource, event, trigger, context,
                                    object_type, policy, **kwargs):
        """Callback to handle RBAC_POLICY, BEFORE_UPDATE callback.

        :raises: RbacPolicyInUse -- in case the update is forbidden.
        """
        prev_tenant = policy['target_tenant']
        new_tenant = kwargs['policy_update']['target_tenant']
        if prev_tenant == new_tenant:
            return
        if new_tenant != '*':
            return cls.validate_rbac_policy_delete(
                resource, event, trigger, context, object_type, policy)

    @classmethod
    def validate_rbac_policy_change(cls, resource, event, trigger, context,
                                    object_type, policy, **kwargs):
        """Callback to validate RBAC_POLICY changes.

        This is the dispatching function for create, update and delete
        callbacks. On creation and update, verify that the creator is an admin
        or owns the resource being shared.
        """
        # TODO(hdaniel): As this code was shamelessly stolen from
        # NeutronDbPluginV2.validate_network_rbac_policy_change(), those pieces
        # should be synced and contain the same bugs, until Network RBAC logic
        # (hopefully) melded with this one.
        if object_type != cls.rbac_db_model.object_type:
            return
        db_obj = obj_db_api.get_object(
            context.elevated(), cls.db_model, id=policy['object_id'])
        if event in (events.BEFORE_CREATE, events.BEFORE_UPDATE):
            if (not context.is_admin and
                    db_obj['tenant_id'] != context.tenant_id):
                msg = _("Only admins can manipulate policies on objects "
                        "they do not own")
                raise lib_exc.InvalidInput(error_message=msg)
        callback_map = {events.BEFORE_UPDATE: cls.validate_rbac_policy_update,
                        events.BEFORE_DELETE: cls.validate_rbac_policy_delete}
        if event in callback_map:
            return callback_map[event](resource, event, trigger, context,
                                       object_type, policy, **kwargs)

    def attach_rbac(self, obj_id, tenant_id, target_tenant='*'):
        obj_type = self.rbac_db_model.object_type
        rbac_policy = {'rbac_policy': {'object_id': obj_id,
                                       'target_tenant': target_tenant,
                                       'tenant_id': tenant_id,
                                       'object_type': obj_type,
                                       'action': models.ACCESS_SHARED}}
        return self.create_rbac_policy(self.obj_context, rbac_policy)

    def update_shared(self, is_shared_new, obj_id):
        admin_context = self.obj_context.elevated()
        shared_prev = obj_db_api.get_object(admin_context, self.rbac_db_model,
                                            object_id=obj_id,
                                            target_tenant='*',
                                            action=models.ACCESS_SHARED)
        is_shared_prev = bool(shared_prev)
        if is_shared_prev == is_shared_new:
            return

        # 'shared' goes False -> True
        if not is_shared_prev and is_shared_new:
            self.attach_rbac(obj_id, self.obj_context.tenant_id)
            return

        # 'shared' goes True -> False is actually an attempt to delete
        # rbac rule for sharing obj_id with target_tenant = '*'
        self._validate_rbac_policy_delete(self.obj_context, obj_id, '*')
        return self.obj_context.session.delete(shared_prev)


def _update_post(self, obj_changes):
    if "shared" in obj_changes:
        self.update_shared(self.shared, self.id)


def _update_hook(self, update_orig):
    with db_api.autonested_transaction(self.obj_context.session):
        # NOTE(slaweq): copy of object changes is required to pass it later to
        # _update_post method because update() will reset all those changes
        obj_changes = self.obj_get_changes()
        update_orig(self)
        _update_post(self, obj_changes)


def _create_post(self):
    if self.shared:
        self.attach_rbac(self.id, self.obj_context.tenant_id)


def _create_hook(self, orig_create):
    with db_api.autonested_transaction(self.obj_context.session):
        orig_create(self)
        _create_post(self)


def _to_dict_hook(self, to_dict_orig):
    dct = to_dict_orig(self)
    dct['shared'] = self.is_shared_with_tenant(self.obj_context,
                                               self.id,
                                               self.obj_context.tenant_id)
    return dct
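

# NOTE: the module-level hook functions above are not called directly by
# object consumers; RbacNeutronMetaclass below wraps the original
# create/update/to_dict methods of the generated class with them (see
# replace_class_methods_with_hooks), which is how changes to the 'shared'
# attribute are reflected in RBAC entries on every CRUD operation.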


class RbacNeutronMetaclass(type):
    """Adds support for RBAC in NeutronDbObjects.

    Injects code for CRUD operations and modifies existing ops to do so.
    """

    @classmethod
    def _get_attribute(mcs, attribute_name, bases):
        for b in bases:
            attribute = getattr(b, attribute_name, None)
            if attribute:
                return attribute

    @classmethod
    def get_attribute(mcs, attribute_name, bases, dct):
        return (dct.get(attribute_name, None) or
                mcs._get_attribute(attribute_name, bases))

    @classmethod
    def update_synthetic_fields(mcs, bases, dct):
        if not dct.get('synthetic_fields', None):
            synthetic_attr = mcs.get_attribute('synthetic_fields', bases, dct)
            dct['synthetic_fields'] = synthetic_attr or []
        if 'shared' in dct['synthetic_fields']:
            raise n_exc.ObjectActionError(
                action=_('shared attribute switching to synthetic'),
                reason=_('already a synthetic attribute'))
        dct['synthetic_fields'].append('shared')

    @staticmethod
    def subscribe_to_rbac_events(class_instance):
        for e in (events.BEFORE_CREATE, events.BEFORE_UPDATE,
                  events.BEFORE_DELETE):
            registry.subscribe(class_instance.validate_rbac_policy_change,
                               rbac_db_mixin.RBAC_POLICY, e)

    @staticmethod
    def validate_existing_attrs(cls_name, dct):
        if 'shared' not in dct['fields']:
            raise KeyError(_('No shared key in %s fields') % cls_name)
        if 'rbac_db_model' not in dct:
            raise AttributeError(_('rbac_db_model not found in %s') % cls_name)

    @staticmethod
    def get_replaced_method(orig_method, new_method):
        def func(self):
            return new_method(self, orig_method)
        return func

    @classmethod
    def replace_class_methods_with_hooks(mcs, bases, dct):
        methods_replacement_map = {'create': _create_hook,
                                   'update': _update_hook,
                                   'to_dict': _to_dict_hook}
        for orig_method_name, new_method in methods_replacement_map.items():
            orig_method = mcs.get_attribute(orig_method_name, bases, dct)
            hook_method = mcs.get_replaced_method(orig_method,
                                                  new_method)
            dct[orig_method_name] = hook_method

    def __new__(mcs, name, bases, dct):
        mcs.validate_existing_attrs(name, dct)
        mcs.update_synthetic_fields(bases, dct)
        mcs.replace_class_methods_with_hooks(bases, dct)
        cls = type(name, (RbacNeutronDbObjectMixin,) + bases, dct)
        cls.add_extra_filter_name('shared')
        mcs.subscribe_to_rbac_events(cls)
        return cls


NeutronRbacObject = with_metaclass(RbacNeutronMetaclass, base.NeutronDbObject)
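
For context, here is a rough sketch of how a concrete object opts into
this machinery. It is patterned loosely after existing consumers such as
QosPolicy; the Widget class, its models, and its fields are illustrative
placeholders, and the versioned-objects registry decorator that real
objects use is omitted:

    from oslo_versionedobjects import fields as obj_fields

    from neutron.objects import rbac_db


    class WidgetRBAC(object):
        """Stand-in for a real RBAC model class (e.g. QosPolicyRBAC)."""
        object_type = 'widget'


    class Widget(rbac_db.NeutronRbacObject):
        VERSION = '1.0'

        # both attributes are checked by RbacNeutronMetaclass.__new__
        db_model = None        # the resource's SQLAlchemy model goes here
        rbac_db_model = WidgetRBAC

        fields = {
            'id': obj_fields.UUIDField(),
            'tenant_id': obj_fields.StringField(),
            # 'shared' must be a declared field; the metaclass turns it
            # into a synthetic field backed by RBAC entries
            'shared': obj_fields.BooleanField(default=False),
        }

        @classmethod
        def get_bound_tenant_ids(cls, context, obj_id):
            # return the tenants that currently consume this object; they
            # block deletion of the sharing RBAC entries they depend on
            return set()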