Use oslo.policy

Adjust tacker/policy.py and tacker/context.py to match the
implementations used in neutron.

Change-Id: I93434ce78f87b6e59b5ef6367ea2596a80eb4676
Partial-bug: #1552282
gong yong sheng 2016-06-11 11:38:25 +08:00
parent df7c0dad5b
commit 36aadfc8ea
7 changed files with 205 additions and 1012 deletions

View File

@@ -31,6 +31,7 @@ oslo.db>=4.1.0 # Apache-2.0
oslo.log>=1.14.0 # Apache-2.0
oslo.messaging>=5.2.0 # Apache-2.0
oslo.middleware>=3.0.0 # Apache-2.0
oslo.policy>=1.9.0 # Apache-2.0
oslo.rootwrap>=2.0.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.service>=1.10.0 # Apache-2.0
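
With oslo.policy added as a dependency, rule loading and evaluation move from the copied-in tacker.openstack.common.policy module (deleted below) to oslo_policy.policy.Enforcer. A rough standalone sketch of that Enforcer workflow follows; the rule names and credentials are illustrative, not tacker's actual policy.json entries:

from oslo_config import cfg
from oslo_policy import policy

# Build an Enforcer; rules normally come from policy.json via load_rules(),
# but for this sketch they are set directly in code.
enforcer = policy.Enforcer(cfg.CONF)
enforcer.set_rules(policy.Rules.from_dict({
    'context_is_admin': 'role:admin',
    'admin_or_owner': 'rule:context_is_admin or tenant_id:%(tenant_id)s',
    'get_vnf': 'rule:admin_or_owner',
}))

target = {'tenant_id': 'tenant-1'}
creds = {'roles': ['member'], 'tenant_id': 'tenant-1'}
print(enforcer.enforce('get_vnf', target, creds))            # True, owner matches
print(enforcer.enforce('context_is_admin', target, creds))   # False, no admin role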

View File

@@ -26,6 +26,7 @@ from tacker.common import exceptions as n_exc
LOG = logging.getLogger(__name__)
ATTRIBUTES_TO_UPDATE = 'attributes_to_update'
ATTR_NOT_SPECIFIED = object()
# Defining a constant to avoid repeating string literal in several modules
SHARED = 'shared'
@@ -608,6 +609,8 @@ validators = {'type:dict': _validate_dict,
RESOURCE_ATTRIBUTE_MAP = {}
RESOURCE_FOREIGN_KEYS = {}
PLURALS = {'extensions': 'extension'}
EXT_NSES = {}

View File

@@ -19,10 +19,10 @@ import copy
import datetime
from oslo_context import context as oslo_context
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from tacker.db import api as db_api
from tacker.openstack.common import local
from tacker import policy
@@ -36,51 +36,32 @@ class ContextBase(oslo_context.RequestContext):
"""
def __init__(self, user_id, tenant_id, is_admin=None, read_deleted="no",
roles=None, timestamp=None, load_admin_roles=True,
request_id=None, tenant_name=None, user_name=None,
overwrite=True, auth_token=None, **kwargs):
def __init__(self, user_id, tenant_id, is_admin=None, roles=None,
timestamp=None, request_id=None, tenant_name=None,
user_name=None, overwrite=True, auth_token=None,
**kwargs):
"""Object initialization.
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
*only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
super(ContextBase, self).__init__(user=user_id, tenant=tenant_id,
super(ContextBase, self).__init__(auth_token=auth_token,
user=user_id, tenant=tenant_id,
is_admin=is_admin,
request_id=request_id,
auth_token=auth_token)
overwrite=overwrite,
roles=roles)
self.user_name = user_name
self.tenant_name = tenant_name
self.read_deleted = read_deleted
if not timestamp:
timestamp = datetime.datetime.utcnow()
self.timestamp = timestamp
self._session = None
self.roles = roles or []
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
elif self.is_admin and load_admin_roles:
# Ensure context is populated with admin roles
admin_roles = policy.get_admin_roles()
if admin_roles:
self.roles = list(set(self.roles) | set(admin_roles))
# Allow openstack.common.log to access the context
if overwrite or not hasattr(local.store, 'context'):
local.store.context = self
# Log only once the context has been configured to prevent
# format errors.
if kwargs:
LOG.debug(_('Arguments dropped when creating '
'context: %s'), kwargs)
@property
def project_id(self):
@@ -102,75 +83,63 @@ class ContextBase(oslo_context.RequestContext):
def user_id(self, user_id):
self.user = user_id
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def to_dict(self):
return {'user_id': self.user_id,
'tenant_id': self.tenant_id,
'project_id': self.project_id,
'is_admin': self.is_admin,
'read_deleted': self.read_deleted,
'roles': self.roles,
'timestamp': str(self.timestamp),
'request_id': self.request_id,
'tenant': self.tenant,
'user': self.user,
'tenant_name': self.tenant_name,
'project_name': self.tenant_name,
'user_name': self.user_name,
'auth_token': self.auth_token,
}
context = super(ContextBase, self).to_dict()
context.update({
'user_id': self.user_id,
'tenant_id': self.tenant_id,
'project_id': self.project_id,
'timestamp': str(self.timestamp),
'tenant_name': self.tenant_name,
'project_name': self.tenant_name,
'user_name': self.user_name,
})
return context
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, read_deleted=None):
def elevated(self):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in [x.lower() for x in context.roles]:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
context.roles = context.roles + ["admin"]
return context
class Context(ContextBase):
@enginefacade.transaction_context_provider
class ContextBaseWithSession(ContextBase):
pass
class Context(ContextBaseWithSession):
def __init__(self, *args, **kwargs):
super(Context, self).__init__(*args, **kwargs)
self._session = None
@property
def session(self):
# TODO(akamyshnikova): checking for session attribute won't be needed
# when reader and writer will be used
if hasattr(super(Context, self), 'session'):
return super(Context, self).session
if self._session is None:
self._session = db_api.get_session()
return self._session
def get_admin_context(read_deleted="no", load_admin_roles=True):
def get_admin_context():
return Context(user_id=None,
tenant_id=None,
is_admin=True,
read_deleted=read_deleted,
load_admin_roles=load_admin_roles,
overwrite=False)
def get_admin_context_without_session(read_deleted="no"):
def get_admin_context_without_session():
return ContextBase(user_id=None,
tenant_id=None,
is_admin=True,
read_deleted=read_deleted)
is_admin=True)
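
The reworked ContextBase above hands roles, auth_token, request_id and overwrite handling to oslo_context.RequestContext and keeps only the tacker-specific extras, while Context gains its session through enginefacade's transaction_context_provider. A rough standalone approximation of that pattern; the class below is illustrative, not tacker's, and mirrors the field names in the diff:

import copy
import datetime

from oslo_context import context as oslo_context


class DemoContext(oslo_context.RequestContext):
    # Delegate the common fields to the base class, as the new ContextBase does.
    def __init__(self, user_id, tenant_id, is_admin=None, roles=None,
                 timestamp=None, **kwargs):
        super(DemoContext, self).__init__(user=user_id, tenant=tenant_id,
                                          is_admin=is_admin, roles=roles,
                                          **kwargs)
        self.timestamp = timestamp or datetime.datetime.utcnow()

    def to_dict(self):
        # The base to_dict() already carries the common fields; only the
        # extras need merging in, mirroring the new to_dict() above.
        d = super(DemoContext, self).to_dict()
        d.update({'user_id': self.user, 'tenant_id': self.tenant,
                  'timestamp': str(self.timestamp)})
        return d

    def elevated(self):
        ctx = copy.copy(self)
        ctx.is_admin = True
        if 'admin' not in [r.lower() for r in ctx.roles]:
            ctx.roles = ctx.roles + ['admin']
        return ctx


ctx = DemoContext('user-1', 'tenant-1', roles=['member'])
print(ctx.to_dict()['tenant_id'])    # tenant-1
print(ctx.elevated().roles)          # ['member', 'admin']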

View File

@@ -1,778 +0,0 @@
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Common Policy Engine Implementation
Policies can be expressed in one of two forms: A list of lists, or a
string written in the new policy language.
In the list-of-lists representation, each check inside the innermost
list is combined as with an "and" conjunction--for that check to pass,
all the specified checks must pass. These innermost lists are then
combined as with an "or" conjunction. This is the original way of
expressing policies, but there now exists a new way: the policy
language.
In the policy language, each check is specified the same way as in the
list-of-lists representation: a simple "a:b" pair that is matched to
the correct code to perform that check. However, conjunction
operators are available, allowing for more expressiveness in crafting
policies.
As an example, take the following rule, expressed in the list-of-lists
representation::
[["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]]
In the policy language, this becomes::
role:admin or (project_id:%(project_id)s and role:projectadmin)
The policy language also has the "not" operator, allowing a richer
policy rule::
project_id:%(project_id)s and not role:dunce
Finally, two special policy checks should be mentioned; the policy
check "@" will always accept an access, and the policy check "!" will
always reject an access. (Note that if a rule is either the empty
list ("[]") or the empty string, this is equivalent to the "@" policy
check.) Of these, the "!" policy check is probably the most useful,
as it allows particular rules to be explicitly disabled.
"""
import abc
import re
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from six.moves.urllib import parse as urllib_parse
from six.moves.urllib import request as urlrequest
from tacker.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
_rules = None
_checks = {}
class Rules(dict):
"""
A store for rules. Handles the default_rule setting directly.
"""
@classmethod
def load_json(cls, data, default_rule=None):
"""
Allow loading of JSON rule data.
"""
# Suck in the JSON data and parse the rules
rules = dict((k, parse_rule(v)) for k, v in
jsonutils.loads(data).items())
return cls(rules, default_rule)
def __init__(self, rules=None, default_rule=None):
"""Initialize the Rules store."""
super(Rules, self).__init__(rules or {})
self.default_rule = default_rule
def __missing__(self, key):
"""Implements the default rule handling."""
# If the default rule isn't actually defined, do something
# reasonably intelligent
if not self.default_rule or self.default_rule not in self:
raise KeyError(key)
return self[self.default_rule]
def __str__(self):
"""Dumps a string representation of the rules."""
# Start by building the canonical strings for the rules
out_rules = {}
for key, value in self.items():
# Use empty string for singleton TrueCheck instances
if isinstance(value, TrueCheck):
out_rules[key] = ''
else:
out_rules[key] = str(value)
# Dump a pretty-printed JSON representation
return jsonutils.dumps(out_rules, indent=4)
# Really have to figure out a way to deprecate this
def set_rules(rules):
"""Set the rules in use for policy checks."""
global _rules
_rules = rules
# Ditto
def reset():
"""Clear the rules used for policy checks."""
global _rules
_rules = None
def check(rule, target, creds, exc=None, *args, **kwargs):
"""
Checks authorization of a rule against the target and credentials.
:param rule: The rule to evaluate.
:param target: As much information about the object being operated
on as possible, as a dictionary.
:param creds: As much information about the user performing the
action as possible, as a dictionary.
:param exc: Class of the exception to raise if the check fails.
Any remaining arguments passed to check() (both
positional and keyword arguments) will be passed to
the exception class. If exc is not provided, returns
False.
:return: Returns False if the policy does not allow the action and
exc is not provided; otherwise, returns a value that
evaluates to True. Note: for rules using the "case"
expression, this True value will be the specified string
from the expression.
"""
# Allow the rule to be a Check tree
if isinstance(rule, BaseCheck):
result = rule(target, creds)
elif not _rules:
# No rules to reference means we're going to fail closed
result = False
else:
try:
# Evaluate the rule
result = _rules[rule](target, creds)
except KeyError:
# If the rule doesn't exist, fail closed
result = False
# If it is False, raise the exception if requested
if exc and result is False:
raise exc(*args, **kwargs)
return result
class BaseCheck(object):
"""
Abstract base class for Check classes.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __str__(self):
"""
Retrieve a string representation of the Check tree rooted at
this node.
"""
pass
@abc.abstractmethod
def __call__(self, target, cred):
"""
Perform the check. Returns False to reject the access or a
true value (not necessary True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""
A policy check that always returns False (disallow).
"""
def __str__(self):
"""Return a string representation of this check."""
return "!"
def __call__(self, target, cred):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""
A policy check that always returns True (allow).
"""
def __str__(self):
"""Return a string representation of this check."""
return "@"
def __call__(self, target, cred):
"""Check the policy."""
return True
class Check(BaseCheck):
"""
A base class to allow for user-defined policy checks.
"""
def __init__(self, kind, match):
"""
:param kind: The kind of the check, i.e., the field before the
':'.
:param match: The match of the check, i.e., the field after
the ':'.
"""
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return "%s:%s" % (self.kind, self.match)
class NotCheck(BaseCheck):
"""
A policy check that inverts the result of another policy check.
Implements the "not" operator.
"""
def __init__(self, rule):
"""
Initialize the 'not' check.
:param rule: The rule to negate. Must be a Check.
"""
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return "not %s" % self.rule
def __call__(self, target, cred):
"""
Check the policy. Returns the logical inverse of the wrapped
check.
"""
return not self.rule(target, cred)
class AndCheck(BaseCheck):
"""
A policy check that requires that a list of other checks all
return True. Implements the "and" operator.
"""
def __init__(self, rules):
"""
Initialize the 'and' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""
Check the policy. Requires that all rules accept in order to
return True.
"""
for rule in self.rules:
if not rule(target, cred):
return False
return True
def add_check(self, rule):
"""
Allows addition of another rule to the list of rules that will
be tested. Returns the AndCheck object for convenience.
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""
A policy check that requires that at least one of a list of other
checks returns True. Implements the "or" operator.
"""
def __init__(self, rules):
"""
Initialize the 'or' check.
:param rules: A list of rules that will be tested.
"""
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return "(%s)" % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred):
"""
Check the policy. Requires that at least one rule accept in
order to return True.
"""
for rule in self.rules:
if rule(target, cred):
return True
return False
def add_check(self, rule):
"""
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def _parse_check(rule):
"""
Parse a single base check rule into an appropriate Check object.
"""
# Handle the special checks
if rule == '!':
return FalseCheck()
elif rule == '@':
return TrueCheck()
try:
kind, match = rule.split(':', 1)
except Exception:
LOG.exception(_("Failed to understand rule %(rule)s"), locals())
# If the rule is invalid, we'll fail closed
return FalseCheck()
# Find what implements the check
if kind in _checks:
return _checks[kind](kind, match)
elif None in _checks:
return _checks[None](kind, match)
else:
LOG.error(_("No handler for matches of kind %s"), kind)
return FalseCheck()
def _parse_list_rule(rule):
"""
Provided for backwards compatibility. Translates the old
list-of-lists syntax into a tree of Check objects.
"""
# Empty rule defaults to True
if not rule:
return TrueCheck()
# Outer list is joined by "or"; inner list by "and"
or_list = []
for inner_rule in rule:
# Elide empty inner lists
if not inner_rule:
continue
# Handle bare strings
if isinstance(inner_rule, basestring):
inner_rule = [inner_rule]
# Parse the inner rules into Check objects
and_list = [_parse_check(r) for r in inner_rule]
# Append the appropriate check to the or_list
if len(and_list) == 1:
or_list.append(and_list[0])
else:
or_list.append(AndCheck(and_list))
# If we have only one check, omit the "or"
if not or_list:
return FalseCheck()
elif len(or_list) == 1:
return or_list[0]
return OrCheck(or_list)
# Used for tokenizing the policy language
_tokenize_re = re.compile(r'\s+')
def _parse_tokenize(rule):
"""
Tokenizer for the policy language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
for tok in _tokenize_re.split(rule):
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in ('and', 'or', 'not'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'check', _parse_check(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
class ParseStateMeta(type):
"""
Metaclass for the ParseState class. Facilitates identifying
reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""
Create the class. Injects the 'reducers' list, a list of
tuples matching token sequences to the names of the
corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
def reducer(*tokens):
"""
Decorator for reduction methods. Arguments are a sequence of
tokens, in order, which should trigger running this reduction
method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseState(object):
"""
Implement the core of parsing the policy language. Uses a greedy
reduction algorithm to reduce a sequence of tokens into a single
terminal, the value of which will be the root of the Check tree.
Note: error reporting is rather lacking. The best we can get with
this parser formulation is an overall "parse failed" error.
Fortunately, the policy language is simple enough that this
shouldn't be that big a problem.
"""
__metaclass__ = ParseStateMeta
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""
Perform a greedy reduction of the token stream. If a reducer
method matches, it will be executed, then the reduce() method
will be called recursively to search for any more possible
reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state. Calls reduce()."""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
@property
def result(self):
"""
Obtain the final result of the parse. Raises ValueError if
the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError("Could not parse rule")
return self.values[0]
@reducer('(', 'check', ')')
@reducer('(', 'and_expr', ')')
@reducer('(', 'or_expr', ')')
def _wrap_check(self, _p1, check, _p2):
"""Turn parenthesized expressions into a 'check' token."""
return [('check', check)]
@reducer('check', 'and', 'check')
def _make_and_expr(self, check1, _and, check2):
"""
Create an 'and_expr' from two checks joined by the 'and'
operator.
"""
return [('and_expr', AndCheck([check1, check2]))]
@reducer('and_expr', 'and', 'check')
def _extend_and_expr(self, and_expr, _and, check):
"""
Extend an 'and_expr' by adding one more check.
"""
return [('and_expr', and_expr.add_check(check))]
@reducer('check', 'or', 'check')
def _make_or_expr(self, check1, _or, check2):
"""
Create an 'or_expr' from two checks joined by the 'or'
operator.
"""
return [('or_expr', OrCheck([check1, check2]))]
@reducer('or_expr', 'or', 'check')
def _extend_or_expr(self, or_expr, _or, check):
"""
Extend an 'or_expr' by adding one more check.
"""
return [('or_expr', or_expr.add_check(check))]
@reducer('not', 'check')
def _make_not_expr(self, _not, check):
"""Invert the result of another check."""
return [('check', NotCheck(check))]
def _parse_text_rule(rule):
"""
Translates a policy written in the policy language into a tree of
Check objects.
"""
# Empty rule means always accept
if not rule:
return TrueCheck()
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(rule):
state.shift(tok, value)
try:
return state.result
except ValueError:
# Couldn't parse the rule
LOG.exception(_("Failed to understand rule %(rule)r"), locals())
# Fail closed
return FalseCheck()
def parse_rule(rule):
"""
Parses a policy rule into a tree of Check objects.
"""
# If the rule is a string, it's in the policy language
if isinstance(rule, basestring):
return _parse_text_rule(rule)
return _parse_list_rule(rule)
def register(name, func=None):
"""
Register a function or Check class as a policy check.
:param name: Gives the name of the check type, e.g., 'rule',
'role', etc. If name is None, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register("rule")
class RuleCheck(Check):
def __call__(self, target, creds):
"""
Recursively checks credentials based on the defined rules.
"""
try:
return _rules[self.match](target, creds)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register("role")
class RoleCheck(Check):
def __call__(self, target, creds):
"""Check that there is a matching role in the cred dict."""
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
def __call__(self, target, creds):
"""
Check http: rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly 'True'.
"""
url = ('http:' + self.match) % target
data = {'target': jsonutils.dumps(target),
'credentials': jsonutils.dumps(creds)}
post_data = urllib_parse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
return f.read() == "True"
@register(None)
class GenericCheck(Check):
def __call__(self, target, creds):
"""
Check an individual match.
Matches look like:
tenant:%(tenant_id)s
role:compute:admin
"""
# TODO(termie): do dict inspection via dot syntax
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
return False

View File

@@ -13,133 +13,114 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy engine for tacker. Largely copied from nova.
"""
import itertools
import collections
import re
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_policy import policy
from oslo_utils import excutils
from oslo_utils import importutils
import six
from tacker.i18n import _, _LE, _LW
from tacker.api.v1 import attributes
from tacker.common import exceptions
import tacker.common.utils as utils
from tacker.openstack.common import policy
LOG = logging.getLogger(__name__)
_POLICY_PATH = None
_POLICY_CACHE = {}
ADMIN_CTX_POLICY = 'context_is_admin'
# Maps deprecated 'extension' policies to new-style policies
DEPRECATED_POLICY_MAP = {
'extension:provider_network':
['network:provider:network_type',
'network:provider:physical_network',
'network:provider:segmentation_id'],
'extension:router':
['network:router:external'],
'extension:port_binding':
['port:binding:vif_type', 'port:binding:vif_details',
'port:binding:profile', 'port:binding:host_id']
}
DEPRECATED_ACTION_MAP = {
'view': ['get'],
'set': ['create', 'update']
}
cfg.CONF.import_opt('policy_file', 'tacker.common.config')
_ENFORCER = None
ADMIN_CTX_POLICY = 'context_is_admin'
def reset():
global _POLICY_PATH
global _POLICY_CACHE
_POLICY_PATH = None
_POLICY_CACHE = {}
policy.reset()
global _ENFORCER
if _ENFORCER:
_ENFORCER.clear()
_ENFORCER = None
def init():
global _POLICY_PATH
global _POLICY_CACHE
if not _POLICY_PATH:
_POLICY_PATH = utils.find_config_file({}, cfg.CONF.policy_file)
if not _POLICY_PATH:
raise exceptions.PolicyFileNotFound(path=cfg.CONF.policy_file)
# pass _set_brain to read_cached_file so that the policy brain
# is reset only if the file has changed
utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
reload_func=_set_rules)
def init(conf=cfg.CONF, policy_file=None):
"""Init an instance of the Enforcer class."""
global _ENFORCER
if not _ENFORCER:
_ENFORCER = policy.Enforcer(conf, policy_file=policy_file)
_ENFORCER.load_rules(True)
def get_resource_and_action(action):
"""Extract resource and action (write, read) from api operation."""
def refresh(policy_file=None):
"""Reset policy and init a new instance of Enforcer."""
reset()
init(policy_file=policy_file)
def get_resource_and_action(action, pluralized=None):
"""Return resource and enforce_attr_based_check(boolean).
It is per resource and action extracted from api operation.
"""
data = action.split(':', 1)[0].split('_', 1)
return ("%ss" % data[-1], data[0] != 'get')
resource = pluralized or ("%ss" % data[-1])
enforce_attr_based_check = data[0] not in ('get', 'delete')
return (resource, enforce_attr_based_check)
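
The rewritten get_resource_and_action() now also reports whether attribute-based checks should be enforced (they are skipped for get and delete) and accepts an explicit plural for irregular resource names. A quick illustration, reimplemented inline so it runs without importing tacker; the action names are typical tacker-style examples:

def get_resource_and_action(action, pluralized=None):
    data = action.split(':', 1)[0].split('_', 1)
    resource = pluralized or ("%ss" % data[-1])
    enforce_attr_based_check = data[0] not in ('get', 'delete')
    return (resource, enforce_attr_based_check)

print(get_resource_and_action('create_vnf'))    # ('vnfs', True)
print(get_resource_and_action('get_vnf'))       # ('vnfs', False)
print(get_resource_and_action('delete_vnf'))    # ('vnfs', False)
print(get_resource_and_action('update_firewall_policy',
                              pluralized='firewall_policies'))
# ('firewall_policies', True)
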
def _set_rules(data):
default_rule = 'default'
LOG.debug(_("Loading policies from file: %s"), _POLICY_PATH)
# Ensure backward compatibility with folsom/grizzly convention
# for extension rules
policies = policy.Rules.load_json(data, default_rule)
for pol in policies.keys():
if any([pol.startswith(depr_pol) for depr_pol in
DEPRECATED_POLICY_MAP.keys()]):
LOG.warning(_("Found deprecated policy rule:%s. Please consider "
"upgrading your policy configuration file"), pol)
pol_name, action = pol.rsplit(':', 1)
try:
new_actions = DEPRECATED_ACTION_MAP[action]
new_policies = DEPRECATED_POLICY_MAP[pol_name]
# bind new actions and policies together
for actual_policy in ['_'.join(item) for item in
itertools.product(new_actions,
new_policies)]:
if actual_policy not in policies:
# New policy, same rule
LOG.info(_("Inserting policy:%(new_policy)s in place "
"of deprecated policy:%(old_policy)s"),
{'new_policy': actual_policy,
'old_policy': pol})
policies[actual_policy] = policies[pol]
# Remove old-style policy
del policies[pol]
except KeyError:
LOG.error(_("Backward compatibility unavailable for "
"deprecated policy %s. The policy will "
"not be enforced"), pol)
policy.set_rules(policies)
def set_rules(policies, overwrite=True):
"""Set rules based on the provided dict of rules.
:param policies: New policies to use. It should be an instance of dict.
:param overwrite: Whether to overwrite current rules or update them
with the new rules.
"""
LOG.debug("Loading policies from file: %s", _ENFORCER.policy_path)
init()
_ENFORCER.set_rules(policies, overwrite)
def _is_attribute_explicitly_set(attribute_name, resource, target):
"""Verify that an attribute is present and has a non-default value."""
def _is_attribute_explicitly_set(attribute_name, resource, target, action):
"""Verify that an attribute is present and is explicitly set."""
if 'update' in action:
# In the case of update, the function should not pay attention to a
# default value of an attribute, but check whether it was explicitly
# marked as being updated instead.
return (attribute_name in target[attributes.ATTRIBUTES_TO_UPDATE] and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED)
return ('default' in resource[attribute_name] and
attribute_name in target and
target[attribute_name] is not attributes.ATTR_NOT_SPECIFIED and
target[attribute_name] != resource[attribute_name]['default'])
def _should_validate_sub_attributes(attribute, sub_attr):
"""Verify that sub-attributes are iterable and should be validated."""
validate = attribute.get('validate')
return (validate and isinstance(sub_attr, collections.Iterable) and
any([k.startswith('type:dict') and
v for (k, v) in six.iteritems(validate)]))
def _build_subattr_match_rule(attr_name, attr, action, target):
"""Create the rule to match for sub-attribute policy checks."""
# TODO(salv-orlando): Instead of relying on validator info, introduce
# typing for API attributes
# Expect a dict as type descriptor
validate = attr['validate']
key = filter(lambda k: k.startswith('type:dict'), validate.keys())
key = list(filter(lambda k: k.startswith('type:dict'), validate.keys()))
if not key:
LOG.warning(_("Unable to find data type descriptor for attribute %s"),
LOG.warning(_LW("Unable to find data type descriptor "
"for attribute %s"),
attr_name)
return
data = validate[key[0]]
if not isinstance(data, dict):
LOG.debug(_("Attribute type descriptor is not a dict. Unable to "
"generate any sub-attr policy rule for %s."),
LOG.debug("Attribute type descriptor is not a dict. Unable to "
"generate any sub-attr policy rule for %s.",
attr_name)
return
sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
@@ -150,7 +131,17 @@ def _build_subattr_match_rule(attr_name, attr, action, target):
return policy.AndCheck(sub_attr_rules)
def _build_match_rule(action, target):
def _process_rules_list(rules, match_rule):
"""Recursively walk a policy rule to extract a list of match entries."""
if isinstance(match_rule, policy.RuleCheck):
rules.append(match_rule.match)
elif isinstance(match_rule, policy.AndCheck):
for rule in match_rule.rules:
_process_rules_list(rules, rule)
return rules
def _build_match_rule(action, target, pluralized):
"""Create the rule to match for a given action.
The policy rule to be matched is built in the following way:
@@ -163,25 +154,23 @@ def _build_match_rule(action, target):
(e.g.: create_router:external_gateway_info:network_id)
"""
match_rule = policy.RuleCheck('rule', action)
resource, is_write = get_resource_and_action(action)
# Attribute-based checks shall not be enforced on GETs
if is_write:
resource, enforce_attr_based_check = get_resource_and_action(
action, pluralized)
if enforce_attr_based_check:
# assigning to variable with short name for improving readability
res_map = attributes.RESOURCE_ATTRIBUTE_MAP
if resource in res_map:
for attribute_name in res_map[resource]:
if _is_attribute_explicitly_set(attribute_name,
res_map[resource],
target):
target, action):
attribute = res_map[resource][attribute_name]
if 'enforce_policy' in attribute:
attr_rule = policy.RuleCheck('rule', '%s:%s' %
(action, attribute_name))
# Build match entries for sub-attributes, if present
validate = attribute.get('validate')
if (validate and any([k.startswith('type:dict') and v
for (k, v) in
six.iteritems(validate)])):
# Build match entries for sub-attributes
if _should_validate_sub_attributes(
attribute, target[attribute_name]):
attr_rule = policy.AndCheck(
[attr_rule, _build_subattr_match_rule(
attribute_name, attribute,
@@ -208,11 +197,11 @@ class OwnerCheck(policy.Check):
def __init__(self, kind, match):
# Process the match
try:
self.target_field = re.findall('^\%\((.*)\)s$',
self.target_field = re.findall(r'^\%\((.*)\)s$',
match)[0]
except IndexError:
err_reason = (_("Unable to identify a target field from:%s."
"match should be in the form %%(<field_name>)s") %
err_reason = (_("Unable to identify a target field from:%s. "
"Match should be in the form %%(<field_name>)s") %
match)
LOG.exception(err_reason)
raise exceptions.PolicyInitError(
@@ -220,7 +209,7 @@ class OwnerCheck(policy.Check):
reason=err_reason)
super(OwnerCheck, self).__init__(kind, match)
def __call__(self, target, creds):
def __call__(self, target, creds, enforcer):
if self.target_field not in target:
# policy needs a plugin check
# target field is in the form resource:field
@@ -237,13 +226,13 @@ class OwnerCheck(policy.Check):
parent_res, parent_field = do_split(separator)
break
except ValueError:
LOG.debug(_("Unable to find ':' as separator in %s."),
LOG.debug("Unable to find ':' as separator in %s.",
self.target_field)
else:
# If we are here split failed with both separators
err_reason = (_("Unable to find resource name in %s") %
self.target_field)
LOG.exception(err_reason)
LOG.error(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
@@ -253,7 +242,7 @@ class OwnerCheck(policy.Check):
err_reason = (_("Unable to verify match:%(match)s as the "
"parent resource: %(res)s was not found") %
{'match': self.match, 'res': parent_res})
LOG.exception(err_reason)
LOG.error(err_reason)
raise exceptions.PolicyCheckError(
policy="%s:%s" % (self.kind, self.match),
reason=err_reason)
@@ -261,9 +250,9 @@ class OwnerCheck(policy.Check):
# resource is handled by the core plugin. It might be worth
# having a way to map resources to plugins so to make this
# check more general
# FIXME(ihrachys): if import is put in global, circular
# NOTE(ihrachys): if import is put in global, circular
# import failure occurs
from tacker import manager
manager = importutils.import_module('tacker.manager')
f = getattr(manager.TackerManager.get_instance().plugin,
'get_%s' % parent_res)
# f *must* exist, if not found it is better to let tacker
@@ -274,9 +263,17 @@ class OwnerCheck(policy.Check):
target[parent_foreign_key],
fields=[parent_field])
target[self.target_field] = data[parent_field]
except exceptions.NotFound as e:
# NOTE(kevinbenton): a NotFound exception can occur if a
# list operation is happening at the same time as one of
# the parents and its children being deleted. So we issue
# a RetryRequest so the API will redo the lookup and the
# problem items will be gone.
raise db_exc.RetryRequest(e)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Policy check error while calling %s!'), f)
LOG.exception(_LE('Policy check error while calling %s!'),
f)
match = self.match % target
if self.kind in creds:
return match == six.text_type(creds[self.kind])
@@ -302,30 +299,39 @@ class FieldCheck(policy.Check):
self.field = field
self.value = conv_func(value)
self.regex = re.compile(value[1:]) if value.startswith('~') else None
def __call__(self, target_dict, cred_dict):
def __call__(self, target_dict, cred_dict, enforcer):
target_value = target_dict.get(self.field)
# target_value might be a boolean, explicitly compare with None
if target_value is None:
LOG.debug(_("Unable to find requested field: %(field)s in "
"target: %(target_dict)s"),
{'field': self.field,
'target_dict': target_dict})
LOG.debug("Unable to find requested field: %(field)s in target: "
"%(target_dict)s",
{'field': self.field, 'target_dict': target_dict})
return False
if self.regex:
return bool(self.regex.match(target_value))
return target_value == self.value
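
OwnerCheck and FieldCheck keep working under oslo.policy because their __call__ now takes the extra enforcer argument the library passes to every check. Below is a trimmed, standalone stand-in for the field-style check, registered under a demo name so it does not collide with anything real; the rule string and attribute values are illustrative only:

from oslo_config import cfg
from oslo_policy import policy


class DemoFieldCheck(policy.Check):
    """Simplified stand-in for the FieldCheck in the diff above."""

    def __init__(self, kind, match):
        super(DemoFieldCheck, self).__init__(kind, match)
        resource, field_value = match.split(':', 1)
        self.field, self.value = field_value.split('=', 1)

    def __call__(self, target, creds, enforcer):
        # Compare the attribute in the request body against the rule value.
        return str(target.get(self.field)) == self.value


policy.register('demofield', DemoFieldCheck)

enforcer = policy.Enforcer(cfg.CONF)
enforcer.set_rules(policy.Rules.from_dict(
    {'get_network:shared': 'demofield:networks:shared=True'}))
print(enforcer.enforce('get_network:shared', {'shared': True}, {}))   # True
print(enforcer.enforce('get_network:shared', {'shared': False}, {}))  # False
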
def _prepare_check(context, action, target):
def _prepare_check(context, action, target, pluralized):
"""Prepare rule, target, and credentials for the policy engine."""
# Compare with None to distinguish case in which target is {}
if target is None:
target = {}
match_rule = _build_match_rule(action, target)
match_rule = _build_match_rule(action, target, pluralized)
credentials = context.to_dict()
return match_rule, target, credentials
def check(context, action, target, plugin=None, might_not_exist=False):
def log_rule_list(match_rule):
if LOG.isEnabledFor(logging.DEBUG):
rules = _process_rules_list([], match_rule)
LOG.debug("Enforcing rules: %s", rules)
def check(context, action, target, plugin=None, might_not_exist=False,
pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: tacker context
@@ -339,15 +345,32 @@ def check(context, action, target, plugin=None, might_not_exist=False):
:param might_not_exist: If True the policy check is skipped (and the
function returns True) if the specified policy does not exist.
Defaults to false.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:return: Returns True if access is permitted else False.
"""
if might_not_exist and not (policy._rules and action in policy._rules):
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
if context.is_admin:
return True
return policy.check(*(_prepare_check(context, action, target)))
if might_not_exist and not (_ENFORCER.rules and action in _ENFORCER.rules):
return True
match_rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
result = _ENFORCER.enforce(match_rule,
target,
credentials,
pluralized=pluralized)
# logging applied rules in case of failure
if not result:
log_rule_list(match_rule)
return result
def enforce(context, action, target, plugin=None):
def enforce(context, action, target, plugin=None, pluralized=None):
"""Verifies that the action is valid on the target in this context.
:param context: tacker context
@@ -358,15 +381,27 @@ def enforce(context, action, target, plugin=None):
location of the object e.g. ``{'project_id': context.project_id}``
:param plugin: currently unused and deprecated.
Kept for backward compatibility.
:param pluralized: pluralized case of resource
e.g. firewall_policy -> pluralized = "firewall_policies"
:raises tacker.exceptions.PolicyNotAuthorized: if verification fails.
:raises oslo_policy.policy.PolicyNotAuthorized:
if verification fails.
"""
rule, target, credentials = _prepare_check(context, action, target)
result = policy.check(rule, target, credentials, action=action)
if not result:
LOG.debug(_("Failed policy check for '%s'"), action)
raise exceptions.PolicyNotAuthorized(action=action)
# If we already know the context has admin rights do not perform an
# additional check and authorize the operation
if context.is_admin:
return True
rule, target, credentials = _prepare_check(context,
action,
target,
pluralized)
try:
result = _ENFORCER.enforce(rule, target, credentials, action=action,
do_raise=True)
except policy.PolicyNotAuthorized:
with excutils.save_and_reraise_exception():
log_rule_list(rule)
LOG.debug("Failed policy check for '%s'", action)
return result
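
Callers keep going through tacker.policy's check() and enforce() wrappers; only the backend changed. A hedged usage sketch follows: the action names, target and user values are made up, and it assumes cfg.CONF has been set up and a policy file with the referenced rules is readable:

from tacker import context
from tacker import policy

policy.init()        # builds the shared Enforcer and loads policy rules
ctx = context.Context('user-1', 'tenant-1', roles=['member'])

target = {'tenant_id': ctx.tenant_id}
if policy.check(ctx, 'get_vnf', target):
    print('read allowed')              # check() returns True/False

# enforce() raises oslo_policy.policy.PolicyNotAuthorized on failure
policy.enforce(ctx, 'delete_vnf', target)
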
@@ -375,43 +410,6 @@ def check_is_admin(context):
init()
# the target is user-self
credentials = context.to_dict()
target = credentials
# Backward compatibility: if ADMIN_CTX_POLICY is not
# found, default to validating role:admin
admin_policy = (ADMIN_CTX_POLICY in policy._rules
and ADMIN_CTX_POLICY or 'role:admin')
return policy.check(admin_policy, target, credentials)
def _extract_roles(rule, roles):
if isinstance(rule, policy.RoleCheck):
roles.append(rule.match.lower())
elif isinstance(rule, policy.RuleCheck):
_extract_roles(policy._rules[rule.match], roles)
elif hasattr(rule, 'rules'):
for rule in rule.rules:
_extract_roles(rule, roles)
def get_admin_roles():
"""Get Admin roles.
Return a list of roles which are granted admin rights according
to policy settings.
"""
# NOTE(salvatore-orlando): This function provides a solution for
# populating implicit contexts with the appropriate roles so that
# they correctly pass policy checks, and will become superseded
# once all explicit policy checks are removed from db logic and
# plugin modules. For backward compatibility it returns the literal
# admin if ADMIN_CTX_POLICY is not defined
init()
if not policy._rules or ADMIN_CTX_POLICY not in policy._rules:
return ['admin']
try:
admin_ctx_rule = policy._rules[ADMIN_CTX_POLICY]
except (KeyError, TypeError):
return
roles = []
_extract_roles(admin_ctx_rule, roles)
return roles
if ADMIN_CTX_POLICY not in _ENFORCER.rules:
return False
return _ENFORCER.enforce(ADMIN_CTX_POLICY, credentials, credentials)
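
check_is_admin() now simply evaluates the context_is_admin rule through the Enforcer, returning False when the rule is absent; this is what ContextBase relies on in context.py above when is_admin is not given. A small standalone sketch of that evaluation, with the conventional role:admin rule body used purely as an example:

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF)
enforcer.set_rules(policy.Rules.from_dict({'context_is_admin': 'role:admin'}))

admin_creds = {'roles': ['admin'], 'tenant_id': 't1'}
member_creds = {'roles': ['member'], 'tenant_id': 't1'}

# As in check_is_admin(), the credentials double as the target.
print(enforcer.enforce('context_is_admin', admin_creds, admin_creds))    # True
print(enforcer.enforce('context_is_admin', member_creds, member_creds))  # False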

View File

@@ -17,6 +17,7 @@ import os
import mock
from oslo_config import cfg
from oslo_policy import policy as common_policy
from oslo_utils import uuidutils
import six
import six.moves.urllib.parse as urlparse
@@ -32,7 +33,6 @@ from tacker.api.v1 import router
from tacker.common import exceptions as n_exc
from tacker import context
from tacker import manager
from tacker.openstack.common import policy as common_policy
from tacker import policy
from tacker.tests import base
from tacker.tests import fake_notifier

View File

@@ -19,6 +19,7 @@ import fixtures
import mock
import six
from oslo_policy import policy as common_policy
from oslo_serialization import jsonutils as json
from oslo_utils import importutils
from six.moves.urllib import request as urlrequest
@@ -28,7 +29,6 @@ from tacker.api.v1 import attributes
from tacker.common import exceptions
from tacker import context
from tacker import manager
from tacker.openstack.common import policy as common_policy
from tacker import policy
from tacker.tests import base