Add filter rule engine to process filter query

Added a filter rule engine to process the filter query parameter as
defined in ETSI GS NFV-SOL 013 V2.6.1 (2019-03), section 5.2
`Attribute-based filtering`.

For example,

Request:
GET .../vnfpkgm/v1/vnf_packages?filter=(eq,onboardingState,CREATED)

It will return the list of VNF packages whose `onboardingState` is
`CREATED`.
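
More complex queries combine expressions with ';' (logical AND) and use
the multi-value operators (in, nin, cont, ncont); for example, these
illustrative requests:

GET .../vnfpkgm/v1/vnf_packages?filter=(in,onboardingState,CREATED,ONBOARDED)
GET .../vnfpkgm/v1/vnf_packages?filter=(eq,onboardingState,CREATED);(eq,operationalState,ENABLED)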

The concept of the filter rule engine is based on the oslo.policy
rule engine.

Change-Id: I25bd70291b93b734148d19740536065b10aaf524
Implements: bp/enhance-vnf-package-support-part1
tpatil 2020-03-30 04:19:15 +00:00
parent 12badc2455
commit 7fb68faeda
3 changed files with 662 additions and 0 deletions


@@ -0,0 +1,403 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from tacker.common import exceptions as exception
registered_filters = {}
SUPPORTED_OP_ONE = ['eq', 'neq', 'gt', 'lt', 'gte', 'lte']
SUPPORTED_OP_MULTI = ['in', 'nin', 'cont', 'ncont']
@six.add_metaclass(abc.ABCMeta)
class BaseFilter(object):
"""Abstract base class for Filter classes."""
@abc.abstractmethod
def __str__(self):
"""String representation of the filter tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target):
"""Triggers if instance of the class is called.
Performs the checks against operators, attribute and datatype
of the value. Raises exception if it's invalid and finally attribute
is mapped to the database model that's present in the target dict.
"""
pass
class Filter(BaseFilter):
SUPPORTED_OPERATORS = None
FILTER_OPERATOR_SPEC_MAPPING = {
'eq': '==',
'neq': '!=',
'in': 'in',
'nin': 'not_in',
'gt': '>',
'gte': '>=',
'lt': '<',
'lte': '<=',
'cont': 'in',
'ncont': 'not_in'
}
OPERATOR_SUPPORTED_DATA_TYPES = {
'eq': ['string', 'number', 'enum', 'boolean', 'key_value_pair'],
'neq': ['string', 'number', 'enum', 'boolean', 'key_value_pair'],
'in': ['string', 'number', 'enum', 'key_value_pair'],
'nin': ['string', 'number', 'enum', 'key_value_pair'],
'gt': ['string', 'number', 'datetime', 'key_value_pair'],
'gte': ['string', 'number', 'datetime', 'key_value_pair'],
'lt': ['string', 'number', 'datetime', 'key_value_pair'],
'lte': ['string', 'number', 'datetime', 'key_value_pair'],
'cont': ['string', 'key_value_pair'],
'ncont': ['string', 'key_value_pair'],
}
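    # NOTE: entries in the 'target' dict are expected to map attribute
    # names to tuples shaped roughly like
    #     {'attributeName': (db_field, data_type, model, valid_values)}
    # generate_expression() below reads the database field from index 0,
    # the data type from index 1, the model (or, for 'key_value_pair'
    # attributes, a dict with 'model', 'key_column' and 'value_column'
    # keys) from index 2 and, for 'enum' attributes only, the allowed
    # values from index 3.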
def __init__(self, operator, attribute, values):
self.operator = operator
self.attribute = attribute
self.values = values
def __str__(self):
"""Return a string representation of this filter."""
return '%s,%s,%s' % (self.operator, self.attribute,
",".join(self.values))
def _attribute_special_field(self, target):
"""Check if an attribute is a special field in the target
Look for attributes in the target that ends with '*' as
these are special attributes whose type could be 'key_value'
which requires special treatment. For example
if attribute in target is 'userDefinedData/*' and if self.attribute
is userDefinedData/key1, then it's valid even though there is no
exact match in the target because key/value pair values are
dynamic.
"""
special_attributes = [attribute for attribute in target.keys() if '*'
in attribute]
for attribute in special_attributes:
field = attribute.split('*')[0]
if self.attribute.startswith(field):
return attribute
def _validate_operators(self):
if not self.operator:
msg = ("Rule '%(rule)s' cannot contain operator")
raise exception.ValidationError(msg % {"rule": self})
if self.SUPPORTED_OPERATORS and self.operator not in \
self.SUPPORTED_OPERATORS:
msg = ("Rule '%(rule)s' contains invalid operator "
"'%(operator)s'")
raise exception.ValidationError(msg % {"rule": self,
"operator": self.operator})
def _validate_attribute_name(self, target):
if not self.attribute:
msg = ("Rule '%(rule)s' doesn't contain attribute name")
raise exception.ValidationError(msg % {"rule": self})
if '*' in self.attribute:
msg = ("Rule '%(rule)s' contains invalid attribute name "
"'%(attribute)s'")
raise exception.ValidationError(msg % {"rule": self,
"attribute": self.attribute})
if target and self.attribute not in target:
if not self._attribute_special_field(target):
msg = ("Rule '%(rule)s' contains invalid attribute name "
"'%(attribute)s'")
raise exception.ValidationError(msg % {"rule": self,
"attribute": self.attribute})
def _handle_string(self, value):
if value[0] == "'" and value[-1] == "'":
value = value.strip("'")
# The logic below enforces single quotes to be in pairs.
# Raises exception otherwise. It also replaces a pair of
# single quotes with one single quote.
# NFV_SOL013 Section 5.2.2
num_quotes = value.count("'")
value = value.replace("''", "'")
if (value.count("'") * 2) != num_quotes:
msg = ("Rule '%(rule)s' value doesn't have single "
"quotes in pairs")
raise exception.ValidationError(msg % {"rule": self})
elif any(c in value for c in [",", ")", "'"]):
msg = ("Rule '%(rule)s' value must be enclosed in "
"single quotes when it contains either of "
"comma, single quote, closing bracket")
raise exception.ValidationError(msg % {"rule": self})
return value
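    # A worked example of the quoting rules above (illustrative):
    # _handle_string("'fire''water'") strips the enclosing quotes, finds
    # the inner quotes paired, and returns "fire'water"; the bare value
    # "fire'water" raises ValidationError because a value containing a
    # single quote must itself be enclosed in single quotes.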
def _handle_values(self, target):
special_attribute = self._attribute_special_field(target)
if special_attribute:
attribute_info = target.get(special_attribute)
else:
attribute_info = target.get(self.attribute)
if attribute_info[1] in ['string', 'key_value_pair']:
values = [self._handle_string(v) for v in self.values]
self.values = values
def _validate_data_type(self, target):
if not self.values:
msg = ("Rule '%(rule)s' contains empty value")
raise exception.ValidationError(msg % {"rule": self})
special_attribute = self._attribute_special_field(target)
if special_attribute:
attribute_info = target.get(special_attribute)
else:
attribute_info = target.get(self.attribute)
for value in self.values:
error = False
if attribute_info[1] == 'string' and not isinstance(value,
six.string_types):
error = True
elif attribute_info[1] == 'number':
if not strutils.is_int_like(value):
error = True
elif attribute_info[1] == 'uuid':
if not uuidutils.is_uuid_like(value):
error = True
elif attribute_info[1] == 'datetime':
try:
timeutils.parse_isotime(value)
except ValueError:
error = True
elif attribute_info[1] == 'enum':
if value not in attribute_info[3]:
msg = ("Rule '%(rule)s' contains data type '%(type)s' "
"with invalid value. It should be one of "
"%(valid_value)s")
raise exception.ValidationError(msg % {"rule": self,
"valid_value": ",".join(attribute_info[3]),
'type': attribute_info[1]})
if error:
msg = ("Rule '%(rule)s' contains invalid data type for value "
"'%(value)s'. The data type should be '%(type)s'")
raise exception.ValidationError(msg % {"rule": self,
"value": value,
'type': attribute_info[1]})
# Also, check whether the data type is supported by operator
if attribute_info[1] not in \
self.OPERATOR_SUPPORTED_DATA_TYPES.get(self.operator):
msg = ("Rule '%(rule)s' contains operator '%(operator)s' "
"which doesn't support data type '%(type)s' for "
"attribute '%(attribute)s'")
raise exception.ValidationError(msg % {"rule": self,
"operator": self.operator,
'type': attribute_info[1],
'attribute': self.attribute})
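        # For instance (illustrative): (gt,size,abc) fails above because
        # 'abc' is not int-like for a 'number' attribute, and
        # (cont,size,10) fails here because 'cont' only supports the
        # 'string' and 'key_value_pair' data types.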
def generate_expression(self, target, multiple_values=False):
special_attribute = self._attribute_special_field(target)
if special_attribute:
attribute_info = target.get(special_attribute)
else:
attribute_info = target.get(self.attribute)
attributes = attribute_info[0].split('.')
key_token = self.attribute.split('/')[-1]
if attribute_info[1] == 'key_value_pair':
filter_spec = []
expression_key = {'field': attribute_info[2]['key_column'],
'model': attribute_info[2]['model'],
'value': key_token,
'op': self.FILTER_OPERATOR_SPEC_MAPPING.get(self.operator)}
expression_value = {'field': attribute_info[2]['value_column'],
'model': attribute_info[2]['model'],
'value': self.values if multiple_values else self.values[0],
'op': self.FILTER_OPERATOR_SPEC_MAPPING.get(self.operator)}
filter_spec.append(expression_key)
filter_spec.append(expression_value)
expression = {'and': filter_spec}
else:
expression = {'field': attributes[-1],
'model': attribute_info[2],
'value': self.values if multiple_values else self.values[0],
'op': self.FILTER_OPERATOR_SPEC_MAPPING.get(self.operator)}
return expression
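# Illustrative result of Filter.generate_expression() for a
# 'key_value_pair' attribute: given the rule (eq,userDefinedData/key1,value1)
# and an attribute_info[2] of {'model': 'VnfPackageUserData',
# 'key_column': 'key', 'value_column': 'value'} (model and column names
# are assumptions for the example), it returns
#     {'and': [
#         {'field': 'key', 'model': 'VnfPackageUserData',
#          'value': 'key1', 'op': '=='},
#         {'field': 'value', 'model': 'VnfPackageUserData',
#          'value': 'value1', 'op': '=='}]}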
class AndFilter(BaseFilter):
def __init__(self, filter_rule):
self.filter_rules = filter_rule
def __str__(self):
"""Return a string representation of this filter."""
return '(%s)' % ' and '.join(str(r) for r in self.filter_rules)
def __call__(self, target):
"""Run through this filter and maps it to the database model
:returns
A dict containing list of filter-specs required by
sqlalchemy-filter.
Example::
        filter=(eq,onboardingState,'onboarded');(eq,softwareImages/size,10)
Result would be:
{
'and': [
{
'field': 'onboarding_state', 'model': 'Foo',
'value': "'onboarded'", 'op': '=='
},
{
'field': 'size', 'model': 'Foo',
'value': '10', 'op': '=='
}
]
}
"""
filter_spec = []
for filter_rule in self.filter_rules:
result = filter_rule(target)
filter_spec.append(result)
return {'and': filter_spec}
def add_filter_rule(self, filter_rule):
"""Adds filter rule to be tested.
Allows addition of another filter rule to the list of filter rules
that will be tested.
:returns: self
:rtype: :class:`.AndFilter`
"""
self.filter_rules.append(filter_rule)
return self
def register(name, func=None):
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
registered_filters[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register('simple_filter_expr_one')
class SimpleFilterExprOne(Filter):
SUPPORTED_OPERATORS = SUPPORTED_OP_ONE
def __call__(self, target):
"""Run through this filter and maps it to the database model
:returns
A dict containing list of filter-specs required by
sqlalchemy-filter.
Example::
operator=eq, attribute=onBoardingState, and value='onboarded', then
it would be mapped to following expression.
{
            'field': 'onboarding_state', -> Mapped to the database field
'model': 'Foo', -> Mapped to database model
'value': "onboarded", -> Value to be used for filtering
records
'op': '==', -> Operator for comparison
},
"""
self._validate_operators()
self._validate_attribute_name(target)
self._validate_data_type(target)
self._handle_values(target)
return self.generate_expression(target, multiple_values=False)
def _validate_operators(self):
super(SimpleFilterExprOne, self)._validate_operators()
if self.values and isinstance(self.values, list) and \
len(self.values) > 1:
msg = _("Rule '%(rule)s' contains operator '%(operator)s' "
"which supports only one value, but multiple values "
"'%(values)s' are provided")
raise exception.ValidationError(msg % {"rule": self,
"operator": self.operator,
'values': ",".join(self.values)})
@register('simple_filter_expr_multi')
class SimpleFilterExprMulti(Filter):
SUPPORTED_OPERATORS = SUPPORTED_OP_MULTI
def __call__(self, target):
"""Run through this filter and maps it to the database model
This filter is exactly same as SimpleFilterExprOne, except
it supports different operators like 'in'|'nin'|'cont'|'ncont'
which contains more than one value in the list.
:returns
A dict containing list of filter-specs required by
sqlalchemy-filter.
Example::
operator=in, attribute=softwareImages/size, and value=[10,20]',
then it would be mapped to following expression.
{
            'field': 'size', -> Mapped to the database field
'model': 'Foo', -> Mapped to database model
'value': [10,20], -> Value to be used for filtering
records
'op': 'in', -> Attribute equal to one of the values in the
list ("in set" relationship)
},
"""
self._validate_operators()
self._validate_attribute_name(target)
self._validate_data_type(target)
self._handle_values(target)
return self.generate_expression(target, multiple_values=True)
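# A minimal usage sketch (illustrative; the target entry layout and model
# name are assumptions based on generate_expression() above):
#
#     target = {'onboardingState': ('onboarding_state', 'enum',
#                                   'VnfPackage', ['CREATED', 'ONBOARDED'])}
#     rule = SimpleFilterExprOne('eq', 'onboardingState', ['CREATED'])
#     rule(target)
#     # -> {'field': 'onboarding_state', 'model': 'VnfPackage',
#     #     'value': 'CREATED', 'op': '=='}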


@@ -0,0 +1,259 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import six
from tacker.api.common import _filters
from tacker.common import exceptions as exception
def reducer(*tokens):
"""Decorator for reduction methods.
Arguments are a sequence of tokens, in order, which should trigger running
this reduction method.
"""
def decorator(func):
# Make sure we have a list of reducer sequences
if not hasattr(func, 'reducers'):
func.reducers = []
# Add the tokens to the list of reducer sequences
func.reducers.append(list(tokens))
return func
return decorator
class ParseStateMeta(type):
"""Metaclass for the :class:`.ParseState` class.
Facilitates identifying reduction methods.
"""
def __new__(mcs, name, bases, cls_dict):
"""Create the class.
Injects the 'reducers' list, a list of tuples matching token sequences
to the names of the corresponding reduction methods.
"""
reducers = []
for key, value in cls_dict.items():
if not hasattr(value, 'reducers'):
continue
for reduction in value.reducers:
reducers.append((reduction, key))
cls_dict['reducers'] = reducers
return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict)
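# For example, with the reducer methods defined on ParseState below, the
# injected 'reducers' list ends up holding entries such as
# (['(', 'filter', ')'], '_wrap_check') and
# (['filter', 'and', 'filter'], '_make_and_expr').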
@six.add_metaclass(ParseStateMeta)
class ParseState(object):
"""Implement the core of parsing the policy language.
Uses a greedy reduction algorithm to reduce a sequence of tokens into
a single terminal, the value of which will be the root of the
:class:`Filter` tree.
.. note::
Error reporting is rather lacking. The best we can get with this
parser formulation is an overall "parse failed" error. Fortunately, the
        filter language is simple enough that this shouldn't be that big a
problem.
"""
def __init__(self):
"""Initialize the ParseState."""
self.tokens = []
self.values = []
def reduce(self):
"""Perform a greedy reduction of the token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search for any more
possible reductions.
"""
for reduction, methname in self.reducers:
if (len(self.tokens) >= len(reduction) and
self.tokens[-len(reduction):] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-len(reduction):])
# Update the tokens and values
self.tokens[-len(reduction):] = [r[0] for r in results]
self.values[-len(reduction):] = [r[1] for r in results]
# Check for any more reductions
return self.reduce()
def shift(self, tok, value):
"""Adds one more token to the state.
Calls :meth:`reduce`.
"""
self.tokens.append(tok)
self.values.append(value)
# Do a greedy reduce...
self.reduce()
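    # Illustrative shift/reduce trace for "(eq,a,b);(eq,c,d)": shifting
    # '(', 'filter', ')' reduces the stack to ['filter'] (_wrap_check);
    # after shifting 'and' and the second parenthesized filter, the stack
    # is ['filter', 'and', 'filter'], which _make_and_expr reduces to a
    # single ['and_expr'] wrapping an AndFilter.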
@property
def result(self):
"""Obtain the final result of the parse.
:raises ValueError: If the parse failed to reduce to a single result.
"""
if len(self.values) != 1:
raise ValueError('Could not parse rule')
return self.values[0]
@reducer('(', 'filter', ')')
@reducer('(', 'and_expr', ')')
def _wrap_check(self, _p1, filter_data, _p2):
"""Turn parenthesized expressions into a 'filter' token."""
return [('filter', filter_data)]
@reducer('filter', 'and', 'filter')
def _make_and_expr(self, filter_data1, _and, filter_data2):
"""Create an 'and_expr'.
Join two filters by the 'and' operator.
"""
return [('and_expr', _filters.AndFilter([filter_data1, filter_data2]))]
@reducer('and_expr', 'and', 'filter')
def _extend_and_expr(self, and_expr, _and, filter_data):
"""Extend an 'and_expr' by adding one more filter."""
return [('and_expr', and_expr.add_filter_rule(filter_data))]
def _parse_filter(filter_rule):
"""Parse a filter rule and return an appropriate Filter object."""
try:
tokens = filter_rule.split(',')
filter_type = None
if len(tokens) >= 3:
if tokens[0] in _filters.SUPPORTED_OP_ONE:
filter_type = 'simple_filter_expr_one'
elif tokens[0] in _filters.SUPPORTED_OP_MULTI:
filter_type = 'simple_filter_expr_multi'
except Exception:
msg = 'Failed to understand filter %s' % filter_rule
raise exception.ValidationError(msg)
if filter_type in _filters.registered_filters:
return _filters.registered_filters[filter_type](tokens[0],
tokens[1], tokens[2:])
else:
msg = 'Failed to understand filter %s' % filter_rule
raise exception.ValidationError(msg)
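# For example (illustrative), _parse_filter('eq,onboardingState,CREATED')
# returns SimpleFilterExprOne('eq', 'onboardingState', ['CREATED']), while
# _parse_filter('in,size,10,20') returns
# SimpleFilterExprMulti('in', 'size', ['10', '20']).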
# Used for tokenizing the attribute filtering language
_tokenize_re = re.compile(r';+')
def _parse_tokenize(filter_rule):
"""Tokenizer for the attribute filtering language.
Most of the single-character tokens are specified in the
_tokenize_re; however, parentheses need to be handled specially,
because they can appear inside a check string. Thankfully, those
parentheses that appear inside a check string can never occur at
the very beginning or end ("%(variable)s" is the correct syntax).
"""
main_tokens = _tokenize_re.split(filter_rule)
index = 0
for tok in main_tokens:
# Skip empty tokens
if not tok or tok.isspace():
continue
# Handle leading parens on the token
clean = tok.lstrip('(')
for i in range(len(tok) - len(clean)):
yield '(', '('
# If it was only parentheses, continue
if not clean:
continue
else:
tok = clean
# Handle trailing parens on the token
clean = tok.rstrip(')')
trail = len(tok) - len(clean)
# Yield the cleaned token
lowered = clean.lower()
if lowered in (';', 'and'):
# Special tokens
yield lowered, clean
elif clean:
# Not a special token, but not composed solely of ')'
if len(tok) >= 2 and ((tok[0], tok[-1]) in
[('"', '"'), ("'", "'")]):
# It's a quoted string
yield 'string', tok[1:-1]
else:
yield 'filter', _parse_filter(clean)
# Yield the trailing parens
for i in range(trail):
yield ')', ')'
        if index < len(main_tokens) - 1:
yield 'and', 'and'
index += 1
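# Illustrative token stream: _parse_tokenize("(eq,a,b);(in,c,1,2)") yields
#     ('(', '('), ('filter', <SimpleFilterExprOne>), (')', ')'),
#     ('and', 'and'),
#     ('(', '('), ('filter', <SimpleFilterExprMulti>), (')', ')')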
def parse_filter_rule(filter_rule, target=None):
"""Parses filter query parameter to the tree.
Translates a filter written in the filter language into a tree of
Filter objects.
"""
# Parse the token stream
state = ParseState()
for tok, value in _parse_tokenize(filter_rule):
state.shift(tok, value)
try:
return state.result(target)
except ValueError:
err_msg = 'Failed to understand filter %s' % filter_rule
raise exception.ValidationError(err_msg)
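# End-to-end sketch (illustrative; the target entry layout and model name
# are assumptions, mirroring _filters.Filter.generate_expression):
#
#     target = {'onboardingState': ('onboarding_state', 'enum',
#                                   'VnfPackage', ['CREATED', 'ONBOARDED'])}
#     parse_filter_rule('(eq,onboardingState,CREATED)', target=target)
#     # -> {'field': 'onboarding_state', 'model': 'VnfPackage',
#     #     'value': 'CREATED', 'op': '=='}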