diff --git a/neutron_fwaas/openstack/__init__.py b/neutron_fwaas/openstack/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron_fwaas/openstack/common/__init__.py b/neutron_fwaas/openstack/common/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron_fwaas/openstack/common/_i18n.py b/neutron_fwaas/openstack/common/_i18n.py deleted file mode 100644 index 9aaa071de..000000000 --- a/neutron_fwaas/openstack/common/_i18n.py +++ /dev/null @@ -1,45 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""oslo.i18n integration module. - -See http://docs.openstack.org/developer/oslo.i18n/usage.html - -""" - -try: - import oslo.i18n - - # NOTE(dhellmann): This reference to o-s-l-o will be replaced by the - # application name when this module is synced into the separate - # repository. It is OK to have more than one translation function - # using the same domain, since there will still only be one message - # catalog. - _translators = oslo.i18n.TranslatorFactory(domain='neutron_fwaas') - - # The primary translation function using the well-known name "_" - _ = _translators.primary - - # Translators for log levels. - # - # The abbreviated names are meant to reflect the usual use of a short - # name like '_'. The "L" is for "log" and the other letter comes from - # the level. - _LI = _translators.log_info - _LW = _translators.log_warning - _LE = _translators.log_error - _LC = _translators.log_critical -except ImportError: - # NOTE(dims): Support for cases where a project wants to use - # code from neutron_fwaas-incubator, but is not ready to be internationalized - # (like tempest) - _ = _LI = _LW = _LE = _LC = lambda x: x diff --git a/neutron_fwaas/openstack/common/cache/__init__.py b/neutron_fwaas/openstack/common/cache/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron_fwaas/openstack/common/cache/backends.py b/neutron_fwaas/openstack/common/cache/backends.py deleted file mode 100644 index 1bea8912a..000000000 --- a/neutron_fwaas/openstack/common/cache/backends.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import abc - -import six - - -NOTSET = object() - - -@six.add_metaclass(abc.ABCMeta) -class BaseCache(object): - """Base Cache Abstraction - - :params parsed_url: Parsed url object. - :params options: A dictionary with configuration parameters - for the cache. For example: - - - default_ttl: An integer defining the default ttl for keys. 
- """ - - def __init__(self, parsed_url, options=None): - self._parsed_url = parsed_url - self._options = options or {} - self._default_ttl = int(self._options.get('default_ttl', 0)) - - @abc.abstractmethod - def _set(self, key, value, ttl, not_exists=False): - """Implementations of this class have to override this method.""" - - def set(self, key, value, ttl, not_exists=False): - """Sets or updates a cache entry - - .. note:: Thread-safety is required and has to be guaranteed by the - backend implementation. - - :params key: Item key as string. - :type key: `unicode string` - :params value: Value to assign to the key. This can be anything that - is handled by the current backend. - :params ttl: Key's timeout in seconds. 0 means no timeout. - :type ttl: int - :params not_exists: If True, the key will be set if it doesn't exist. - Otherwise, it'll always be set. - :type not_exists: bool - - :returns: True if the operation succeeds, False otherwise. - """ - if ttl is None: - ttl = self._default_ttl - - return self._set(key, value, ttl, not_exists) - - def __setitem__(self, key, value): - self.set(key, value, self._default_ttl) - - def setdefault(self, key, value): - """Sets the key value to `value` if it doesn't exist - - :params key: Item key as string. - :type key: `unicode string` - :params value: Value to assign to the key. This can be anything that - is handled by the current backend. - """ - try: - return self[key] - except KeyError: - self[key] = value - return value - - @abc.abstractmethod - def _get(self, key, default): - """Implementations of this class have to override this method.""" - - def get(self, key, default=None): - """Gets one item from the cache - - .. note:: Thread-safety is required and it has to be guaranteed - by the backend implementation. - - :params key: Key for the item to retrieve from the cache. - :params default: The default value to return. - - :returns: `key`'s value in the cache if it exists, otherwise - `default` should be returned. - """ - return self._get(key, default) - - def __getitem__(self, key): - value = self.get(key, NOTSET) - - if value is NOTSET: - raise KeyError - - return value - - @abc.abstractmethod - def __delitem__(self, key): - """Removes an item from cache. - - .. note:: Thread-safety is required and it has to be guaranteed by - the backend implementation. - - :params key: The key to remove. - - :returns: The key value if there's one - """ - - @abc.abstractmethod - def _clear(self): - """Implementations of this class have to override this method.""" - - def clear(self): - """Removes all items from the cache. - - .. note:: Thread-safety is required and it has to be guaranteed by - the backend implementation. - """ - return self._clear() - - @abc.abstractmethod - def _incr(self, key, delta): - """Implementations of this class have to override this method.""" - - def incr(self, key, delta=1): - """Increments the value for a key - - :params key: The key for the value to be incremented - :params delta: Number of units by which to increment the value. - Pass a negative number to decrement the value. - - :returns: The new value - """ - return self._incr(key, delta) - - @abc.abstractmethod - def _append_tail(self, key, tail): - """Implementations of this class have to override this method.""" - - def append_tail(self, key, tail): - """Appends `tail` to `key`'s value. - - :params key: The key of the value to which `tail` should be appended. - :params tail: The list of values to append to the original. 
- - :returns: The new value - """ - - if not hasattr(tail, "__iter__"): - raise TypeError('Tail must be an iterable') - - if not isinstance(tail, list): - # NOTE(flaper87): Make sure we pass a list - # down to the implementation. Not all drivers - # have support for generators, sets or other - # iterables. - tail = list(tail) - - return self._append_tail(key, tail) - - def append(self, key, value): - """Appends `value` to `key`'s value. - - :params key: The key of the value to which `tail` should be appended. - :params value: The value to append to the original. - - :returns: The new value - """ - return self.append_tail(key, [value]) - - @abc.abstractmethod - def __contains__(self, key): - """Verifies that a key exists. - - :params key: The key to verify. - - :returns: True if the key exists, otherwise False. - """ - - @abc.abstractmethod - def _get_many(self, keys, default): - """Implementations of this class have to override this method.""" - return ((k, self.get(k, default=default)) for k in keys) - - def get_many(self, keys, default=NOTSET): - """Gets keys' value from cache - - :params keys: List of keys to retrieve. - :params default: The default value to return for each key that is not - in the cache. - - :returns: A generator of (key, value) - """ - return self._get_many(keys, default) - - @abc.abstractmethod - def _set_many(self, data, ttl): - """Implementations of this class have to override this method.""" - - for key, value in data.items(): - self.set(key, value, ttl=ttl) - - def set_many(self, data, ttl=None): - """Puts several items into the cache at once - - Depending on the backend, this operation may or may not be efficient. - The default implementation calls set for each (key, value) pair - passed, other backends support set_many operations as part of their - protocols. - - :params data: A dictionary like {key: val} to store in the cache. - :params ttl: Key's timeout in seconds. - """ - - if ttl is None: - ttl = self._default_ttl - - self._set_many(data, ttl) - - def update(self, **kwargs): - """Sets several (key, value) pairs. - - Refer to the `set_many` docstring. - """ - self.set_many(kwargs, ttl=self._default_ttl) - - @abc.abstractmethod - def _unset_many(self, keys): - """Implementations of this class have to override this method.""" - for key in keys: - del self[key] - - def unset_many(self, keys): - """Removes several keys from the cache at once - - :params keys: List of keys to unset. - """ - self._unset_many(keys) diff --git a/neutron_fwaas/openstack/common/cache/cache.py b/neutron_fwaas/openstack/common/cache/cache.py deleted file mode 100644 index f551ed6e1..000000000 --- a/neutron_fwaas/openstack/common/cache/cache.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2013 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Cache library. - -Supported configuration options: - -`default_backend`: Name of the cache backend to use. -`key_namespace`: Namespace under which keys will be created.
-""" - -######################################################################## -# -# THIS MODULE IS DEPRECATED -# -# Please refer to -# https://etherpad.openstack.org/p/kilo-neutron_fwaas-library-proposals for -# the discussion leading to this deprecation. -# -# We recommend helping with the new neutron_fwaas.cache library being created -# as a wrapper for dogpile. -# -######################################################################## - - -from six.moves.urllib import parse -from stevedore import driver - - -def _get_oslo.configs(): - """Returns the oslo.config options to register.""" - # NOTE(flaper87): Oslo config should be - # optional. Instead of doing try / except - # at the top of this file, lets import cfg - # here and assume that the caller of this - # function already took care of this dependency. - from oslo.config import cfg - - return [ - cfg.StrOpt('cache_url', default='memory://', - help='URL to connect to the cache back end.') - ] - - -def register_oslo.configs(conf): - """Registers a cache configuration options - - :params conf: Config object. - :type conf: `cfg.ConfigOptions` - """ - conf.register_opts(_get_oslo.configs()) - - -def get_cache(url='memory://'): - """Loads the cache backend - - This function loads the cache backend - specified in the given configuration. - - :param conf: Configuration instance to use - """ - - parsed = parse.urlparse(url) - backend = parsed.scheme - - query = parsed.query - # NOTE(flaper87): We need the following hack - # for python versions < 2.7.5. Previous versions - # of python parsed query params just for 'known' - # schemes. This was changed in this patch: - # http://hg.python.org/cpython/rev/79e6ff3d9afd - if not query and '?' in parsed.path: - query = parsed.path.split('?', 1)[-1] - parameters = parse.parse_qsl(query) - kwargs = {'options': dict(parameters)} - - mgr = driver.DriverManager('neutron_fwaas.openstack.common.cache.backends', backend, - invoke_on_load=True, - invoke_args=[parsed], - invoke_kwds=kwargs) - return mgr.driver diff --git a/neutron_fwaas/openstack/common/context.py b/neutron_fwaas/openstack/common/context.py deleted file mode 100644 index 168989004..000000000 --- a/neutron_fwaas/openstack/common/context.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Simple class that stores security context information in the web request. - -Projects should subclass this class if they wish to enhance the request -context or provide additional information in their specific WSGI pipeline. -""" - -import itertools -import uuid - - -def generate_request_id(): - return b'req-' + str(uuid.uuid4()).encode('ascii') - - -class RequestContext(object): - - """Helper class to represent useful information about a request context. - - Stores information about the security context under which the user - accesses the system, as well as additional request information. 
- """ - - user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}' - - def __init__(self, auth_token=None, user=None, tenant=None, domain=None, - user_domain=None, project_domain=None, is_admin=False, - read_only=False, show_deleted=False, request_id=None, - instance_uuid=None): - self.auth_token = auth_token - self.user = user - self.tenant = tenant - self.domain = domain - self.user_domain = user_domain - self.project_domain = project_domain - self.is_admin = is_admin - self.read_only = read_only - self.show_deleted = show_deleted - self.instance_uuid = instance_uuid - if not request_id: - request_id = generate_request_id() - self.request_id = request_id - - def to_dict(self): - user_idt = ( - self.user_idt_format.format(user=self.user or '-', - tenant=self.tenant or '-', - domain=self.domain or '-', - user_domain=self.user_domain or '-', - p_domain=self.project_domain or '-')) - - return {'user': self.user, - 'tenant': self.tenant, - 'domain': self.domain, - 'user_domain': self.user_domain, - 'project_domain': self.project_domain, - 'is_admin': self.is_admin, - 'read_only': self.read_only, - 'show_deleted': self.show_deleted, - 'auth_token': self.auth_token, - 'request_id': self.request_id, - 'instance_uuid': self.instance_uuid, - 'user_identity': user_idt} - - @classmethod - def from_dict(cls, ctx): - return cls( - auth_token=ctx.get("auth_token"), - user=ctx.get("user"), - tenant=ctx.get("tenant"), - domain=ctx.get("domain"), - user_domain=ctx.get("user_domain"), - project_domain=ctx.get("project_domain"), - is_admin=ctx.get("is_admin", False), - read_only=ctx.get("read_only", False), - show_deleted=ctx.get("show_deleted", False), - request_id=ctx.get("request_id"), - instance_uuid=ctx.get("instance_uuid")) - - -def get_admin_context(show_deleted=False): - context = RequestContext(None, - tenant=None, - is_admin=True, - show_deleted=show_deleted) - return context - - -def get_context_from_function_and_args(function, args, kwargs): - """Find an arg of type RequestContext and return it. - - This is useful in a couple of decorators where we don't - know much about the function we're wrapping. - """ - - for arg in itertools.chain(kwargs.values(), args): - if isinstance(arg, RequestContext): - return arg - - return None - - -def is_user_context(context): - """Indicates if the request context is a normal user.""" - if not context or context.is_admin: - return False - return context.user_id and context.project_id diff --git a/neutron_fwaas/openstack/common/eventlet_backdoor.py b/neutron_fwaas/openstack/common/eventlet_backdoor.py deleted file mode 100644 index 8e97ddcd9..000000000 --- a/neutron_fwaas/openstack/common/eventlet_backdoor.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright (c) 2012 OpenStack Foundation. -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from __future__ import print_function - -import copy -import errno -import gc -import os -import pprint -import socket -import sys -import traceback - -import eventlet -import eventlet.backdoor -import greenlet -from oslo.config import cfg - -from neutron_fwaas.openstack.common._i18n import _LI -from neutron_fwaas.openstack.common import log as logging - -help_for_backdoor_port = ( - "Acceptable values are 0, <port>, and <start>:<end>, where 0 results " - "in listening on a random tcp port number; <port> results in listening " - "on the specified port number (and not enabling backdoor if that port " - "is in use); and <start>:<end> results in listening on the smallest " - "unused port number within the specified range of port numbers. The " - "chosen port is displayed in the service's log file.") -eventlet_backdoor_opts = [ - cfg.StrOpt('backdoor_port', - help="Enable eventlet backdoor. %s" % help_for_backdoor_port) -] - -CONF = cfg.CONF -CONF.register_opts(eventlet_backdoor_opts) -LOG = logging.getLogger(__name__) - - -def list_opts(): - """Entry point for oslo.config-generator. - """ - return [(None, copy.deepcopy(eventlet_backdoor_opts))] - - -class EventletBackdoorConfigValueError(Exception): - def __init__(self, port_range, help_msg, ex): - msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. ' - '%(help)s' % - {'range': port_range, 'ex': ex, 'help': help_msg}) - super(EventletBackdoorConfigValueError, self).__init__(msg) - self.port_range = port_range - - -def _dont_use_this(): - print("Don't use this, just disconnect instead") - - -def _find_objects(t): - return [o for o in gc.get_objects() if isinstance(o, t)] - - -def _print_greenthreads(): - for i, gt in enumerate(_find_objects(greenlet.greenlet)): - print(i, gt) - traceback.print_stack(gt.gr_frame) - print() - - -def _print_nativethreads(): - for threadId, stack in sys._current_frames().items(): - print(threadId) - traceback.print_stack(stack) - print() - - -def _parse_port_range(port_range): - if ':' not in port_range: - start, end = port_range, port_range - else: - start, end = port_range.split(':', 1) - try: - start, end = int(start), int(end) - if end < start: - raise ValueError - return start, end - except ValueError as ex: - raise EventletBackdoorConfigValueError(port_range, ex, - help_for_backdoor_port) - - -def _listen(host, start_port, end_port, listen_func): - try_port = start_port - while True: - try: - return listen_func((host, try_port)) - except socket.error as exc: - if (exc.errno != errno.EADDRINUSE or - try_port >= end_port): - raise - try_port += 1 - - -def initialize_if_enabled(): - backdoor_locals = { - 'exit': _dont_use_this, # So we don't exit the entire process - 'quit': _dont_use_this, # So we don't exit the entire process - 'fo': _find_objects, - 'pgt': _print_greenthreads, - 'pnt': _print_nativethreads, - } - - if CONF.backdoor_port is None: - return None - - start_port, end_port = _parse_port_range(str(CONF.backdoor_port)) - - # NOTE(johannes): The standard sys.displayhook will print the value of - # the last expression and set it to __builtin__._, which overwrites - # the __builtin__._ that gettext sets. Let's switch to using pprint - # since it won't interact poorly with gettext, and it's easier to - # read the output too. - def displayhook(val): - if val is not None: - pprint.pprint(val) - sys.displayhook = displayhook - - sock = _listen('localhost', start_port, end_port, eventlet.listen) - - # In the case of backdoor port being zero, a port number is assigned by - # listen(). In any case, pull the port number out here.
- port = sock.getsockname()[1] - LOG.info( - _LI('Eventlet backdoor listening on %(port)s for process %(pid)d') % - {'port': port, 'pid': os.getpid()} - ) - eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock, - locals=backdoor_locals) - return port diff --git a/neutron_fwaas/openstack/common/fileutils.py b/neutron_fwaas/openstack/common/fileutils.py deleted file mode 100644 index ec26eaf9c..000000000 --- a/neutron_fwaas/openstack/common/fileutils.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib -import errno -import logging -import os -import tempfile - -from oslo.utils import excutils - -LOG = logging.getLogger(__name__) - -_FILE_CACHE = {} - - -def ensure_tree(path): - """Create a directory (and any ancestor directories required) - - :param path: Directory to create - """ - try: - os.makedirs(path) - except OSError as exc: - if exc.errno == errno.EEXIST: - if not os.path.isdir(path): - raise - else: - raise - - -def read_cached_file(filename, force_reload=False): - """Read from a file if it has been modified. - - :param force_reload: Whether to reload the file. - :returns: A tuple with a boolean specifying if the data is fresh - or not. - """ - global _FILE_CACHE - - if force_reload: - delete_cached_file(filename) - - reloaded = False - mtime = os.path.getmtime(filename) - cache_info = _FILE_CACHE.setdefault(filename, {}) - - if not cache_info or mtime > cache_info.get('mtime', 0): - LOG.debug("Reloading cached file %s" % filename) - with open(filename) as fap: - cache_info['data'] = fap.read() - cache_info['mtime'] = mtime - reloaded = True - return (reloaded, cache_info['data']) - - -def delete_cached_file(filename): - """Delete cached file if present. - - :param filename: filename to delete - """ - global _FILE_CACHE - - if filename in _FILE_CACHE: - del _FILE_CACHE[filename] - - -def delete_if_exists(path, remove=os.unlink): - """Delete a file, but ignore file not found error. - - :param path: File to delete - :param remove: Optional function to remove passed path - """ - - try: - remove(path) - except OSError as e: - if e.errno != errno.ENOENT: - raise - - -@contextlib.contextmanager -def remove_path_on_error(path, remove=delete_if_exists): - """Protect code that wants to operate on PATH atomically. - Any exception will cause PATH to be removed. - - :param path: File to work with - :param remove: Optional function to remove passed path - """ - - try: - yield - except Exception: - with excutils.save_and_reraise_exception(): - remove(path) - - -def file_open(*args, **kwargs): - """Open file - - see built-in open() documentation for more details - - Note: The reason this is kept in a separate module is to easily - be able to provide a stub module that doesn't alter system - state at all (for unit tests) - """ - return open(*args, **kwargs) - - -def write_to_tempfile(content, path=None, suffix='', prefix='tmp'): - """Create temporary file or use existing file. 
- - This util is needed for creating temporary file with - specified content, suffix and prefix. If path is not None, - it will be used for writing content. If the path doesn't - exist it'll be created. - - :param content: content for temporary file. - :param path: same as parameter 'dir' for mkstemp - :param suffix: same as parameter 'suffix' for mkstemp - :param prefix: same as parameter 'prefix' for mkstemp - - For example: it can be used in database tests for creating - configuration files. - """ - if path: - ensure_tree(path) - - (fd, path) = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix) - try: - os.write(fd, content) - finally: - os.close(fd) - return path diff --git a/neutron_fwaas/openstack/common/fixture/__init__.py b/neutron_fwaas/openstack/common/fixture/__init__.py deleted file mode 100644 index d1223eaf7..000000000 --- a/neutron_fwaas/openstack/common/fixture/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - - -six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox')) diff --git a/neutron_fwaas/openstack/common/fixture/lockutils.py b/neutron_fwaas/openstack/common/fixture/lockutils.py deleted file mode 100644 index a02ae4f1d..000000000 --- a/neutron_fwaas/openstack/common/fixture/lockutils.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures - -from neutron_fwaas.openstack.common import lockutils - - -class LockFixture(fixtures.Fixture): - """External locking fixture. - - This fixture is basically an alternative to the synchronized decorator with - the external flag so that tearDowns and addCleanups will be included in - the lock context for locking between tests. The fixture is recommended to - be the first line in a test method, like so:: - - def test_method(self): - self.useFixture(LockFixture) - ... - - or the first line in setUp if all the test methods in the class are - required to be serialized. Something like:: - - class TestCase(testtools.testcase): - def setUp(self): - self.useFixture(LockFixture) - super(TestCase, self).setUp() - ... - - This is because addCleanups are put on a LIFO queue that gets run after the - test method exits. 
(either by completing or raising an exception) - """ - def __init__(self, name, lock_file_prefix=None): - self.mgr = lockutils.lock(name, lock_file_prefix, True) - - def setUp(self): - super(LockFixture, self).setUp() - self.addCleanup(self.mgr.__exit__, None, None, None) - self.lock = self.mgr.__enter__() diff --git a/neutron_fwaas/openstack/common/fixture/logging.py b/neutron_fwaas/openstack/common/fixture/logging.py deleted file mode 100644 index 3823a0355..000000000 --- a/neutron_fwaas/openstack/common/fixture/logging.py +++ /dev/null @@ -1,34 +0,0 @@ -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import fixtures - - -def get_logging_handle_error_fixture(): - """returns a fixture to make logging raise formatting exceptions. - - Usage: - self.useFixture(logging.get_logging_handle_error_fixture()) - """ - return fixtures.MonkeyPatch('logging.Handler.handleError', - _handleError) - - -def _handleError(self, record): - """Monkey patch for logging.Handler.handleError. - - The default handleError just logs the error to stderr but we want - the option of actually raising an exception. - """ - raise diff --git a/neutron_fwaas/openstack/common/local.py b/neutron_fwaas/openstack/common/local.py deleted file mode 100644 index 0819d5b97..000000000 --- a/neutron_fwaas/openstack/common/local.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Local storage of variables using weak references""" - -import threading -import weakref - - -class WeakLocal(threading.local): - def __getattribute__(self, attr): - rval = super(WeakLocal, self).__getattribute__(attr) - if rval: - # NOTE(mikal): this bit is confusing. What is stored is a weak - # reference, not the value itself. We therefore need to lookup - # the weak reference and return the inner value here. - rval = rval() - return rval - - def __setattr__(self, attr, value): - value = weakref.ref(value) - return super(WeakLocal, self).__setattr__(attr, value) - - -# NOTE(mikal): the name "store" should be deprecated in the future -store = WeakLocal() - -# A "weak" store uses weak references and allows an object to fall out of scope -# when it falls out of scope in the code that uses the thread local storage. A -# "strong" store will hold a reference to the object so that it never falls out -# of scope. 
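The comment above draws the weak/strong distinction that the weak_store and strong_store assignments just below implement: a value placed in weak_store vanishes as soon as the caller drops its last strong reference. A short sketch of that behavior, assuming CPython's reference-counting collection and a hypothetical value object::

    from neutron_fwaas.openstack.common import local

    class Payload(object):  # hypothetical value object
        pass

    value = Payload()
    local.weak_store.context = value  # only a weak reference is stored
    assert local.weak_store.context is value

    del value  # last strong reference released
    # The weakref is now dead, so the attribute reads back as None.
    assert local.weak_store.context is None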
-weak_store = WeakLocal() -strong_store = threading.local() diff --git a/neutron_fwaas/openstack/common/lockutils.py b/neutron_fwaas/openstack/common/lockutils.py deleted file mode 100644 index 33d79f00c..000000000 --- a/neutron_fwaas/openstack/common/lockutils.py +++ /dev/null @@ -1,326 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import contextlib -import errno -import functools -import logging -import os -import shutil -import subprocess -import sys -import tempfile -import threading -import time -import weakref - -from oslo.config import cfg - -from neutron_fwaas.openstack.common import fileutils -from neutron_fwaas.openstack.common._i18n import _, _LE, _LI - - -LOG = logging.getLogger(__name__) - - -util_opts = [ - cfg.BoolOpt('disable_process_locking', default=False, - help='Enables or disables inter-process locks.'), - cfg.StrOpt('lock_path', - default=os.environ.get("NEUTRON_FWAAS_LOCK_PATH"), - help='Directory to use for lock files.') -] - - -CONF = cfg.CONF -CONF.register_opts(util_opts) - - -def set_defaults(lock_path): - cfg.set_defaults(util_opts, lock_path=lock_path) - - -class _FileLock(object): - """Lock implementation which allows multiple locks, working around - issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does - not require any cleanup. Since the lock is always held on a file - descriptor rather than outside of the process, the lock gets dropped - automatically if the process crashes, even if __exit__ is not executed. - - There are no guarantees regarding usage by multiple green threads in a - single process here. This lock works only between processes. Exclusive - access between local threads should be achieved using the semaphores - in the @synchronized decorator. - - Note these locks are released when the descriptor is closed, so it's not - safe to close the file descriptor while another green thread holds the - lock. Just opening and closing the lock file can break synchronisation, - so lock files must be accessed only using this abstraction. - """ - - def __init__(self, name): - self.lockfile = None - self.fname = name - - def acquire(self): - basedir = os.path.dirname(self.fname) - - if not os.path.exists(basedir): - fileutils.ensure_tree(basedir) - LOG.info(_LI('Created lock path: %s'), basedir) - - self.lockfile = open(self.fname, 'w') - - while True: - try: - # Using non-blocking locks since green threads are not - # patched to deal with blocking locking calls. - # Also upon reading the MSDN docs for locking(), it seems - # to have a laughable 10 attempts "blocking" mechanism. 
- self.trylock() - LOG.debug('Got file lock "%s"', self.fname) - return True - except IOError as e: - if e.errno in (errno.EACCES, errno.EAGAIN): - # external locks synchronise things like iptables - # updates - give it some time to prevent busy spinning - time.sleep(0.01) - else: - raise threading.ThreadError(_("Unable to acquire lock on" - " `%(filename)s` due to" - " %(exception)s") % - {'filename': self.fname, - 'exception': e}) - - def __enter__(self): - self.acquire() - return self - - def release(self): - try: - self.unlock() - self.lockfile.close() - LOG.debug('Released file lock "%s"', self.fname) - except IOError: - LOG.exception(_LE("Could not release the acquired lock `%s`"), - self.fname) - - def __exit__(self, exc_type, exc_val, exc_tb): - self.release() - - def exists(self): - return os.path.exists(self.fname) - - def trylock(self): - raise NotImplementedError() - - def unlock(self): - raise NotImplementedError() - - -class _WindowsLock(_FileLock): - def trylock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1) - - def unlock(self): - msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1) - - -class _FcntlLock(_FileLock): - def trylock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB) - - def unlock(self): - fcntl.lockf(self.lockfile, fcntl.LOCK_UN) - - -if os.name == 'nt': - import msvcrt - InterProcessLock = _WindowsLock -else: - import fcntl - InterProcessLock = _FcntlLock - -_semaphores = weakref.WeakValueDictionary() -_semaphores_lock = threading.Lock() - - -def _get_lock_path(name, lock_file_prefix, lock_path=None): - # NOTE(mikal): the lock name cannot contain directory - # separators - name = name.replace(os.sep, '_') - if lock_file_prefix: - sep = '' if lock_file_prefix.endswith('-') else '-' - name = '%s%s%s' % (lock_file_prefix, sep, name) - - local_lock_path = lock_path or CONF.lock_path - - if not local_lock_path: - raise cfg.RequiredOptError('lock_path') - - return os.path.join(local_lock_path, name) - - -def external_lock(name, lock_file_prefix=None, lock_path=None): - LOG.debug('Attempting to grab external lock "%(lock)s"', - {'lock': name}) - - lock_file_path = _get_lock_path(name, lock_file_prefix, lock_path) - - return InterProcessLock(lock_file_path) - - -def remove_external_lock_file(name, lock_file_prefix=None): - """Remove an external lock file when it's not used anymore - This will be helpful when we have a lot of lock files - """ - with internal_lock(name): - lock_file_path = _get_lock_path(name, lock_file_prefix) - try: - os.remove(lock_file_path) - except OSError: - LOG.info(_LI('Failed to remove file %(file)s'), - {'file': lock_file_path}) - - -def internal_lock(name): - with _semaphores_lock: - try: - sem = _semaphores[name] - LOG.debug('Using existing semaphore "%s"', name) - except KeyError: - sem = threading.Semaphore() - _semaphores[name] = sem - LOG.debug('Created new semaphore "%s"', name) - - return sem - - -@contextlib.contextmanager -def lock(name, lock_file_prefix=None, external=False, lock_path=None): - """Context based lock - - This function yields a `threading.Semaphore` instance (if we don't use - eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is - True, in which case, it'll yield an InterProcessLock instance. - - :param lock_file_prefix: The lock_file_prefix argument is used to provide - lock files on disk with a meaningful prefix. - - :param external: The external keyword argument denotes whether this lock - should work across multiple processes. 
This means that if two different - workers both run a method decorated with @synchronized('mylock', - external=True), only one of them will execute at a time. - """ - int_lock = internal_lock(name) - with int_lock: - LOG.debug('Acquired semaphore "%(lock)s"', {'lock': name}) - try: - if external and not CONF.disable_process_locking: - ext_lock = external_lock(name, lock_file_prefix, lock_path) - with ext_lock: - yield ext_lock - else: - yield int_lock - finally: - LOG.debug('Releasing semaphore "%(lock)s"', {'lock': name}) - - -def synchronized(name, lock_file_prefix=None, external=False, lock_path=None): - """Synchronization decorator. - - Decorating a method like so:: - - @synchronized('mylock') - def foo(self, *args): - ... - - ensures that only one thread will execute the foo method at a time. - - Different methods can share the same lock:: - - @synchronized('mylock') - def foo(self, *args): - ... - - @synchronized('mylock') - def bar(self, *args): - ... - - This way only one of either foo or bar can be executing at a time. - """ - - def wrap(f): - @functools.wraps(f) - def inner(*args, **kwargs): - try: - with lock(name, lock_file_prefix, external, lock_path): - LOG.debug('Got semaphore / lock "%(function)s"', - {'function': f.__name__}) - return f(*args, **kwargs) - finally: - LOG.debug('Semaphore / lock released "%(function)s"', - {'function': f.__name__}) - return inner - return wrap - - -def synchronized_with_prefix(lock_file_prefix): - """Partial object generator for the synchronization decorator. - - Redefine @synchronized in each project like so:: - - (in nova/utils.py) - from nova.openstack.common import lockutils - - synchronized = lockutils.synchronized_with_prefix('nova-') - - - (in nova/foo.py) - from nova import utils - - @utils.synchronized('mylock') - def bar(self, *args): - ... - - The lock_file_prefix argument is used to provide lock files on disk with a - meaningful prefix. - """ - - return functools.partial(synchronized, lock_file_prefix=lock_file_prefix) - - -def main(argv): - """Create a dir for locks and pass it to command from arguments - - If you run this: - python -m openstack.common.lockutils python setup.py testr - - a temporary directory will be created for all your locks and passed to all - your tests in an environment variable. The temporary dir will be deleted - afterwards and the return value will be preserved. - """ - - lock_dir = tempfile.mkdtemp() - os.environ["NEUTRON_FWAAS_LOCK_PATH"] = lock_dir - try: - ret_val = subprocess.call(argv[1:]) - finally: - shutil.rmtree(lock_dir, ignore_errors=True) - return ret_val - - -if __name__ == '__main__': - sys.exit(main(sys.argv)) diff --git a/neutron_fwaas/openstack/common/log.py b/neutron_fwaas/openstack/common/log.py deleted file mode 100644 index f0c8582e2..000000000 --- a/neutron_fwaas/openstack/common/log.py +++ /dev/null @@ -1,718 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""OpenStack logging handler. - -This module adds to logging functionality by adding the option to specify -a context object when calling the various log methods. If the context object -is not specified, default formatting is used. Additionally, an instance uuid -may be passed as part of the log message, which is intended to make it easier -for admins to find messages related to a specific instance. - -It also allows setting of formatting information through conf. - -""" - -import copy -import inspect -import itertools -import logging -import logging.config -import logging.handlers -import os -import socket -import sys -import traceback - -from oslo.config import cfg -from oslo.serialization import jsonutils -from oslo.utils import importutils -import six -from six import moves - -_PY26 = sys.version_info[0:2] == (2, 6) - -from neutron_fwaas.openstack.common._i18n import _ -from neutron_fwaas.openstack.common import local - - -_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" - - -common_cli_opts = [ - cfg.BoolOpt('debug', - short='d', - default=False, - help='Print debugging output (set logging level to ' - 'DEBUG instead of default WARNING level).'), - cfg.BoolOpt('verbose', - short='v', - default=False, - help='Print more verbose output (set logging level to ' - 'INFO instead of default WARNING level).'), -] - -logging_cli_opts = [ - cfg.StrOpt('log-config-append', - metavar='PATH', - deprecated_name='log-config', - help='The name of a logging configuration file. This file ' - 'is appended to any existing logging configuration ' - 'files. For details about logging configuration files, ' - 'see the Python logging module documentation.'), - cfg.StrOpt('log-format', - metavar='FORMAT', - help='DEPRECATED. ' - 'A logging.Formatter log message format string which may ' - 'use any of the available logging.LogRecord attributes. ' - 'This option is deprecated. Please use ' - 'logging_context_format_string and ' - 'logging_default_format_string instead.'), - cfg.StrOpt('log-date-format', - default=_DEFAULT_LOG_DATE_FORMAT, - metavar='DATE_FORMAT', - help='Format string for %%(asctime)s in log records. ' - 'Default: %(default)s .'), - cfg.StrOpt('log-file', - metavar='PATH', - deprecated_name='logfile', - help='(Optional) Name of log file to output to. ' - 'If no default is set, logging will go to stdout.'), - cfg.StrOpt('log-dir', - deprecated_name='logdir', - help='(Optional) The base directory used for relative ' - '--log-file paths.'), - cfg.BoolOpt('use-syslog', - default=False, - help='Use syslog for logging. ' - 'Existing syslog format is DEPRECATED during I, ' - 'and will change in J to honor RFC5424.'), - cfg.BoolOpt('use-syslog-rfc-format', - # TODO(bogdando) remove or use True after existing - # syslog format deprecation in J - default=False, - help='(Optional) Enables or disables syslog rfc5424 format ' - 'for logging. If enabled, prefixes the MSG part of the ' - 'syslog message with APP-NAME (RFC5424). 
The ' - 'format without the APP-NAME is deprecated in I, ' - 'and will be removed in J.'), - cfg.StrOpt('syslog-log-facility', - default='LOG_USER', - help='Syslog facility to receive log lines.') -] - -generic_log_opts = [ - cfg.BoolOpt('use_stderr', - default=True, - help='Log output to standard error.') -] - -DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', - 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', - 'oslo.messaging=INFO', 'iso8601=WARN', - 'requests.packages.urllib3.connectionpool=WARN', - 'urllib3.connectionpool=WARN', 'websocket=WARN', - "keystonemiddleware=WARN", "routes.middleware=WARN", - "stevedore=WARN"] - -log_opts = [ - cfg.StrOpt('logging_context_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [%(request_id)s %(user_identity)s] ' - '%(instance)s%(message)s', - help='Format string to use for log messages with context.'), - cfg.StrOpt('logging_default_format_string', - default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s ' - '%(name)s [-] %(instance)s%(message)s', - help='Format string to use for log messages without context.'), - cfg.StrOpt('logging_debug_format_suffix', - default='%(funcName)s %(pathname)s:%(lineno)d', - help='Data to append to log format when level is DEBUG.'), - cfg.StrOpt('logging_exception_prefix', - default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s ' - '%(instance)s', - help='Prefix each line of exception output with this format.'), - cfg.ListOpt('default_log_levels', - default=DEFAULT_LOG_LEVELS, - help='List of logger=LEVEL pairs.'), - cfg.BoolOpt('publish_errors', - default=False, - help='Enables or disables publication of error events.'), - cfg.BoolOpt('fatal_deprecations', - default=False, - help='Enables or disables fatal status of deprecations.'), - - # NOTE(mikal): there are two options here because sometimes we are handed - # a full instance (and could include more information), and other times we - # are just handed a UUID for the instance. - cfg.StrOpt('instance_format', - default='[instance: %(uuid)s] ', - help='The format for an instance that is passed with the log ' - 'message.'), - cfg.StrOpt('instance_uuid_format', - default='[instance: %(uuid)s] ', - help='The format for an instance UUID that is passed with the ' - 'log message.'), -] - -CONF = cfg.CONF -CONF.register_cli_opts(common_cli_opts) -CONF.register_cli_opts(logging_cli_opts) -CONF.register_opts(generic_log_opts) -CONF.register_opts(log_opts) - - -def list_opts(): - """Entry point for oslo.config-generator.""" - return [(None, copy.deepcopy(common_cli_opts)), - (None, copy.deepcopy(logging_cli_opts)), - (None, copy.deepcopy(generic_log_opts)), - (None, copy.deepcopy(log_opts)), - ] - - -# our new audit level -# NOTE(jkoelker) Since we synthesized an audit level, make the logging -# module aware of it so it acts like other levels. 
-logging.AUDIT = logging.INFO + 1 -logging.addLevelName(logging.AUDIT, 'AUDIT') - - -try: - NullHandler = logging.NullHandler -except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7 - class NullHandler(logging.Handler): - def handle(self, record): - pass - - def emit(self, record): - pass - - def createLock(self): - self.lock = None - - -def _dictify_context(context): - if context is None: - return None - if not isinstance(context, dict) and getattr(context, 'to_dict', None): - context = context.to_dict() - return context - - -def _get_binary_name(): - return os.path.basename(inspect.stack()[-1][1]) - - -def _get_log_file_path(binary=None): - logfile = CONF.log_file - logdir = CONF.log_dir - - if logfile and not logdir: - return logfile - - if logfile and logdir: - return os.path.join(logdir, logfile) - - if logdir: - binary = binary or _get_binary_name() - return '%s.log' % (os.path.join(logdir, binary),) - - return None - - -class BaseLoggerAdapter(logging.LoggerAdapter): - - def audit(self, msg, *args, **kwargs): - self.log(logging.AUDIT, msg, *args, **kwargs) - - def isEnabledFor(self, level): - if _PY26: - # This method was added in python 2.7 (and it does the exact - # same logic, so we need to do the exact same logic so that - # python 2.6 has this capability as well). - return self.logger.isEnabledFor(level) - else: - return super(BaseLoggerAdapter, self).isEnabledFor(level) - - -class LazyAdapter(BaseLoggerAdapter): - def __init__(self, name='unknown', version='unknown'): - self._logger = None - self.extra = {} - self.name = name - self.version = version - - @property - def logger(self): - if not self._logger: - self._logger = getLogger(self.name, self.version) - if six.PY3: - # In Python 3, the code fails because the 'manager' attribute - # cannot be found when using a LoggerAdapter as the - # underlying logger. Work around this issue. - self._logger.manager = self._logger.logger.manager - return self._logger - - -class ContextAdapter(BaseLoggerAdapter): - warn = logging.LoggerAdapter.warning - - def __init__(self, logger, project_name, version_string): - self.logger = logger - self.project = project_name - self.version = version_string - self._deprecated_messages_sent = dict() - - @property - def handlers(self): - return self.logger.handlers - - def deprecated(self, msg, *args, **kwargs): - """Call this method when a deprecated feature is used. - - If the system is configured for fatal deprecations then the message - is logged at the 'critical' level and :class:`DeprecatedConfig` will - be raised. - - Otherwise, the message will be logged (once) at the 'warn' level. - - :raises: :class:`DeprecatedConfig` if the system is configured for - fatal deprecations. - - """ - stdmsg = _("Deprecated: %s") % msg - if CONF.fatal_deprecations: - self.critical(stdmsg, *args, **kwargs) - raise DeprecatedConfig(msg=stdmsg) - - # Using a list because a tuple with dict can't be stored in a set. - sent_args = self._deprecated_messages_sent.setdefault(msg, list()) - - if args in sent_args: - # Already logged this message, so don't log it again. 
- return - - sent_args.append(args) - self.warn(stdmsg, *args, **kwargs) - - def process(self, msg, kwargs): - # NOTE(jecarey): If msg is not unicode, coerce it into unicode - # before it can get to the python logging and - # possibly cause string encoding trouble - if not isinstance(msg, six.text_type): - msg = six.text_type(msg) - - if 'extra' not in kwargs: - kwargs['extra'] = {} - extra = kwargs['extra'] - - context = kwargs.pop('context', None) - if not context: - context = getattr(local.store, 'context', None) - if context: - extra.update(_dictify_context(context)) - - instance = kwargs.pop('instance', None) - instance_uuid = (extra.get('instance_uuid') or - kwargs.pop('instance_uuid', None)) - instance_extra = '' - if instance: - instance_extra = CONF.instance_format % instance - elif instance_uuid: - instance_extra = (CONF.instance_uuid_format - % {'uuid': instance_uuid}) - extra['instance'] = instance_extra - - extra.setdefault('user_identity', kwargs.pop('user_identity', None)) - - extra['project'] = self.project - extra['version'] = self.version - extra['extra'] = extra.copy() - return msg, kwargs - - -class JSONFormatter(logging.Formatter): - def __init__(self, fmt=None, datefmt=None): - # NOTE(jkoelker) we ignore the fmt argument, but it's still there - # since logging.config.fileConfig passes it. - self.datefmt = datefmt - - def formatException(self, ei, strip_newlines=True): - lines = traceback.format_exception(*ei) - if strip_newlines: - lines = [moves.filter( - lambda x: x, - line.rstrip().splitlines()) for line in lines] - lines = list(itertools.chain(*lines)) - return lines - - def format(self, record): - message = {'message': record.getMessage(), - 'asctime': self.formatTime(record, self.datefmt), - 'name': record.name, - 'msg': record.msg, - 'args': record.args, - 'levelname': record.levelname, - 'levelno': record.levelno, - 'pathname': record.pathname, - 'filename': record.filename, - 'module': record.module, - 'lineno': record.lineno, - 'funcname': record.funcName, - 'created': record.created, - 'msecs': record.msecs, - 'relative_created': record.relativeCreated, - 'thread': record.thread, - 'thread_name': record.threadName, - 'process_name': record.processName, - 'process': record.process, - 'traceback': None} - - if hasattr(record, 'extra'): - message['extra'] = record.extra - - if record.exc_info: - message['traceback'] = self.formatException(record.exc_info) - - return jsonutils.dumps(message) - - -def _create_logging_excepthook(product_name): - def logging_excepthook(exc_type, value, tb): - extra = {'exc_info': (exc_type, value, tb)} - getLogger(product_name).critical( - "".join(traceback.format_exception_only(exc_type, value)), - **extra) - return logging_excepthook - - -class LogConfigError(Exception): - - message = _('Error loading logging config %(log_config)s: %(err_msg)s') - - def __init__(self, log_config, err_msg): - self.log_config = log_config - self.err_msg = err_msg - - def __str__(self): - return self.message % dict(log_config=self.log_config, - err_msg=self.err_msg) - - -def _load_log_config(log_config_append): - try: - logging.config.fileConfig(log_config_append, - disable_existing_loggers=False) - except (moves.configparser.Error, KeyError) as exc: - raise LogConfigError(log_config_append, six.text_type(exc)) - - -def setup(product_name, version='unknown'): - """Setup logging.""" - if CONF.log_config_append: - _load_log_config(CONF.log_config_append) - else: - _setup_logging_from_conf(product_name, version) - sys.excepthook =
_create_logging_excepthook(product_name) - - -def set_defaults(logging_context_format_string=None, - default_log_levels=None): - # Just in case the caller is not setting the - # default_log_level. This is insurance because - # we introduced the default_log_level parameter - # later in a backwards in-compatible change - if default_log_levels is not None: - cfg.set_defaults( - log_opts, - default_log_levels=default_log_levels) - if logging_context_format_string is not None: - cfg.set_defaults( - log_opts, - logging_context_format_string=logging_context_format_string) - - -def _find_facility_from_conf(): - facility_names = logging.handlers.SysLogHandler.facility_names - facility = getattr(logging.handlers.SysLogHandler, - CONF.syslog_log_facility, - None) - - if facility is None and CONF.syslog_log_facility in facility_names: - facility = facility_names.get(CONF.syslog_log_facility) - - if facility is None: - valid_facilities = facility_names.keys() - consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON', - 'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS', - 'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP', - 'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3', - 'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7'] - valid_facilities.extend(consts) - raise TypeError(_('syslog facility must be one of: %s') % - ', '.join("'%s'" % fac - for fac in valid_facilities)) - - return facility - - -class RFCSysLogHandler(logging.handlers.SysLogHandler): - def __init__(self, *args, **kwargs): - self.binary_name = _get_binary_name() - # Do not use super() unless type(logging.handlers.SysLogHandler) - # is 'type' (Python 2.7). - # Use old style calls, if the type is 'classobj' (Python 2.6) - logging.handlers.SysLogHandler.__init__(self, *args, **kwargs) - - def format(self, record): - # Do not use super() unless type(logging.handlers.SysLogHandler) - # is 'type' (Python 2.7). - # Use old style calls, if the type is 'classobj' (Python 2.6) - msg = logging.handlers.SysLogHandler.format(self, record) - msg = self.binary_name + ' ' + msg - return msg - - -def _setup_logging_from_conf(project, version): - log_root = getLogger(None).logger - for handler in log_root.handlers: - log_root.removeHandler(handler) - - logpath = _get_log_file_path() - if logpath: - filelog = logging.handlers.WatchedFileHandler(logpath) - log_root.addHandler(filelog) - - if CONF.use_stderr: - streamlog = ColorHandler() - log_root.addHandler(streamlog) - - elif not logpath: - # pass sys.stdout as a positional argument - # python2.6 calls the argument strm, in 2.7 it's stream - streamlog = logging.StreamHandler(sys.stdout) - log_root.addHandler(streamlog) - - if CONF.publish_errors: - handler = importutils.import_object( - "oslo.messaging.notify.log_handler.PublishErrorsHandler", - logging.ERROR) - log_root.addHandler(handler) - - datefmt = CONF.log_date_format - for handler in log_root.handlers: - # NOTE(alaski): CONF.log_format overrides everything currently. This - # should be deprecated in favor of context aware formatting. 
- if CONF.log_format: - handler.setFormatter(logging.Formatter(fmt=CONF.log_format, - datefmt=datefmt)) - log_root.info('Deprecated: log_format is now deprecated and will ' - 'be removed in the next release') - else: - handler.setFormatter(ContextFormatter(project=project, - version=version, - datefmt=datefmt)) - - if CONF.debug: - log_root.setLevel(logging.DEBUG) - elif CONF.verbose: - log_root.setLevel(logging.INFO) - else: - log_root.setLevel(logging.WARNING) - - for pair in CONF.default_log_levels: - mod, _sep, level_name = pair.partition('=') - logger = logging.getLogger(mod) - # NOTE(AAzza) in python2.6 Logger.setLevel doesn't convert string name - # to integer code. - if sys.version_info < (2, 7): - level = logging.getLevelName(level_name) - logger.setLevel(level) - else: - logger.setLevel(level_name) - - if CONF.use_syslog: - try: - facility = _find_facility_from_conf() - # TODO(bogdando) use the format provided by RFCSysLogHandler - # after existing syslog format deprecation in J - if CONF.use_syslog_rfc_format: - syslog = RFCSysLogHandler(address='/dev/log', - facility=facility) - else: - syslog = logging.handlers.SysLogHandler(address='/dev/log', - facility=facility) - log_root.addHandler(syslog) - except socket.error: - log_root.error('Unable to add syslog handler. Verify that syslog ' - 'is running.') - - -_loggers = {} - - -def getLogger(name='unknown', version='unknown'): - if name not in _loggers: - _loggers[name] = ContextAdapter(logging.getLogger(name), - name, - version) - return _loggers[name] - - -def getLazyLogger(name='unknown', version='unknown'): - """Returns lazy logger. - - Creates a pass-through logger that does not create the real logger - until it is really needed and delegates all calls to the real logger - once it is created. - """ - return LazyAdapter(name, version) - - -class WritableLogger(object): - """A thin wrapper that responds to `write` and logs.""" - - def __init__(self, logger, level=logging.INFO): - self.logger = logger - self.level = level - - def write(self, msg): - self.logger.log(self.level, msg.rstrip()) - - -class ContextFormatter(logging.Formatter): - """A context.RequestContext aware formatter configured through flags. - - The flags used to set format strings are: logging_context_format_string - and logging_default_format_string. You can also specify - logging_debug_format_suffix to append extra formatting if the log level is - debug. - - For information about what variables are available for the formatter see: - http://docs.python.org/library/logging.html#formatter - - If available, uses the context value stored in TLS - local.store.context - - """ - - def __init__(self, *args, **kwargs): - """Initialize ContextFormatter instance - - Takes additional keyword arguments which can be used in the message - format string. 
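The WritableLogger just above exists so that code expecting a file-like object (eventlet's WSGI server takes a log file, for instance) can write into Python logging instead. A stdlib-only sketch of the same adapter in action (the class and logger names here are illustrative):

import logging

logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s')

class WritableLog(object):
    # File-like shim: each write() becomes one log record at a fixed level.
    def __init__(self, logger, level=logging.INFO):
        self.logger = logger
        self.level = level

    def write(self, msg):
        if msg.rstrip():  # print() also writes a bare newline; skip it
            self.logger.log(self.level, msg.rstrip())

print('request served', file=WritableLog(logging.getLogger('demo')))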
- - :keyword project: project name - :type project: string - :keyword version: project version - :type version: string - - """ - - self.project = kwargs.pop('project', 'unknown') - self.version = kwargs.pop('version', 'unknown') - - logging.Formatter.__init__(self, *args, **kwargs) - - def format(self, record): - """Uses contextstring if request_id is set, otherwise default.""" - - # NOTE(jecarey): If msg is not unicode, coerce it into unicode - # before it can get to the python logging and - # possibly cause string encoding trouble - if not isinstance(record.msg, six.text_type): - record.msg = six.text_type(record.msg) - - # store project info - record.project = self.project - record.version = self.version - - # store request info - context = getattr(local.store, 'context', None) - if context: - d = _dictify_context(context) - for k, v in d.items(): - setattr(record, k, v) - - # NOTE(sdague): default the fancier formatting params - # to an empty string so we don't throw an exception if - # they get used - for key in ('instance', 'color', 'user_identity'): - if key not in record.__dict__: - record.__dict__[key] = '' - - if record.__dict__.get('request_id'): - fmt = CONF.logging_context_format_string - else: - fmt = CONF.logging_default_format_string - - if (record.levelno == logging.DEBUG and - CONF.logging_debug_format_suffix): - fmt += " " + CONF.logging_debug_format_suffix - - if sys.version_info < (3, 2): - self._fmt = fmt - else: - self._style = logging.PercentStyle(fmt) - self._fmt = self._style._fmt - # Cache this on the record, Logger will respect our formatted copy - if record.exc_info: - record.exc_text = self.formatException(record.exc_info, record) - return logging.Formatter.format(self, record) - - def formatException(self, exc_info, record=None): - """Format exception output with CONF.logging_exception_prefix.""" - if not record: - return logging.Formatter.formatException(self, exc_info) - - stringbuffer = moves.StringIO() - traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], - None, stringbuffer) - lines = stringbuffer.getvalue().split('\n') - stringbuffer.close() - - if CONF.logging_exception_prefix.find('%(asctime)') != -1: - record.asctime = self.formatTime(record, self.datefmt) - - formatted_lines = [] - for line in lines: - pl = CONF.logging_exception_prefix % record.__dict__ - fl = '%s%s' % (pl, line) - formatted_lines.append(fl) - return '\n'.join(formatted_lines) - - -class ColorHandler(logging.StreamHandler): - LEVEL_COLORS = { - logging.DEBUG: '\033[00;32m', # GREEN - logging.INFO: '\033[00;36m', # CYAN - logging.AUDIT: '\033[01;36m', # BOLD CYAN - logging.WARN: '\033[01;33m', # BOLD YELLOW - logging.ERROR: '\033[01;31m', # BOLD RED - logging.CRITICAL: '\033[01;31m', # BOLD RED - } - - def format(self, record): - record.color = self.LEVEL_COLORS[record.levelno] - return logging.StreamHandler.format(self, record) - - -class DeprecatedConfig(Exception): - message = _("Fatal call to deprecated config: %(msg)s") - - def __init__(self, msg): - super(Exception, self).__init__(self.message % dict(msg=msg)) diff --git a/neutron_fwaas/openstack/common/loopingcall.py b/neutron_fwaas/openstack/common/loopingcall.py deleted file mode 100644 index 1257f1941..000000000 --- a/neutron_fwaas/openstack/common/loopingcall.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. 
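The JSONFormatter deleted above (in log.py) needs nothing from oslo once jsonutils is swapped for the stdlib json module. A trimmed sketch of the same idea, with the field selection shortened for brevity:

import json
import logging

class JsonFormatter(logging.Formatter):
    # Emit each record as one JSON document rather than a formatted line.
    def format(self, record):
        message = {
            'message': record.getMessage(),
            'asctime': self.formatTime(record, self.datefmt),
            'name': record.name,
            'levelname': record.levelname,
            'traceback': None,
        }
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return json.dumps(message)

handler = logging.StreamHandler()
handler.setFormatter(JsonFormatter())
log = logging.getLogger('json-demo')
log.addHandler(handler)
log.warning('%d firewall rules loaded', 3)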
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import sys -import time - -from eventlet import event -from eventlet import greenthread - -from neutron_fwaas.openstack.common._i18n import _LE, _LW -from neutron_fwaas.openstack.common import log as logging - -LOG = logging.getLogger(__name__) - -# NOTE(zyluo): This lambda function was declared to avoid mocking collisions -# with time.time() called in the standard logging module -# during unittests. -_ts = lambda: time.time() - - -class LoopingCallDone(Exception): - """Exception to break out and stop a LoopingCallBase. - - The poll-function passed to LoopingCallBase can raise this exception to - break out of the loop normally. This is somewhat analogous to - StopIteration. - - An optional return-value can be included as the argument to the exception; - this return-value will be returned by LoopingCallBase.wait() - - """ - - def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCallBase.wait() should return.""" - self.retvalue = retvalue - - -class LoopingCallBase(object): - def __init__(self, f=None, *args, **kw): - self.args = args - self.kw = kw - self.f = f - self._running = False - self.done = None - - def stop(self): - self._running = False - - def wait(self): - return self.done.wait() - - -class FixedIntervalLoopingCall(LoopingCallBase): - """A fixed interval looping call.""" - - def start(self, interval, initial_delay=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - start = _ts() - self.f(*self.args, **self.kw) - end = _ts() - if not self._running: - break - delay = end - start - interval - if delay > 0: - LOG.warn(_LW('task %(func_name)s run outlasted ' - 'interval by %(delay).2f sec'), - {'func_name': repr(self.f), 'delay': delay}) - greenthread.sleep(-delay if delay < 0 else 0) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_LE('in fixed duration looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn_n(_inner) - return self.done - - -class DynamicLoopingCall(LoopingCallBase): - """A looping call which sleeps until the next known event. - - The function called should return how long to sleep for before being - called again. 
- """ - - def start(self, initial_delay=None, periodic_interval_max=None): - self._running = True - done = event.Event() - - def _inner(): - if initial_delay: - greenthread.sleep(initial_delay) - - try: - while self._running: - idle = self.f(*self.args, **self.kw) - if not self._running: - break - - if periodic_interval_max is not None: - idle = min(idle, periodic_interval_max) - LOG.debug('Dynamic looping call %(func_name)s sleeping ' - 'for %(idle).02f seconds', - {'func_name': repr(self.f), 'idle': idle}) - greenthread.sleep(idle) - except LoopingCallDone as e: - self.stop() - done.send(e.retvalue) - except Exception: - LOG.exception(_LE('in dynamic looping call')) - done.send_exception(*sys.exc_info()) - return - else: - done.send(True) - - self.done = done - - greenthread.spawn(_inner) - return self.done diff --git a/neutron_fwaas/openstack/common/middleware/__init__.py b/neutron_fwaas/openstack/common/middleware/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/neutron_fwaas/openstack/common/middleware/catch_errors.py b/neutron_fwaas/openstack/common/middleware/catch_errors.py deleted file mode 100644 index d9981ce5b..000000000 --- a/neutron_fwaas/openstack/common/middleware/catch_errors.py +++ /dev/null @@ -1,23 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Compatibility shim for Kilo, while operators migrate to oslo.middleware.""" - -from oslo.middleware import catch_errors - -from neutron_fwaas.openstack.common import versionutils - - -@versionutils.deprecated(as_of=versionutils.deprecated.KILO, - in_favor_of='oslo.middleware.CatchErrors') -class CatchErrorsMiddleware(catch_errors.CatchErrors): - pass diff --git a/neutron_fwaas/openstack/common/middleware/request_id.py b/neutron_fwaas/openstack/common/middleware/request_id.py deleted file mode 100644 index 4576375bb..000000000 --- a/neutron_fwaas/openstack/common/middleware/request_id.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
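Both looping-call flavors above graduated into oslo.service with essentially the same surface. A minimal usage sketch, assuming oslo.service is installed (the 0.1s interval and the tick callback are illustrative):

from oslo_service import loopingcall

state = {'ticks': 0}

def tick():
    state['ticks'] += 1
    if state['ticks'] >= 3:
        # Raising LoopingCallDone stops the loop cleanly; its argument
        # becomes the value returned by wait().
        raise loopingcall.LoopingCallDone(state['ticks'])

timer = loopingcall.FixedIntervalLoopingCall(tick)
timer.start(interval=0.1)
print(timer.wait())  # -> 3

Note the contract visible in _inner above: the callback's runtime counts against the interval, and a warning is logged whenever one run outlasts it.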
- -"""Compatibility shim for Kilo, while operators migrate to oslo.middleware.""" - -from oslo.middleware import request_id - -from neutron_fwaas.openstack.common import versionutils - - -ENV_REQUEST_ID = 'openstack.request_id' -HTTP_RESP_HEADER_REQUEST_ID = 'x-openstack-request-id' - - -@versionutils.deprecated(as_of=versionutils.deprecated.KILO, - in_favor_of='oslo.middleware.RequestId') -class RequestIdMiddleware(request_id.RequestId): - pass diff --git a/neutron_fwaas/openstack/common/periodic_task.py b/neutron_fwaas/openstack/common/periodic_task.py deleted file mode 100644 index 6816eccea..000000000 --- a/neutron_fwaas/openstack/common/periodic_task.py +++ /dev/null @@ -1,212 +0,0 @@ -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import copy -import random -import time - -from oslo.config import cfg -import six - -from neutron_fwaas.openstack.common._i18n import _, _LE, _LI -from neutron_fwaas.openstack.common import log as logging - - -periodic_opts = [ - cfg.BoolOpt('run_external_periodic_tasks', - default=True, - help='Some periodic tasks can be run in a separate process. ' - 'Should we run them here?'), -] - -CONF = cfg.CONF -CONF.register_opts(periodic_opts) - -LOG = logging.getLogger(__name__) - -DEFAULT_INTERVAL = 60.0 - - -def list_opts(): - """Entry point for oslo.config-generator.""" - return [(None, copy.deepcopy(periodic_opts))] - - -class InvalidPeriodicTaskArg(Exception): - message = _("Unexpected argument for periodic task creation: %(arg)s.") - - -def periodic_task(*args, **kwargs): - """Decorator to indicate that a method is a periodic task. - - This decorator can be used in two ways: - - 1. Without arguments '@periodic_task', this will be run on the default - interval of 60 seconds. - - 2. With arguments: - @periodic_task(spacing=N [, run_immediately=[True|False]]) - this will be run on approximately every N seconds. If this number is - negative the periodic task will be disabled. If the run_immediately - argument is provided and has a value of 'True', the first run of the - task will be shortly after task scheduler starts. If - run_immediately is omitted or set to 'False', the first time the - task runs will be approximately N seconds after the task scheduler - starts. - """ - def decorator(f): - # Test for old style invocation - if 'ticks_between_runs' in kwargs: - raise InvalidPeriodicTaskArg(arg='ticks_between_runs') - - # Control if run at all - f._periodic_task = True - f._periodic_external_ok = kwargs.pop('external_process_ok', False) - if f._periodic_external_ok and not CONF.run_external_periodic_tasks: - f._periodic_enabled = False - else: - f._periodic_enabled = kwargs.pop('enabled', True) - - # Control frequency - f._periodic_spacing = kwargs.pop('spacing', 0) - f._periodic_immediate = kwargs.pop('run_immediately', False) - if f._periodic_immediate: - f._periodic_last_run = None - else: - f._periodic_last_run = time.time() - return f - - # NOTE(sirp): The `if` is necessary to allow the decorator to be used with - # and without parenthesis. 
- # - # In the 'with-parenthesis' case (with kwargs present), this function needs - # to return a decorator function since the interpreter will invoke it like: - # - # periodic_task(*args, **kwargs)(f) - # - # In the 'without-parenthesis' case, the original function will be passed - # in as the first argument, like: - # - # periodic_task(f) - if kwargs: - return decorator - else: - return decorator(args[0]) - - -class _PeriodicTasksMeta(type): - def __init__(cls, names, bases, dict_): - """Metaclass that allows us to collect decorated periodic tasks.""" - super(_PeriodicTasksMeta, cls).__init__(names, bases, dict_) - - # NOTE(sirp): if the attribute is not present then we must be the base - # class, so, go ahead an initialize it. If the attribute is present, - # then we're a subclass so make a copy of it so we don't step on our - # parent's toes. - try: - cls._periodic_tasks = cls._periodic_tasks[:] - except AttributeError: - cls._periodic_tasks = [] - - try: - cls._periodic_spacing = cls._periodic_spacing.copy() - except AttributeError: - cls._periodic_spacing = {} - - for value in cls.__dict__.values(): - if getattr(value, '_periodic_task', False): - task = value - name = task.__name__ - - if task._periodic_spacing < 0: - LOG.info(_LI('Skipping periodic task %(task)s because ' - 'its interval is negative'), - {'task': name}) - continue - if not task._periodic_enabled: - LOG.info(_LI('Skipping periodic task %(task)s because ' - 'it is disabled'), - {'task': name}) - continue - - # A periodic spacing of zero indicates that this task should - # be run on the default interval to avoid running too - # frequently. - if task._periodic_spacing == 0: - task._periodic_spacing = DEFAULT_INTERVAL - - cls._periodic_tasks.append((name, task)) - cls._periodic_spacing[name] = task._periodic_spacing - - -def _nearest_boundary(last_run, spacing): - """Find nearest boundary which is in the past, which is a multiple of the - spacing with the last run as an offset. - - Eg if last run was 10 and spacing was 7, the new last run could be: 17, 24, - 31, 38... - - 0% to 5% of the spacing value will be added to this value to ensure tasks - do not synchronize. This jitter is rounded to the nearest second, this - means that spacings smaller than 20 seconds will not have jitter. 
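The "no jitter below 20 seconds" remark above follows from plain truncation: the jitter computed below is int(spacing * random.random() / 20), at most 5% of the spacing, and int() drops anything under one second. A two-line check:

import random

spacing = 19
assert int(spacing * (random.random() / 20)) == 0  # always: 19/20 < 1

(The docstring says "rounded"; the code truncates, which is why the cutoff sits exactly at 20.)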
- """ - current_time = time.time() - if last_run is None: - return current_time - delta = current_time - last_run - offset = delta % spacing - # Add up to 5% jitter - jitter = int(spacing * (random.random() / 20)) - return current_time - offset + jitter - - -@six.add_metaclass(_PeriodicTasksMeta) -class PeriodicTasks(object): - def __init__(self): - super(PeriodicTasks, self).__init__() - self._periodic_last_run = {} - for name, task in self._periodic_tasks: - self._periodic_last_run[name] = task._periodic_last_run - - def run_periodic_tasks(self, context, raise_on_error=False): - """Tasks to be run at a periodic interval.""" - idle_for = DEFAULT_INTERVAL - for task_name, task in self._periodic_tasks: - full_task_name = '.'.join([self.__class__.__name__, task_name]) - - spacing = self._periodic_spacing[task_name] - last_run = self._periodic_last_run[task_name] - - # Check if due, if not skip - idle_for = min(idle_for, spacing) - if last_run is not None: - delta = last_run + spacing - time.time() - if delta > 0: - idle_for = min(idle_for, delta) - continue - - LOG.debug("Running periodic task %(full_task_name)s", - {"full_task_name": full_task_name}) - self._periodic_last_run[task_name] = _nearest_boundary( - last_run, spacing) - - try: - task(self, context) - except Exception as e: - if raise_on_error: - raise - LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"), - {"full_task_name": full_task_name, "e": e}) - time.sleep(0) - - return idle_for diff --git a/neutron_fwaas/openstack/common/policy.py b/neutron_fwaas/openstack/common/policy.py deleted file mode 100644 index 22417bab4..000000000 --- a/neutron_fwaas/openstack/common/policy.py +++ /dev/null @@ -1,962 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (c) 2012 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Common Policy Engine Implementation - -Policies can be expressed in one of two forms: A list of lists, or a -string written in the new policy language. - -In the list-of-lists representation, each check inside the innermost -list is combined as with an "and" conjunction--for that check to pass, -all the specified checks must pass. These innermost lists are then -combined as with an "or" conjunction. As an example, take the following -rule, expressed in the list-of-lists representation:: - - [["role:admin"], ["project_id:%(project_id)s", "role:projectadmin"]] - -This is the original way of expressing policies, but there now exists a -new way: the policy language. 
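The periodic_task decorator and the PeriodicTasks metaclass machinery removed above also survive in oslo.service. A minimal sketch of their use there, assuming oslo.service is installed (Tasks and heartbeat are illustrative names):

from oslo_config import cfg
from oslo_service import periodic_task

class Tasks(periodic_task.PeriodicTasks):
    @periodic_task.periodic_task(spacing=10, run_immediately=True)
    def heartbeat(self, context):
        print('still alive')

tasks = Tasks(cfg.CONF)
# run_periodic_tasks fires whatever is due and returns how long the
# caller may sleep before the next task comes due.
idle_for = tasks.run_periodic_tasks(context=None)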
- -In the policy language, each check is specified the same way as in the -list-of-lists representation: a simple "a:b" pair that is matched to -the correct class to perform that check:: - - +===========================================================================+ - | TYPE | SYNTAX | - +===========================================================================+ - |User's Role | role:admin | - +---------------------------------------------------------------------------+ - |Rules already defined on policy | rule:admin_required | - +---------------------------------------------------------------------------+ - |Against URL's¹ | http://my-url.org/check | - +---------------------------------------------------------------------------+ - |User attributes² | project_id:%(target.project.id)s | - +---------------------------------------------------------------------------+ - |Strings | :'xpto2035abc' | - | | 'myproject': | - +---------------------------------------------------------------------------+ - | | project_id:xpto2035abc | - |Literals | domain_id:20 | - | | True:%(user.enabled)s | - +===========================================================================+ - -¹URL checking must return 'True' to be valid -²User attributes (obtained through the token): user_id, domain_id or project_id - -Conjunction operators are available, allowing for more expressiveness -in crafting policies. So, in the policy language, the previous check in -list-of-lists becomes:: - - role:admin or (project_id:%(project_id)s and role:projectadmin) - -The policy language also has the "not" operator, allowing a richer -policy rule:: - - project_id:%(project_id)s and not role:dunce - -Attributes sent along with API calls can be used by the policy engine -(on the right side of the expression), by using the following syntax:: - - :%(user.id)s - -Contextual attributes of objects identified by their IDs are loaded -from the database. They are also available to the policy engine and -can be checked through the `target` keyword:: - - :%(target.role.name)s - -Finally, two special policy checks should be mentioned; the policy -check "@" will always accept an access, and the policy check "!" will -always reject an access. (Note that if a rule is either the empty -list ("[]") or the empty string, this is equivalent to the "@" policy -check.) Of these, the "!" policy check is probably the most useful, -as it allows particular rules to be explicitly disabled. -""" - -import abc -import ast -import copy -import os -import re - -from oslo.config import cfg -from oslo.serialization import jsonutils -import six -import six.moves.urllib.parse as urlparse -import six.moves.urllib.request as urlrequest - -from neutron_fwaas.openstack.common import fileutils -from neutron_fwaas.openstack.common._i18n import _, _LE, _LI -from neutron_fwaas.openstack.common import log as logging - - -policy_opts = [ - cfg.StrOpt('policy_file', - default='policy.json', - help=_('The JSON file that defines policies.')), - cfg.StrOpt('policy_default_rule', - default='default', - help=_('Default rule. Enforced when a requested rule is not ' - 'found.')), - cfg.MultiStrOpt('policy_dirs', - default=['policy.d'], - help=_('Directories where policy configuration files are ' - 'stored. They can be relative to any directory ' - 'in the search path defined by the config_dir ' - 'option, or absolute paths. 
The file defined by ' - 'policy_file must exist for these directories to ' - 'be searched.')), -] - -CONF = cfg.CONF -CONF.register_opts(policy_opts) - -LOG = logging.getLogger(__name__) - -_checks = {} - - -def list_opts(): - """Entry point for oslo.config-generator.""" - return [(None, copy.deepcopy(policy_opts))] - - -class PolicyNotAuthorized(Exception): - - def __init__(self, rule): - msg = _("Policy doesn't allow %s to be performed.") % rule - super(PolicyNotAuthorized, self).__init__(msg) - - -class Rules(dict): - """A store for rules. Handles the default_rule setting directly.""" - - @classmethod - def load_json(cls, data, default_rule=None): - """Allow loading of JSON rule data.""" - - # Suck in the JSON data and parse the rules - rules = dict((k, parse_rule(v)) for k, v in - jsonutils.loads(data).items()) - - return cls(rules, default_rule) - - def __init__(self, rules=None, default_rule=None): - """Initialize the Rules store.""" - - super(Rules, self).__init__(rules or {}) - self.default_rule = default_rule - - def __missing__(self, key): - """Implements the default rule handling.""" - - if isinstance(self.default_rule, dict): - raise KeyError(key) - - # If the default rule isn't actually defined, do something - # reasonably intelligent - if not self.default_rule: - raise KeyError(key) - - if isinstance(self.default_rule, BaseCheck): - return self.default_rule - - # We need to check this or we can get infinite recursion - if self.default_rule not in self: - raise KeyError(key) - - elif isinstance(self.default_rule, six.string_types): - return self[self.default_rule] - - def __str__(self): - """Dumps a string representation of the rules.""" - - # Start by building the canonical strings for the rules - out_rules = {} - for key, value in self.items(): - # Use empty string for singleton TrueCheck instances - if isinstance(value, TrueCheck): - out_rules[key] = '' - else: - out_rules[key] = str(value) - - # Dump a pretty-printed JSON representation - return jsonutils.dumps(out_rules, indent=4) - - -class Enforcer(object): - """Responsible for loading and enforcing rules. - - :param policy_file: Custom policy file to use, if none is - specified, `CONF.policy_file` will be - used. - :param rules: Default dictionary / Rules to use. It will be - considered just in the first instantiation. If - `load_rules(True)`, `clear()` or `set_rules(True)` - is called this will be overwritten. - :param default_rule: Default rule to use, CONF.default_rule will - be used if none is specified. - :param use_conf: Whether to load rules from cache or config file. - :param overwrite: Whether to overwrite existing rules when reload rules - from config file. - """ - - def __init__(self, policy_file=None, rules=None, - default_rule=None, use_conf=True, overwrite=True): - self.default_rule = default_rule or CONF.policy_default_rule - self.rules = Rules(rules, self.default_rule) - - self.policy_path = None - self.policy_file = policy_file or CONF.policy_file - self.use_conf = use_conf - self.overwrite = overwrite - - def set_rules(self, rules, overwrite=True, use_conf=False): - """Create a new Rules object based on the provided dict of rules. - - :param rules: New rules to use. It should be an instance of dict. - :param overwrite: Whether to overwrite current rules or update them - with the new rules. - :param use_conf: Whether to reload rules from cache or config file. 
- """ - - if not isinstance(rules, dict): - raise TypeError(_("Rules must be an instance of dict or Rules, " - "got %s instead") % type(rules)) - self.use_conf = use_conf - if overwrite: - self.rules = Rules(rules, self.default_rule) - else: - self.rules.update(rules) - - def clear(self): - """Clears Enforcer rules, policy's cache and policy's path.""" - self.set_rules({}) - fileutils.delete_cached_file(self.policy_path) - self.default_rule = None - self.policy_path = None - - def load_rules(self, force_reload=False): - """Loads policy_path's rules. - - Policy file is cached and will be reloaded if modified. - - :param force_reload: Whether to reload rules from config file. - """ - - if force_reload: - self.use_conf = force_reload - - if self.use_conf: - if not self.policy_path: - self.policy_path = self._get_policy_path(self.policy_file) - - self._load_policy_file(self.policy_path, force_reload, - overwrite=self.overwrite) - for path in CONF.policy_dirs: - try: - path = self._get_policy_path(path) - except cfg.ConfigFilesNotFoundError: - LOG.info(_LI("Can not find policy directory: %s"), path) - continue - self._walk_through_policy_directory(path, - self._load_policy_file, - force_reload, False) - - @staticmethod - def _walk_through_policy_directory(path, func, *args): - # We do not iterate over sub-directories. - policy_files = next(os.walk(path))[2] - policy_files.sort() - for policy_file in [p for p in policy_files if not p.startswith('.')]: - func(os.path.join(path, policy_file), *args) - - def _load_policy_file(self, path, force_reload, overwrite=True): - reloaded, data = fileutils.read_cached_file( - path, force_reload=force_reload) - if reloaded or not self.rules or not overwrite: - rules = Rules.load_json(data, self.default_rule) - self.set_rules(rules, overwrite=overwrite, use_conf=True) - LOG.debug("Rules successfully reloaded") - - def _get_policy_path(self, path): - """Locate the policy json data file/path. - - :param path: It's value can be a full path or related path. When - full path specified, this function just returns the full - path. When related path specified, this function will - search configuration directories to find one that exists. - - :returns: The policy path - - :raises: ConfigFilesNotFoundError if the file/path couldn't - be located. - """ - policy_path = CONF.find_file(path) - - if policy_path: - return policy_path - - raise cfg.ConfigFilesNotFoundError((path,)) - - def enforce(self, rule, target, creds, do_raise=False, - exc=None, *args, **kwargs): - """Checks authorization of a rule against the target and credentials. - - :param rule: A string or BaseCheck instance specifying the rule - to evaluate. - :param target: As much information about the object being operated - on as possible, as a dictionary. - :param creds: As much information about the user performing the - action as possible, as a dictionary. - :param do_raise: Whether to raise an exception or not if check - fails. - :param exc: Class of the exception to raise if the check fails. - Any remaining arguments passed to enforce() (both - positional and keyword arguments) will be passed to - the exception class. If not specified, PolicyNotAuthorized - will be used. - - :return: Returns False if the policy does not allow the action and - exc is not provided; otherwise, returns a value that - evaluates to True. Note: for rules using the "case" - expression, this True value will be the specified string - from the expression. 
- """ - - self.load_rules() - - # Allow the rule to be a Check tree - if isinstance(rule, BaseCheck): - result = rule(target, creds, self) - elif not self.rules: - # No rules to reference means we're going to fail closed - result = False - else: - try: - # Evaluate the rule - result = self.rules[rule](target, creds, self) - except KeyError: - LOG.debug("Rule [%s] doesn't exist" % rule) - # If the rule doesn't exist, fail closed - result = False - - # If it is False, raise the exception if requested - if do_raise and not result: - if exc: - raise exc(*args, **kwargs) - - raise PolicyNotAuthorized(rule) - - return result - - -@six.add_metaclass(abc.ABCMeta) -class BaseCheck(object): - """Abstract base class for Check classes.""" - - @abc.abstractmethod - def __str__(self): - """String representation of the Check tree rooted at this node.""" - - pass - - @abc.abstractmethod - def __call__(self, target, cred, enforcer): - """Triggers if instance of the class is called. - - Performs the check. Returns False to reject the access or a - true value (not necessary True) to accept the access. - """ - - pass - - -class FalseCheck(BaseCheck): - """A policy check that always returns False (disallow).""" - - def __str__(self): - """Return a string representation of this check.""" - - return "!" - - def __call__(self, target, cred, enforcer): - """Check the policy.""" - - return False - - -class TrueCheck(BaseCheck): - """A policy check that always returns True (allow).""" - - def __str__(self): - """Return a string representation of this check.""" - - return "@" - - def __call__(self, target, cred, enforcer): - """Check the policy.""" - - return True - - -class Check(BaseCheck): - """A base class to allow for user-defined policy checks.""" - - def __init__(self, kind, match): - """Initiates Check instance. - - :param kind: The kind of the check, i.e., the field before the - ':'. - :param match: The match of the check, i.e., the field after - the ':'. - """ - - self.kind = kind - self.match = match - - def __str__(self): - """Return a string representation of this check.""" - - return "%s:%s" % (self.kind, self.match) - - -class NotCheck(BaseCheck): - """Implements the "not" logical operator. - - A policy check that inverts the result of another policy check. - """ - - def __init__(self, rule): - """Initialize the 'not' check. - - :param rule: The rule to negate. Must be a Check. - """ - - self.rule = rule - - def __str__(self): - """Return a string representation of this check.""" - - return "not %s" % self.rule - - def __call__(self, target, cred, enforcer): - """Check the policy. - - Returns the logical inverse of the wrapped check. - """ - - return not self.rule(target, cred, enforcer) - - -class AndCheck(BaseCheck): - """Implements the "and" logical operator. - - A policy check that requires that a list of other checks all return True. - """ - - def __init__(self, rules): - """Initialize the 'and' check. - - :param rules: A list of rules that will be tested. - """ - - self.rules = rules - - def __str__(self): - """Return a string representation of this check.""" - - return "(%s)" % ' and '.join(str(r) for r in self.rules) - - def __call__(self, target, cred, enforcer): - """Check the policy. - - Requires that all rules accept in order to return True. - """ - - for rule in self.rules: - if not rule(target, cred, enforcer): - return False - - return True - - def add_check(self, rule): - """Adds rule to be tested. - - Allows addition of another rule to the list of rules that will - be tested. 
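The workflow above — a Rules mapping in, enforce(rule, target, creds) out — is preserved by this module's successor, oslo.policy. A minimal sketch, assuming oslo.policy is installed (the rule name create_firewall is illustrative):

from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF, use_conf=False)
enforcer.set_rules(policy.Rules.from_dict(
    {'create_firewall': 'role:admin or role:fwadmin'}))

# True: the credentials carry one of the required roles.
print(enforcer.enforce('create_firewall', target={},
                       creds={'roles': ['fwadmin']}))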
Returns the AndCheck object for convenience. - """ - - self.rules.append(rule) - return self - - -class OrCheck(BaseCheck): - """Implements the "or" operator. - - A policy check that requires that at least one of a list of other - checks returns True. - """ - - def __init__(self, rules): - """Initialize the 'or' check. - - :param rules: A list of rules that will be tested. - """ - - self.rules = rules - - def __str__(self): - """Return a string representation of this check.""" - - return "(%s)" % ' or '.join(str(r) for r in self.rules) - - def __call__(self, target, cred, enforcer): - """Check the policy. - - Requires that at least one rule accept in order to return True. - """ - - for rule in self.rules: - if rule(target, cred, enforcer): - return True - return False - - def add_check(self, rule): - """Adds rule to be tested. - - Allows addition of another rule to the list of rules that will - be tested. Returns the OrCheck object for convenience. - """ - - self.rules.append(rule) - return self - - -def _parse_check(rule): - """Parse a single base check rule into an appropriate Check object.""" - - # Handle the special checks - if rule == '!': - return FalseCheck() - elif rule == '@': - return TrueCheck() - - try: - kind, match = rule.split(':', 1) - except Exception: - LOG.exception(_LE("Failed to understand rule %s") % rule) - # If the rule is invalid, we'll fail closed - return FalseCheck() - - # Find what implements the check - if kind in _checks: - return _checks[kind](kind, match) - elif None in _checks: - return _checks[None](kind, match) - else: - LOG.error(_LE("No handler for matches of kind %s") % kind) - return FalseCheck() - - -def _parse_list_rule(rule): - """Translates the old list-of-lists syntax into a tree of Check objects. - - Provided for backwards compatibility. - """ - - # Empty rule defaults to True - if not rule: - return TrueCheck() - - # Outer list is joined by "or"; inner list by "and" - or_list = [] - for inner_rule in rule: - # Elide empty inner lists - if not inner_rule: - continue - - # Handle bare strings - if isinstance(inner_rule, six.string_types): - inner_rule = [inner_rule] - - # Parse the inner rules into Check objects - and_list = [_parse_check(r) for r in inner_rule] - - # Append the appropriate check to the or_list - if len(and_list) == 1: - or_list.append(and_list[0]) - else: - or_list.append(AndCheck(and_list)) - - # If we have only one check, omit the "or" - if not or_list: - return FalseCheck() - elif len(or_list) == 1: - return or_list[0] - - return OrCheck(or_list) - - -# Used for tokenizing the policy language -_tokenize_re = re.compile(r'\s+') - - -def _parse_tokenize(rule): - """Tokenizer for the policy language. - - Most of the single-character tokens are specified in the - _tokenize_re; however, parentheses need to be handled specially, - because they can appear inside a check string. Thankfully, those - parentheses that appear inside a check string can never occur at - the very beginning or end ("%(variable)s" is the correct syntax). 
- """ - - for tok in _tokenize_re.split(rule): - # Skip empty tokens - if not tok or tok.isspace(): - continue - - # Handle leading parens on the token - clean = tok.lstrip('(') - for i in range(len(tok) - len(clean)): - yield '(', '(' - - # If it was only parentheses, continue - if not clean: - continue - else: - tok = clean - - # Handle trailing parens on the token - clean = tok.rstrip(')') - trail = len(tok) - len(clean) - - # Yield the cleaned token - lowered = clean.lower() - if lowered in ('and', 'or', 'not'): - # Special tokens - yield lowered, clean - elif clean: - # Not a special token, but not composed solely of ')' - if len(tok) >= 2 and ((tok[0], tok[-1]) in - [('"', '"'), ("'", "'")]): - # It's a quoted string - yield 'string', tok[1:-1] - else: - yield 'check', _parse_check(clean) - - # Yield the trailing parens - for i in range(trail): - yield ')', ')' - - -class ParseStateMeta(type): - """Metaclass for the ParseState class. - - Facilitates identifying reduction methods. - """ - - def __new__(mcs, name, bases, cls_dict): - """Create the class. - - Injects the 'reducers' list, a list of tuples matching token sequences - to the names of the corresponding reduction methods. - """ - - reducers = [] - - for key, value in cls_dict.items(): - if not hasattr(value, 'reducers'): - continue - for reduction in value.reducers: - reducers.append((reduction, key)) - - cls_dict['reducers'] = reducers - - return super(ParseStateMeta, mcs).__new__(mcs, name, bases, cls_dict) - - -def reducer(*tokens): - """Decorator for reduction methods. - - Arguments are a sequence of tokens, in order, which should trigger running - this reduction method. - """ - - def decorator(func): - # Make sure we have a list of reducer sequences - if not hasattr(func, 'reducers'): - func.reducers = [] - - # Add the tokens to the list of reducer sequences - func.reducers.append(list(tokens)) - - return func - - return decorator - - -@six.add_metaclass(ParseStateMeta) -class ParseState(object): - """Implement the core of parsing the policy language. - - Uses a greedy reduction algorithm to reduce a sequence of tokens into - a single terminal, the value of which will be the root of the Check tree. - - Note: error reporting is rather lacking. The best we can get with - this parser formulation is an overall "parse failed" error. - Fortunately, the policy language is simple enough that this - shouldn't be that big a problem. - """ - - def __init__(self): - """Initialize the ParseState.""" - - self.tokens = [] - self.values = [] - - def reduce(self): - """Perform a greedy reduction of the token stream. - - If a reducer method matches, it will be executed, then the - reduce() method will be called recursively to search for any more - possible reductions. - """ - - for reduction, methname in self.reducers: - if (len(self.tokens) >= len(reduction) and - self.tokens[-len(reduction):] == reduction): - # Get the reduction method - meth = getattr(self, methname) - - # Reduce the token stream - results = meth(*self.values[-len(reduction):]) - - # Update the tokens and values - self.tokens[-len(reduction):] = [r[0] for r in results] - self.values[-len(reduction):] = [r[1] for r in results] - - # Check for any more reductions - return self.reduce() - - def shift(self, tok, value): - """Adds one more token to the state. Calls reduce().""" - - self.tokens.append(tok) - self.values.append(value) - - # Do a greedy reduce... - self.reduce() - - @property - def result(self): - """Obtain the final result of the parse. 
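The parenthesis handling described in _parse_tokenize above is plain length arithmetic: strip parens from one side of the token, and the length difference says how many '(' or ')' tokens to emit before the remainder is parsed as a check. In isolation (tok is an illustrative token):

tok = '((role:admin'
clean = tok.lstrip('(')
assert len(tok) - len(clean) == 2  # two '(' tokens come out first
assert clean == 'role:admin'       # then this parses as a check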
- - Raises ValueError if the parse failed to reduce to a single result. - """ - - if len(self.values) != 1: - raise ValueError("Could not parse rule") - return self.values[0] - - @reducer('(', 'check', ')') - @reducer('(', 'and_expr', ')') - @reducer('(', 'or_expr', ')') - def _wrap_check(self, _p1, check, _p2): - """Turn parenthesized expressions into a 'check' token.""" - - return [('check', check)] - - @reducer('check', 'and', 'check') - def _make_and_expr(self, check1, _and, check2): - """Create an 'and_expr'. - - Join two checks by the 'and' operator. - """ - - return [('and_expr', AndCheck([check1, check2]))] - - @reducer('and_expr', 'and', 'check') - def _extend_and_expr(self, and_expr, _and, check): - """Extend an 'and_expr' by adding one more check.""" - - return [('and_expr', and_expr.add_check(check))] - - @reducer('check', 'or', 'check') - def _make_or_expr(self, check1, _or, check2): - """Create an 'or_expr'. - - Join two checks by the 'or' operator. - """ - - return [('or_expr', OrCheck([check1, check2]))] - - @reducer('or_expr', 'or', 'check') - def _extend_or_expr(self, or_expr, _or, check): - """Extend an 'or_expr' by adding one more check.""" - - return [('or_expr', or_expr.add_check(check))] - - @reducer('not', 'check') - def _make_not_expr(self, _not, check): - """Invert the result of another check.""" - - return [('check', NotCheck(check))] - - -def _parse_text_rule(rule): - """Parses policy to the tree. - - Translates a policy written in the policy language into a tree of - Check objects. - """ - - # Empty rule means always accept - if not rule: - return TrueCheck() - - # Parse the token stream - state = ParseState() - for tok, value in _parse_tokenize(rule): - state.shift(tok, value) - - try: - return state.result - except ValueError: - # Couldn't parse the rule - LOG.exception(_LE("Failed to understand rule %s") % rule) - - # Fail closed - return FalseCheck() - - -def parse_rule(rule): - """Parses a policy rule into a tree of Check objects.""" - - # If the rule is a string, it's in the policy language - if isinstance(rule, six.string_types): - return _parse_text_rule(rule) - return _parse_list_rule(rule) - - -def register(name, func=None): - """Register a function or Check class as a policy check. - - :param name: Gives the name of the check type, e.g., 'rule', - 'role', etc. If name is None, a default check type - will be registered. - :param func: If given, provides the function or class to register. - If not given, returns a function taking one argument - to specify the function or class to register, - allowing use as a decorator. - """ - - # Perform the actual decoration by registering the function or - # class. Returns the function or class for compliance with the - # decorator interface. 
- def decorator(func): - _checks[name] = func - return func - - # If the function or class is given, do the registration - if func: - return decorator(func) - - return decorator - - -@register("rule") -class RuleCheck(Check): - def __call__(self, target, creds, enforcer): - """Recursively checks credentials based on the defined rules.""" - - try: - return enforcer.rules[self.match](target, creds, enforcer) - except KeyError: - # We don't have any matching rule; fail closed - return False - - -@register("role") -class RoleCheck(Check): - def __call__(self, target, creds, enforcer): - """Check that there is a matching role in the cred dict.""" - - return self.match.lower() in [x.lower() for x in creds['roles']] - - -@register('http') -class HttpCheck(Check): - def __call__(self, target, creds, enforcer): - """Check http: rules by calling to a remote server. - - This example implementation simply verifies that the response - is exactly 'True'. - """ - - url = ('http:' + self.match) % target - - # Convert instances of object() in target temporarily to - # empty dict to avoid circular reference detection - # errors in jsonutils.dumps(). - temp_target = copy.deepcopy(target) - for key in target.keys(): - element = target.get(key) - if type(element) is object: - temp_target[key] = {} - - data = {'target': jsonutils.dumps(temp_target), - 'credentials': jsonutils.dumps(creds)} - post_data = urlparse.urlencode(data) - f = urlrequest.urlopen(url, post_data) - return f.read() == "True" - - -@register(None) -class GenericCheck(Check): - def __call__(self, target, creds, enforcer): - """Check an individual match. - - Matches look like: - - tenant:%(tenant_id)s - role:compute:admin - True:%(user.enabled)s - 'Member':%(role.name)s - """ - - try: - match = self.match % target - except KeyError: - # While doing GenericCheck if key not - # present in Target return false - return False - - try: - # Try to interpret self.kind as a literal - leftval = ast.literal_eval(self.kind) - except ValueError: - try: - kind_parts = self.kind.split('.') - leftval = creds - for kind_part in kind_parts: - leftval = leftval[kind_part] - except KeyError: - return False - return match == six.text_type(leftval) diff --git a/neutron_fwaas/openstack/common/processutils.py b/neutron_fwaas/openstack/common/processutils.py deleted file mode 100644 index 65c196ee0..000000000 --- a/neutron_fwaas/openstack/common/processutils.py +++ /dev/null @@ -1,289 +0,0 @@ -# Copyright 2011 OpenStack Foundation. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -System-level utilities and helper functions. 
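A last note on the policy module: GenericCheck above leans entirely on ordinary %-formatting against the target dict, and a missing key is exactly how it fails closed. In isolation (the sample target is illustrative):

target = {'tenant_id': 'abc123'}
assert '%(tenant_id)s' % target == 'abc123'  # the match expands

matched = True
try:
    '%(missing)s' % target  # unknown key raises KeyError ...
except KeyError:
    matched = False          # ... which the check reports as False
assert not matched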
-""" - -import errno -import logging -import multiprocessing -import os -import random -import shlex -import signal - -from eventlet.green import subprocess -from eventlet import greenthread -from oslo.utils import strutils -import six - -from neutron_fwaas.openstack.common._i18n import _ - - -LOG = logging.getLogger(__name__) - - -class InvalidArgumentError(Exception): - def __init__(self, message=None): - super(InvalidArgumentError, self).__init__(message) - - -class UnknownArgumentError(Exception): - def __init__(self, message=None): - super(UnknownArgumentError, self).__init__(message) - - -class ProcessExecutionError(Exception): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, - description=None): - self.exit_code = exit_code - self.stderr = stderr - self.stdout = stdout - self.cmd = cmd - self.description = description - - if description is None: - description = _("Unexpected error while running command.") - if exit_code is None: - exit_code = '-' - message = _('%(description)s\n' - 'Command: %(cmd)s\n' - 'Exit code: %(exit_code)s\n' - 'Stdout: %(stdout)r\n' - 'Stderr: %(stderr)r') % {'description': description, - 'cmd': cmd, - 'exit_code': exit_code, - 'stdout': stdout, - 'stderr': stderr} - super(ProcessExecutionError, self).__init__(message) - - -class NoRootWrapSpecified(Exception): - def __init__(self, message=None): - super(NoRootWrapSpecified, self).__init__(message) - - -def _subprocess_setup(): - # Python installs a SIGPIPE handler by default. This is usually not what - # non-Python subprocesses expect. - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - -def execute(*cmd, **kwargs): - """Helper method to shell out and execute a command through subprocess. - - Allows optional retry. - - :param cmd: Passed to subprocess.Popen. - :type cmd: string - :param process_input: Send to opened process. - :type process_input: string - :param env_variables: Environment variables and their values that - will be set for the process. - :type env_variables: dict - :param check_exit_code: Single bool, int, or list of allowed exit - codes. Defaults to [0]. Raise - :class:`ProcessExecutionError` unless - program exits with one of these code. - :type check_exit_code: boolean, int, or [int] - :param delay_on_retry: True | False. Defaults to True. If set to True, - wait a short amount of time before retrying. - :type delay_on_retry: boolean - :param attempts: How many times to retry cmd. - :type attempts: int - :param run_as_root: True | False. Defaults to False. If set to True, - the command is prefixed by the command specified - in the root_helper kwarg. - :type run_as_root: boolean - :param root_helper: command to prefix to commands called with - run_as_root=True - :type root_helper: string - :param shell: whether or not there should be a shell used to - execute this command. Defaults to false. - :type shell: boolean - :param loglevel: log level for execute commands. - :type loglevel: int. 
(Should be logging.DEBUG or logging.INFO) - :returns: (stdout, stderr) from process execution - :raises: :class:`UnknownArgumentError` on - receiving unknown arguments - :raises: :class:`ProcessExecutionError` - """ - - process_input = kwargs.pop('process_input', None) - env_variables = kwargs.pop('env_variables', None) - check_exit_code = kwargs.pop('check_exit_code', [0]) - ignore_exit_code = False - delay_on_retry = kwargs.pop('delay_on_retry', True) - attempts = kwargs.pop('attempts', 1) - run_as_root = kwargs.pop('run_as_root', False) - root_helper = kwargs.pop('root_helper', '') - shell = kwargs.pop('shell', False) - loglevel = kwargs.pop('loglevel', logging.DEBUG) - - if isinstance(check_exit_code, bool): - ignore_exit_code = not check_exit_code - check_exit_code = [0] - elif isinstance(check_exit_code, int): - check_exit_code = [check_exit_code] - - if kwargs: - raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs) - - if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0: - if not root_helper: - raise NoRootWrapSpecified( - message=_('Command requested root, but did not ' - 'specify a root helper.')) - cmd = shlex.split(root_helper) + list(cmd) - - cmd = map(str, cmd) - sanitized_cmd = strutils.mask_password(' '.join(cmd)) - - while attempts > 0: - attempts -= 1 - try: - LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd) - _PIPE = subprocess.PIPE # pylint: disable=E1101 - - if os.name == 'nt': - preexec_fn = None - close_fds = False - else: - preexec_fn = _subprocess_setup - close_fds = True - - obj = subprocess.Popen(cmd, - stdin=_PIPE, - stdout=_PIPE, - stderr=_PIPE, - close_fds=close_fds, - preexec_fn=preexec_fn, - shell=shell, - env=env_variables) - result = None - for _i in six.moves.range(20): - # NOTE(russellb) 20 is an arbitrary number of retries to - # prevent any chance of looping forever here. - try: - if process_input is not None: - result = obj.communicate(process_input) - else: - result = obj.communicate() - except OSError as e: - if e.errno in (errno.EAGAIN, errno.EINTR): - continue - raise - break - obj.stdin.close() # pylint: disable=E1101 - _returncode = obj.returncode # pylint: disable=E1101 - LOG.log(loglevel, 'Result was %s' % _returncode) - if not ignore_exit_code and _returncode not in check_exit_code: - (stdout, stderr) = result - sanitized_stdout = strutils.mask_password(stdout) - sanitized_stderr = strutils.mask_password(stderr) - raise ProcessExecutionError(exit_code=_returncode, - stdout=sanitized_stdout, - stderr=sanitized_stderr, - cmd=sanitized_cmd) - return result - except ProcessExecutionError: - if not attempts: - raise - else: - LOG.log(loglevel, _('%r failed. Retrying.'), sanitized_cmd) - if delay_on_retry: - greenthread.sleep(random.randint(20, 200) / 100.0) - finally: - # NOTE(termie): this appears to be necessary to let the subprocess - # call clean something up in between calls, without - # it two execute calls in a row hangs the second one - greenthread.sleep(0) - - -def trycmd(*args, **kwargs): - """A wrapper around execute() to more easily handle warnings and errors. - - Returns an (out, err) tuple of strings containing the output of - the command's stdout and stderr. If 'err' is not empty then the - command can be considered to have failed. - - :discard_warnings True | False. Defaults to False. 
If set to True, - then for succeeding commands, stderr is cleared - - """ - discard_warnings = kwargs.pop('discard_warnings', False) - - try: - out, err = execute(*args, **kwargs) - failed = False - except ProcessExecutionError as exn: - out, err = '', six.text_type(exn) - failed = True - - if not failed and discard_warnings and err: - # Handle commands that output to stderr but otherwise succeed - err = '' - - return out, err - - -def ssh_execute(ssh, cmd, process_input=None, - addl_env=None, check_exit_code=True): - sanitized_cmd = strutils.mask_password(cmd) - LOG.debug('Running cmd (SSH): %s', sanitized_cmd) - if addl_env: - raise InvalidArgumentError(_('Environment not supported over SSH')) - - if process_input: - # This is (probably) fixable if we need it... - raise InvalidArgumentError(_('process_input not supported over SSH')) - - stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) - channel = stdout_stream.channel - - # NOTE(justinsb): This seems suspicious... - # ...other SSH clients have buffering issues with this approach - stdout = stdout_stream.read() - sanitized_stdout = strutils.mask_password(stdout) - stderr = stderr_stream.read() - sanitized_stderr = strutils.mask_password(stderr) - - stdin_stream.close() - - exit_status = channel.recv_exit_status() - - # exit_status == -1 if no exit code was returned - if exit_status != -1: - LOG.debug('Result was %s' % exit_status) - if check_exit_code and exit_status != 0: - raise ProcessExecutionError(exit_code=exit_status, - stdout=sanitized_stdout, - stderr=sanitized_stderr, - cmd=sanitized_cmd) - - return (sanitized_stdout, sanitized_stderr) - - -def get_worker_count(): - """Utility to get the default worker count. - - @return: The number of CPUs if that can be determined, else a default - worker count of 1 is returned. - """ - try: - return multiprocessing.cpu_count() - except NotImplementedError: - return 1 diff --git a/neutron_fwaas/openstack/common/service.py b/neutron_fwaas/openstack/common/service.py deleted file mode 100644 index 1f67bf081..000000000 --- a/neutron_fwaas/openstack/common/service.py +++ /dev/null @@ -1,504 +0,0 @@ -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# Copyright 2011 Justin Santa Barbara -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Generic Node base class for all workers that run on hosts.""" - -import errno -import logging as std_logging -import os -import random -import signal -import sys -import time - -try: - # Importing just the symbol here because the io module does not - # exist in Python 2.6. 
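The execute() helper removed above carried on into oslo.concurrency with the same keyword surface. A minimal sketch, assuming oslo.concurrency is installed and a POSIX system (echo and false serve as sample commands):

from oslo_concurrency import processutils

# Accept only exit code 0 (the default) and retry once on failure.
out, err = processutils.execute('echo', 'hello', attempts=2)
print(out.strip())  # -> hello

# check_exit_code may also be a list of acceptable codes.
out, err = processutils.execute('false', check_exit_code=[0, 1])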
- from io import UnsupportedOperation # noqa -except ImportError: - # Python 2.6 - UnsupportedOperation = None - -import eventlet -from eventlet import event -from oslo.config import cfg - -from neutron_fwaas.openstack.common import eventlet_backdoor -from neutron_fwaas.openstack.common._i18n import _LE, _LI, _LW -from neutron_fwaas.openstack.common import log as logging -from neutron_fwaas.openstack.common import systemd -from neutron_fwaas.openstack.common import threadgroup - - -CONF = cfg.CONF -LOG = logging.getLogger(__name__) - - -def _sighup_supported(): - return hasattr(signal, 'SIGHUP') - - -def _is_daemon(): - # The process group for a foreground process will match the - # process group of the controlling terminal. If those values do - # not match, or ioctl() fails on the stdout file handle, we assume - # the process is running in the background as a daemon. - # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics - try: - is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) - except OSError as err: - if err.errno == errno.ENOTTY: - # Assume we are a daemon because there is no terminal. - is_daemon = True - else: - raise - except UnsupportedOperation: - # Could not get the fileno for stdout, so we must be a daemon. - is_daemon = True - return is_daemon - - -def _is_sighup_and_daemon(signo): - if not (_sighup_supported() and signo == signal.SIGHUP): - # Avoid checking if we are a daemon, because the signal isn't - # SIGHUP. - return False - return _is_daemon() - - -def _signo_to_signame(signo): - signals = {signal.SIGTERM: 'SIGTERM', - signal.SIGINT: 'SIGINT'} - if _sighup_supported(): - signals[signal.SIGHUP] = 'SIGHUP' - return signals[signo] - - -def _set_signals_handler(handler): - signal.signal(signal.SIGTERM, handler) - signal.signal(signal.SIGINT, handler) - if _sighup_supported(): - signal.signal(signal.SIGHUP, handler) - - -class Launcher(object): - """Launch one or more services and wait for them to complete.""" - - def __init__(self): - """Initialize the service launcher. - - :returns: None - - """ - self.services = Services() - self.backdoor_port = eventlet_backdoor.initialize_if_enabled() - - def launch_service(self, service): - """Load and start the given service. - - :param service: The service you would like to start. - :returns: None - - """ - service.backdoor_port = self.backdoor_port - self.services.add(service) - - def stop(self): - """Stop all services which are currently running. - - :returns: None - - """ - self.services.stop() - - def wait(self): - """Waits until all services have been stopped, and then returns. - - :returns: None - - """ - self.services.wait() - - def restart(self): - """Reload config files and restart service. 
- - :returns: None - - """ - cfg.CONF.reload_config_files() - self.services.restart() - - -class SignalExit(SystemExit): - def __init__(self, signo, exccode=1): - super(SignalExit, self).__init__(exccode) - self.signo = signo - - -class ServiceLauncher(Launcher): - def _handle_signal(self, signo, frame): - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - raise SignalExit(signo) - - def handle_signal(self): - _set_signals_handler(self._handle_signal) - - def _wait_for_exit_or_signal(self, ready_callback=None): - status = None - signo = 0 - - LOG.debug('Full set of CONF:') - CONF.log_opt_values(LOG, std_logging.DEBUG) - - try: - if ready_callback: - ready_callback() - super(ServiceLauncher, self).wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_LI('Caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - finally: - self.stop() - - return status, signo - - def wait(self, ready_callback=None): - systemd.notify_once() - while True: - self.handle_signal() - status, signo = self._wait_for_exit_or_signal(ready_callback) - if not _is_sighup_and_daemon(signo): - return status - self.restart() - - -class ServiceWrapper(object): - def __init__(self, service, workers): - self.service = service - self.workers = workers - self.children = set() - self.forktimes = [] - - -class ProcessLauncher(object): - def __init__(self, wait_interval=0.01): - """Constructor. - - :param wait_interval: The interval to sleep for between checks - of child process exit. - """ - self.children = {} - self.sigcaught = None - self.running = True - self.wait_interval = wait_interval - rfd, self.writepipe = os.pipe() - self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') - self.handle_signal() - - def handle_signal(self): - _set_signals_handler(self._handle_signal) - - def _handle_signal(self, signo, frame): - self.sigcaught = signo - self.running = False - - # Allow the process to be killed again and die from natural causes - _set_signals_handler(signal.SIG_DFL) - - def _pipe_watcher(self): - # This will block until the write end is closed when the parent - # dies unexpectedly - self.readpipe.read() - - LOG.info(_LI('Parent process has died unexpectedly, exiting')) - - sys.exit(1) - - def _child_process_handle_signal(self): - # Setup child signal handlers differently - def _sigterm(*args): - signal.signal(signal.SIGTERM, signal.SIG_DFL) - raise SignalExit(signal.SIGTERM) - - def _sighup(*args): - signal.signal(signal.SIGHUP, signal.SIG_DFL) - raise SignalExit(signal.SIGHUP) - - signal.signal(signal.SIGTERM, _sigterm) - if _sighup_supported(): - signal.signal(signal.SIGHUP, _sighup) - # Block SIGINT and let the parent send us a SIGTERM - signal.signal(signal.SIGINT, signal.SIG_IGN) - - def _child_wait_for_exit_or_signal(self, launcher): - status = 0 - signo = 0 - - # NOTE(johannes): All exceptions are caught to ensure this - # doesn't fallback into the loop spawning children. It would - # be bad for a child to spawn more children. 
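# The readpipe/writepipe pair in ProcessLauncher above implements a classic
# parent-death watchdog: each child blocks reading the pipe, and the read
# returns EOF only when the parent exits and the kernel closes the last
# write end. A self-contained sketch of the same mechanism with plain
# os.fork/os.pipe (no eventlet); illustrative only, not the original code:
import os
import sys
import time

rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
    # Child: drop the inherited write end so only the parent holds one,
    # then block; os.read() returns b'' once the parent is gone.
    os.close(wfd)
    os.read(rfd, 1)
    print('parent died unexpectedly, child exiting')
    sys.exit(1)
else:
    # Parent: hold wfd open for its lifetime; exiting closes it and
    # releases any child still blocked in read().
    os.close(rfd)
    time.sleep(1)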
- try: - launcher.wait() - except SignalExit as exc: - signame = _signo_to_signame(exc.signo) - LOG.info(_LI('Child caught %s, exiting'), signame) - status = exc.code - signo = exc.signo - except SystemExit as exc: - status = exc.code - except BaseException: - LOG.exception(_LE('Unhandled exception')) - status = 2 - finally: - launcher.stop() - - return status, signo - - def _child_process(self, service): - self._child_process_handle_signal() - - # Reopen the eventlet hub to make sure we don't share an epoll - # fd with parent and/or siblings, which would be bad - eventlet.hubs.use_hub() - - # Close write to ensure only parent has it open - os.close(self.writepipe) - # Create greenthread to watch for parent to close pipe - eventlet.spawn_n(self._pipe_watcher) - - # Reseed random number generator - random.seed() - - launcher = Launcher() - launcher.launch_service(service) - return launcher - - def _start_child(self, wrap): - if len(wrap.forktimes) > wrap.workers: - # Limit ourselves to one process a second (over the period of - # number of workers * 1 second). This will allow workers to - # start up quickly but ensure we don't fork off children that - # die instantly too quickly. - if time.time() - wrap.forktimes[0] < wrap.workers: - LOG.info(_LI('Forking too fast, sleeping')) - time.sleep(1) - - wrap.forktimes.pop(0) - - wrap.forktimes.append(time.time()) - - pid = os.fork() - if pid == 0: - launcher = self._child_process(wrap.service) - while True: - self._child_process_handle_signal() - status, signo = self._child_wait_for_exit_or_signal(launcher) - if not _is_sighup_and_daemon(signo): - break - launcher.restart() - - os._exit(status) - - LOG.info(_LI('Started child %d'), pid) - - wrap.children.add(pid) - self.children[pid] = wrap - - return pid - - def launch_service(self, service, workers=1): - wrap = ServiceWrapper(service, workers) - - LOG.info(_LI('Starting %d workers'), wrap.workers) - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def _wait_child(self): - try: - # Don't block if no child processes have exited - pid, status = os.waitpid(0, os.WNOHANG) - if not pid: - return None - except OSError as exc: - if exc.errno not in (errno.EINTR, errno.ECHILD): - raise - return None - - if os.WIFSIGNALED(status): - sig = os.WTERMSIG(status) - LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), - dict(pid=pid, sig=sig)) - else: - code = os.WEXITSTATUS(status) - LOG.info(_LI('Child %(pid)s exited with status %(code)d'), - dict(pid=pid, code=code)) - - if pid not in self.children: - LOG.warning(_LW('pid %d not in child list'), pid) - return None - - wrap = self.children.pop(pid) - wrap.children.remove(pid) - return wrap - - def _respawn_children(self): - while self.running: - wrap = self._wait_child() - if not wrap: - # Yield to other threads if no children have exited - # Sleep for a short time to avoid excessive CPU usage - # (see bug #1095346) - eventlet.greenthread.sleep(self.wait_interval) - continue - while self.running and len(wrap.children) < wrap.workers: - self._start_child(wrap) - - def wait(self): - """Loop waiting on children to die and respawning as necessary.""" - - systemd.notify_once() - LOG.debug('Full set of CONF:') - CONF.log_opt_values(LOG, std_logging.DEBUG) - - try: - while True: - self.handle_signal() - self._respawn_children() - # No signal means that stop was called. Don't clean up here. 
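# The forktimes window in _start_child above throttles respawns to roughly
# one fork per second, averaged over the last `workers` forks, so a
# crash-looping child cannot spin the parent. A standalone reimplementation
# of just that arithmetic (throttle_forks is a hypothetical name):
import time

def throttle_forks(forktimes, workers):
    if len(forktimes) > workers:
        # The oldest fork in the window is under `workers` seconds old:
        # we are forking faster than one per second on average.
        if time.time() - forktimes[0] < workers:
            time.sleep(1)
        forktimes.pop(0)
    forktimes.append(time.time())

forktimes = []
for _ in range(5):
    throttle_forks(forktimes, workers=2)  # later iterations may sleep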
- if not self.sigcaught: - return - - signame = _signo_to_signame(self.sigcaught) - LOG.info(_LI('Caught %s, stopping children'), signame) - if not _is_sighup_and_daemon(self.sigcaught): - break - - for pid in self.children: - os.kill(pid, signal.SIGHUP) - self.running = True - self.sigcaught = None - except eventlet.greenlet.GreenletExit: - LOG.info(_LI("Wait called after thread killed. Cleaning up.")) - - self.stop() - - def stop(self): - """Terminate child processes and wait on each.""" - self.running = False - for pid in self.children: - try: - os.kill(pid, signal.SIGTERM) - except OSError as exc: - if exc.errno != errno.ESRCH: - raise - - # Wait for children to die - if self.children: - LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) - while self.children: - self._wait_child() - - -class Service(object): - """Service object for binaries running on hosts.""" - - def __init__(self, threads=1000): - self.tg = threadgroup.ThreadGroup(threads) - - # signal that the service is done shutting itself down: - self._done = event.Event() - - def reset(self): - # NOTE(Fengqian): docs for Event.reset() recommend against using it - self._done = event.Event() - - def start(self): - pass - - def stop(self): - self.tg.stop() - self.tg.wait() - # Signal that service cleanup is done: - if not self._done.ready(): - self._done.send() - - def wait(self): - self._done.wait() - - -class Services(object): - - def __init__(self): - self.services = [] - self.tg = threadgroup.ThreadGroup() - self.done = event.Event() - - def add(self, service): - self.services.append(service) - self.tg.add_thread(self.run_service, service, self.done) - - def stop(self): - # wait for graceful shutdown of services: - for service in self.services: - service.stop() - service.wait() - - # Each service has performed cleanup, now signal that the run_service - # wrapper threads can now die: - if not self.done.ready(): - self.done.send() - - # reap threads: - self.tg.stop() - - def wait(self): - self.tg.wait() - - def restart(self): - self.stop() - self.done = event.Event() - for restart_service in self.services: - restart_service.reset() - self.tg.add_thread(self.run_service, restart_service, self.done) - - @staticmethod - def run_service(service, done): - """Service start wrapper. - - :param service: service to run - :param done: event to wait on until a shutdown is triggered - :returns: None - - """ - service.start() - done.wait() - - -def launch(service, workers=1): - if workers is None or workers == 1: - launcher = ServiceLauncher() - launcher.launch_service(service) - else: - launcher = ProcessLauncher() - launcher.launch_service(service, workers=workers) - - return launcher diff --git a/neutron_fwaas/openstack/common/systemd.py b/neutron_fwaas/openstack/common/systemd.py deleted file mode 100644 index c6c0fc357..000000000 --- a/neutron_fwaas/openstack/common/systemd.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2012-2014 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
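# A consumer of this (now deleted) module would subclass Service and hand it
# to launch(); workers > 1 selects ProcessLauncher, anything else
# ServiceLauncher. Hypothetical usage against the removed API, shown for
# orientation only:
from neutron_fwaas.openstack.common import service

class HeartbeatService(service.Service):
    def start(self):
        # Periodic work runs on the service's thread group.
        self.tg.add_timer(interval=10, callback=lambda: print('alive'))

launcher = service.launch(HeartbeatService(), workers=4)
launcher.wait()  # blocks, respawning children, until SIGTERM/SIGINT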
- -""" -Helper module for systemd service readiness notification. -""" - -import os -import socket -import sys - -from neutron_fwaas.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -def _abstractify(socket_name): - if socket_name.startswith('@'): - # abstract namespace socket - socket_name = '\0%s' % socket_name[1:] - return socket_name - - -def _sd_notify(unset_env, msg): - notify_socket = os.getenv('NOTIFY_SOCKET') - if notify_socket: - sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - try: - sock.connect(_abstractify(notify_socket)) - sock.sendall(msg) - if unset_env: - del os.environ['NOTIFY_SOCKET'] - except EnvironmentError: - LOG.debug("Systemd notification failed", exc_info=True) - finally: - sock.close() - - -def notify(): - """Send notification to Systemd that service is ready. - - For details see - http://www.freedesktop.org/software/systemd/man/sd_notify.html - """ - _sd_notify(False, 'READY=1') - - -def notify_once(): - """Send notification once to Systemd that service is ready. - - Systemd sets NOTIFY_SOCKET environment variable with the name of the - socket listening for notifications from services. - This method removes the NOTIFY_SOCKET environment variable to ensure - notification is sent only once. - """ - _sd_notify(True, 'READY=1') - - -def onready(notify_socket, timeout): - """Wait for systemd style notification on the socket. - - :param notify_socket: local socket address - :type notify_socket: string - :param timeout: socket timeout - :type timeout: float - :returns: 0 service ready - 1 service not ready - 2 timeout occurred - """ - sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - sock.settimeout(timeout) - sock.bind(_abstractify(notify_socket)) - try: - msg = sock.recv(512) - except socket.timeout: - return 2 - finally: - sock.close() - if 'READY=1' in msg: - return 0 - else: - return 1 - - -if __name__ == '__main__': - # simple CLI for testing - if len(sys.argv) == 1: - notify() - elif len(sys.argv) >= 2: - timeout = float(sys.argv[1]) - notify_socket = os.getenv('NOTIFY_SOCKET') - if notify_socket: - retval = onready(notify_socket, timeout) - sys.exit(retval) diff --git a/neutron_fwaas/openstack/common/threadgroup.py b/neutron_fwaas/openstack/common/threadgroup.py deleted file mode 100644 index 53e301d89..000000000 --- a/neutron_fwaas/openstack/common/threadgroup.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2012 Red Hat, Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -import threading - -import eventlet -from eventlet import greenpool - -from neutron_fwaas.openstack.common import log as logging -from neutron_fwaas.openstack.common import loopingcall - - -LOG = logging.getLogger(__name__) - - -def _thread_done(gt, *args, **kwargs): - """Callback function to be passed to GreenThread.link() when we spawn() - Calls the :class:`ThreadGroup` to notify if. 
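# _sd_notify above is essentially the whole systemd readiness protocol:
# write 'READY=1' as a datagram to the unix socket named by NOTIFY_SOCKET,
# mapping a leading '@' to the Linux abstract namespace. A minimal
# standalone equivalent (sd_notify_ready is a hypothetical name):
import os
import socket

def sd_notify_ready():
    addr = os.getenv('NOTIFY_SOCKET')
    if not addr:
        return  # not started under systemd; nothing to do
    if addr.startswith('@'):
        addr = '\0' + addr[1:]  # '@name' -> abstract-namespace socket
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    try:
        sock.sendto(b'READY=1', addr)
    except OSError:
        pass  # readiness notification is best-effort
    finally:
        sock.close()

sd_notify_ready()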
- - """ - kwargs['group'].thread_done(kwargs['thread']) - - -class Thread(object): - """Wrapper around a greenthread, that holds a reference to the - :class:`ThreadGroup`. The Thread will notify the :class:`ThreadGroup` when - it has done so it can be removed from the threads list. - """ - def __init__(self, thread, group): - self.thread = thread - self.thread.link(_thread_done, group=group, thread=self) - - def stop(self): - self.thread.kill() - - def wait(self): - return self.thread.wait() - - def link(self, func, *args, **kwargs): - self.thread.link(func, *args, **kwargs) - - -class ThreadGroup(object): - """The point of the ThreadGroup class is to: - - * keep track of timers and greenthreads (making it easier to stop them - when need be). - * provide an easy API to add timers. - """ - def __init__(self, thread_pool_size=10): - self.pool = greenpool.GreenPool(thread_pool_size) - self.threads = [] - self.timers = [] - - def add_dynamic_timer(self, callback, initial_delay=None, - periodic_interval_max=None, *args, **kwargs): - timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs) - timer.start(initial_delay=initial_delay, - periodic_interval_max=periodic_interval_max) - self.timers.append(timer) - - def add_timer(self, interval, callback, initial_delay=None, - *args, **kwargs): - pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs) - pulse.start(interval=interval, - initial_delay=initial_delay) - self.timers.append(pulse) - - def add_thread(self, callback, *args, **kwargs): - gt = self.pool.spawn(callback, *args, **kwargs) - th = Thread(gt, self) - self.threads.append(th) - return th - - def thread_done(self, thread): - self.threads.remove(thread) - - def _stop_threads(self): - current = threading.current_thread() - - # Iterate over a copy of self.threads so thread_done doesn't - # modify the list while we're iterating - for x in self.threads[:]: - if x is current: - # don't kill the current thread. - continue - try: - x.stop() - except eventlet.greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) - - def stop_timers(self): - for x in self.timers: - try: - x.stop() - except Exception as ex: - LOG.exception(ex) - self.timers = [] - - def stop(self, graceful=False): - """stop function has the option of graceful=True/False. - - * In case of graceful=True, wait for all threads to be finished. - Never kill threads. - * In case of graceful=False, kill threads immediately. - """ - self.stop_timers() - if graceful: - # In case of graceful=True, wait for all threads to be - # finished, never kill threads - self.wait() - else: - # In case of graceful=False(Default), kill threads - # immediately - self._stop_threads() - - def wait(self): - for x in self.timers: - try: - x.wait() - except eventlet.greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) - current = threading.current_thread() - - # Iterate over a copy of self.threads so thread_done doesn't - # modify the list while we're iterating - for x in self.threads[:]: - if x is current: - continue - try: - x.wait() - except eventlet.greenlet.GreenletExit: - pass - except Exception as ex: - LOG.exception(ex) diff --git a/neutron_fwaas/openstack/common/uuidutils.py b/neutron_fwaas/openstack/common/uuidutils.py deleted file mode 100644 index 234b880c9..000000000 --- a/neutron_fwaas/openstack/common/uuidutils.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright (c) 2012 Intel Corporation. -# All Rights Reserved. 
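# Typical (hypothetical) usage of the removed ThreadGroup API above: spawn
# greenthreads and timers through one group so they can be stopped together.
from neutron_fwaas.openstack.common import threadgroup

tg = threadgroup.ThreadGroup(thread_pool_size=10)
tg.add_thread(lambda: print('one-shot worker'))
tg.add_timer(interval=30, callback=lambda: print('tick'))
# ... later, during shutdown:
tg.stop(graceful=True)  # wait for threads/timers instead of killing them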
-# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -UUID related utilities and helper functions. -""" - -import uuid - - -def generate_uuid(): - return str(uuid.uuid4()) - - -def is_uuid_like(val): - """Returns validation of a value as a UUID. - - For our purposes, a UUID is a canonical form string: - aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa - - """ - try: - return str(uuid.UUID(val)) == val - except (TypeError, ValueError, AttributeError): - return False diff --git a/neutron_fwaas/openstack/common/versionutils.py b/neutron_fwaas/openstack/common/versionutils.py deleted file mode 100644 index 0e216bc6d..000000000 --- a/neutron_fwaas/openstack/common/versionutils.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright (c) 2013 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Helpers for comparing version strings. -""" - -import functools -import inspect - -import pkg_resources -import six - -from neutron_fwaas.openstack.common._i18n import _ -from neutron_fwaas.openstack.common import log as logging - - -LOG = logging.getLogger(__name__) - - -class deprecated(object): - """A decorator to mark callables as deprecated. - - This decorator logs a deprecation message when the callable it decorates is - used. The message will include the release where the callable was - deprecated, the release where it may be removed and possibly an optional - replacement. - - Examples: - - 1. Specifying the required deprecated release - - >>> @deprecated(as_of=deprecated.ICEHOUSE) - ... def a(): pass - - 2. Specifying a replacement: - - >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()') - ... def b(): pass - - 3. Specifying the release where the functionality may be removed: - - >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1) - ... def c(): pass - - 4. Specifying the deprecated functionality will not be removed: - >>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=0) - ... def d(): pass - - 5. Specifying a replacement, deprecated functionality will not be removed: - >>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=0) - ... def e(): pass - - """ - - # NOTE(morganfainberg): Bexar is used for unit test purposes, it is - # expected we maintain a gap between Bexar and Folsom in this list. 
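# is_uuid_like() above deliberately round-trips through str(uuid.UUID(val)),
# so only the canonical dashed, lower-case form passes. A self-contained
# check of that behaviour using only the stdlib:
import uuid

def is_uuid_like(val):
    try:
        return str(uuid.UUID(val)) == val
    except (TypeError, ValueError, AttributeError):
        return False

u = str(uuid.uuid4())
assert is_uuid_like(u)                       # canonical form passes
assert not is_uuid_like(u.replace('-', ''))  # bare hex is rejected
assert not is_uuid_like(u.upper())           # upper case is rejected
assert not is_uuid_like('not-a-uuid')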
- BEXAR = 'B' - FOLSOM = 'F' - GRIZZLY = 'G' - HAVANA = 'H' - ICEHOUSE = 'I' - JUNO = 'J' - KILO = 'K' - - _RELEASES = { - # NOTE(morganfainberg): Bexar is used for unit test purposes, it is - # expected we maintain a gap between Bexar and Folsom in this list. - 'B': 'Bexar', - 'F': 'Folsom', - 'G': 'Grizzly', - 'H': 'Havana', - 'I': 'Icehouse', - 'J': 'Juno', - 'K': 'Kilo', - } - - _deprecated_msg_with_alternative = _( - '%(what)s is deprecated as of %(as_of)s in favor of ' - '%(in_favor_of)s and may be removed in %(remove_in)s.') - - _deprecated_msg_no_alternative = _( - '%(what)s is deprecated as of %(as_of)s and may be ' - 'removed in %(remove_in)s. It will not be superseded.') - - _deprecated_msg_with_alternative_no_removal = _( - '%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.') - - _deprecated_msg_with_no_alternative_no_removal = _( - '%(what)s is deprecated as of %(as_of)s. It will not be superseded.') - - def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None): - """Initialize decorator - - :param as_of: the release deprecating the callable. Constants - are define in this class for convenience. - :param in_favor_of: the replacement for the callable (optional) - :param remove_in: an integer specifying how many releases to wait - before removing (default: 2) - :param what: name of the thing being deprecated (default: the - callable's name) - - """ - self.as_of = as_of - self.in_favor_of = in_favor_of - self.remove_in = remove_in - self.what = what - - def __call__(self, func_or_cls): - if not self.what: - self.what = func_or_cls.__name__ + '()' - msg, details = self._build_message() - - if inspect.isfunction(func_or_cls): - - @six.wraps(func_or_cls) - def wrapped(*args, **kwargs): - LOG.deprecated(msg, details) - return func_or_cls(*args, **kwargs) - return wrapped - elif inspect.isclass(func_or_cls): - orig_init = func_or_cls.__init__ - - # TODO(tsufiev): change `functools` module to `six` as - # soon as six 1.7.4 (with fix for passing `assigned` - # argument to underlying `functools.wraps`) is released - # and added to the neutron_fwaas-incubator requrements - @functools.wraps(orig_init, assigned=('__name__', '__doc__')) - def new_init(self, *args, **kwargs): - LOG.deprecated(msg, details) - orig_init(self, *args, **kwargs) - func_or_cls.__init__ = new_init - return func_or_cls - else: - raise TypeError('deprecated can be used only with functions or ' - 'classes') - - def _get_safe_to_remove_release(self, release): - # TODO(dstanek): this method will have to be reimplemented once - # when we get to the X release because once we get to the Y - # release, what is Y+2? - new_release = chr(ord(release) + self.remove_in) - if new_release in self._RELEASES: - return self._RELEASES[new_release] - else: - return new_release - - def _build_message(self): - details = dict(what=self.what, - as_of=self._RELEASES[self.as_of], - remove_in=self._get_safe_to_remove_release(self.as_of)) - - if self.in_favor_of: - details['in_favor_of'] = self.in_favor_of - if self.remove_in > 0: - msg = self._deprecated_msg_with_alternative - else: - # There are no plans to remove this function, but it is - # now deprecated. - msg = self._deprecated_msg_with_alternative_no_removal - else: - if self.remove_in > 0: - msg = self._deprecated_msg_no_alternative - else: - # There are no plans to remove this function, but it is - # now deprecated. 
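# _get_safe_to_remove_release (below) exploits the fact that the release
# constants are consecutive single letters, so "remove_in releases later"
# is just chr(ord(release) + remove_in). A worked check of that arithmetic;
# safe_to_remove and the trimmed release table are illustrative only:
releases = {'I': 'Icehouse', 'J': 'Juno', 'K': 'Kilo'}

def safe_to_remove(release, remove_in=2):
    new_release = chr(ord(release) + remove_in)
    # Fall back to the bare letter when the name is not known yet.
    return releases.get(new_release, new_release)

assert safe_to_remove('I') == 'Kilo'  # Icehouse + 2 -> Kilo
assert safe_to_remove('J') == 'L'     # no 'L' name in the table yet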
- msg = self._deprecated_msg_with_no_alternative_no_removal - return msg, details - - -def is_compatible(requested_version, current_version, same_major=True): - """Determine whether `requested_version` is satisfied by - `current_version`; in other words, `current_version` is >= - `requested_version`. - - :param requested_version: version to check for compatibility - :param current_version: version to check against - :param same_major: if True, the major version must be identical between - `requested_version` and `current_version`. This is used when a - major-version difference indicates incompatibility between the two - versions. Since this is the common-case in practice, the default is - True. - :returns: True if compatible, False if not - """ - requested_parts = pkg_resources.parse_version(requested_version) - current_parts = pkg_resources.parse_version(current_version) - - if same_major and (requested_parts[0] != current_parts[0]): - return False - - return current_parts >= requested_parts diff --git a/openstack-common.conf b/openstack-common.conf deleted file mode 100644 index f8ef52308..000000000 --- a/openstack-common.conf +++ /dev/null @@ -1,23 +0,0 @@ -[DEFAULT] -# The list of modules to copy from oslo-incubator.git -module=cache -module=context -module=eventlet_backdoor -module=fileutils -module=fixture -module=install_venv_common -module=local -module=lockutils -module=log -module=loopingcall -module=middleware -module=periodic_task -module=policy -module=processutils -module=service -module=systemd -module=threadgroup -module=uuidutils - -# The base module to hold the copy of openstack.common -base=neutron_fwaas
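# A closing note on versionutils.is_compatible (removed above): indexing
# requested_parts[0] relied on the old setuptools behaviour of
# pkg_resources.parse_version() returning a tuple. A self-contained,
# tuple-based equivalent for plain dotted numeric versions; an illustrative
# sketch, not a drop-in replacement:
def is_compatible(requested_version, current_version, same_major=True):
    requested = tuple(int(p) for p in requested_version.split('.'))
    current = tuple(int(p) for p in current_version.split('.'))
    if same_major and requested[0] != current[0]:
        return False
    return current >= requested

assert is_compatible('1.2.0', '1.3.0')       # newer minor satisfies
assert not is_compatible('1.2.0', '2.0.0')   # major bump is incompatible
assert is_compatible('1.2.0', '2.0.0', same_major=False)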